1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008,
4 Free Software Foundation, Inc.
5 Contributed by James E. Wilson <wilson@cygnus.com> and
6 David Mosberger <davidm@hpl.hp.com>.
8 This file is part of GCC.
10 GCC is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; either version 3, or (at your option)
15 GCC is distributed in the hope that it will be useful,
16 but WITHOUT ANY WARRANTY; without even the implied warranty of
17 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 GNU General Public License for more details.
20 You should have received a copy of the GNU General Public License
21 along with GCC; see the file COPYING3. If not see
22 <http://www.gnu.org/licenses/>. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
35 #include "insn-attr.h"
43 #include "basic-block.h"
45 #include "diagnostic-core.h"
46 #include "sched-int.h"
49 #include "target-def.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
60 #include "tm-constrs.h"
61 #include "sel-sched.h"
64 /* This is used for communication between ASM_OUTPUT_LABEL and
65 ASM_OUTPUT_LABELREF. */
66 int ia64_asm_output_label = 0;
68 /* Register names for ia64_expand_prologue. */
69 static const char * const ia64_reg_numbers[96] =
70 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
71 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
72 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
73 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
74 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
75 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
76 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
77 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
78 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
79 "r104","r105","r106","r107","r108","r109","r110","r111",
80 "r112","r113","r114","r115","r116","r117","r118","r119",
81 "r120","r121","r122","r123","r124","r125","r126","r127"};
83 /* ??? These strings could be shared with REGISTER_NAMES. */
84 static const char * const ia64_input_reg_names[8] =
85 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
87 /* ??? These strings could be shared with REGISTER_NAMES. */
88 static const char * const ia64_local_reg_names[80] =
89 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
90 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
91 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
92 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
93 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
94 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
95 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
96 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
97 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
98 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
100 /* ??? These strings could be shared with REGISTER_NAMES. */
101 static const char * const ia64_output_reg_names[8] =
102 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
104 /* Which cpu are we scheduling for. */
105 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
107 /* Determines whether we run our final scheduling pass or not. We always
108 avoid the normal second scheduling pass. */
109 static int ia64_flag_schedule_insns2;
111 /* Determines whether we run variable tracking in machine dependent
113 static int ia64_flag_var_tracking;
115 /* Variables which are this size or smaller are put in the sdata/sbss
118 unsigned int ia64_section_threshold;
120 /* The following variable is used by the DFA insn scheduler. The value is
121 TRUE if we do insn bundling instead of insn scheduling. */
133 number_of_ia64_frame_regs
136 /* Structure to be filled in by ia64_compute_frame_size with register
137 save masks and offsets for the current function. */
139 struct ia64_frame_info
141 HOST_WIDE_INT total_size; /* size of the stack frame, not including
142 the caller's scratch area. */
143 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
144 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
145 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
146 HARD_REG_SET mask; /* mask of saved registers. */
147 unsigned int gr_used_mask; /* mask of registers in use as gr spill
148 registers or long-term scratches. */
149 int n_spilled; /* number of spilled registers. */
150 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
151 int n_input_regs; /* number of input registers used. */
152 int n_local_regs; /* number of local registers used. */
153 int n_output_regs; /* number of output registers used. */
154 int n_rotate_regs; /* number of rotating registers used. */
156 char need_regstk; /* true if a .regstk directive needed. */
157 char initialized; /* true if the data is finalized. */
160 /* Current frame information calculated by ia64_compute_frame_size. */
161 static struct ia64_frame_info current_frame_info;
162 /* The actual registers that are emitted. */
163 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
165 static int ia64_first_cycle_multipass_dfa_lookahead (void);
166 static void ia64_dependencies_evaluation_hook (rtx, rtx);
167 static void ia64_init_dfa_pre_cycle_insn (void);
168 static rtx ia64_dfa_pre_cycle_insn (void);
169 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
170 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
171 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
172 static void ia64_h_i_d_extended (void);
173 static void * ia64_alloc_sched_context (void);
174 static void ia64_init_sched_context (void *, bool);
175 static void ia64_set_sched_context (void *);
176 static void ia64_clear_sched_context (void *);
177 static void ia64_free_sched_context (void *);
178 static int ia64_mode_to_int (enum machine_mode);
179 static void ia64_set_sched_flags (spec_info_t);
180 static ds_t ia64_get_insn_spec_ds (rtx);
181 static ds_t ia64_get_insn_checked_ds (rtx);
182 static bool ia64_skip_rtx_p (const_rtx);
183 static int ia64_speculate_insn (rtx, ds_t, rtx *);
184 static bool ia64_needs_block_p (int);
185 static rtx ia64_gen_spec_check (rtx, rtx, ds_t);
186 static int ia64_spec_check_p (rtx);
187 static int ia64_spec_check_src_p (rtx);
188 static rtx gen_tls_get_addr (void);
189 static rtx gen_thread_pointer (void);
190 static int find_gr_spill (enum ia64_frame_regs, int);
191 static int next_scratch_gr_reg (void);
192 static void mark_reg_gr_used_mask (rtx, void *);
193 static void ia64_compute_frame_size (HOST_WIDE_INT);
194 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
195 static void finish_spill_pointers (void);
196 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
197 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
198 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
199 static rtx gen_movdi_x (rtx, rtx, rtx);
200 static rtx gen_fr_spill_x (rtx, rtx, rtx);
201 static rtx gen_fr_restore_x (rtx, rtx, rtx);
203 static void ia64_option_override (void);
204 static void ia64_option_default_params (void);
205 static bool ia64_can_eliminate (const int, const int);
206 static enum machine_mode hfa_element_mode (const_tree, bool);
207 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
209 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
211 static rtx ia64_function_arg_1 (const CUMULATIVE_ARGS *, enum machine_mode,
212 const_tree, bool, bool);
213 static rtx ia64_function_arg (CUMULATIVE_ARGS *, enum machine_mode,
215 static rtx ia64_function_incoming_arg (CUMULATIVE_ARGS *,
216 enum machine_mode, const_tree, bool);
217 static void ia64_function_arg_advance (CUMULATIVE_ARGS *, enum machine_mode,
219 static unsigned int ia64_function_arg_boundary (enum machine_mode,
221 static bool ia64_function_ok_for_sibcall (tree, tree);
222 static bool ia64_return_in_memory (const_tree, const_tree);
223 static rtx ia64_function_value (const_tree, const_tree, bool);
224 static rtx ia64_libcall_value (enum machine_mode, const_rtx);
225 static bool ia64_function_value_regno_p (const unsigned int);
226 static int ia64_register_move_cost (enum machine_mode, reg_class_t,
228 static int ia64_memory_move_cost (enum machine_mode mode, reg_class_t,
230 static bool ia64_rtx_costs (rtx, int, int, int *, bool);
231 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
232 static void fix_range (const char *);
233 static bool ia64_handle_option (size_t, const char *, int);
234 static struct machine_function * ia64_init_machine_status (void);
235 static void emit_insn_group_barriers (FILE *);
236 static void emit_all_insn_group_barriers (FILE *);
237 static void final_emit_insn_group_barriers (FILE *);
238 static void emit_predicate_relation_info (void);
239 static void ia64_reorg (void);
240 static bool ia64_in_small_data_p (const_tree);
241 static void process_epilogue (FILE *, rtx, bool, bool);
243 static bool ia64_assemble_integer (rtx, unsigned int, int);
244 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
245 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
246 static void ia64_output_function_end_prologue (FILE *);
248 static int ia64_issue_rate (void);
249 static int ia64_adjust_cost_2 (rtx, int, rtx, int, dw_t);
250 static void ia64_sched_init (FILE *, int, int);
251 static void ia64_sched_init_global (FILE *, int, int);
252 static void ia64_sched_finish_global (FILE *, int);
253 static void ia64_sched_finish (FILE *, int);
254 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
255 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
256 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
257 static int ia64_variable_issue (FILE *, int, rtx, int);
259 static void ia64_asm_unwind_emit (FILE *, rtx);
260 static void ia64_asm_emit_except_personality (rtx);
261 static void ia64_asm_init_sections (void);
263 static enum unwind_info_type ia64_debug_unwind_info (void);
264 static enum unwind_info_type ia64_except_unwind_info (struct gcc_options *);
266 static struct bundle_state *get_free_bundle_state (void);
267 static void free_bundle_state (struct bundle_state *);
268 static void initiate_bundle_states (void);
269 static void finish_bundle_states (void);
270 static unsigned bundle_state_hash (const void *);
271 static int bundle_state_eq_p (const void *, const void *);
272 static int insert_bundle_state (struct bundle_state *);
273 static void initiate_bundle_state_table (void);
274 static void finish_bundle_state_table (void);
275 static int try_issue_nops (struct bundle_state *, int);
276 static int try_issue_insn (struct bundle_state *, rtx);
277 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
278 static int get_max_pos (state_t);
279 static int get_template (state_t, int);
281 static rtx get_next_important_insn (rtx, rtx);
282 static bool important_for_bundling_p (rtx);
283 static void bundling (FILE *, int, rtx, rtx);
285 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
286 HOST_WIDE_INT, tree);
287 static void ia64_file_start (void);
288 static void ia64_globalize_decl_name (FILE *, tree);
290 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
291 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
292 static section *ia64_select_rtx_section (enum machine_mode, rtx,
293 unsigned HOST_WIDE_INT);
294 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
296 static unsigned int ia64_section_type_flags (tree, const char *, int);
297 static void ia64_init_libfuncs (void)
299 static void ia64_hpux_init_libfuncs (void)
301 static void ia64_sysv4_init_libfuncs (void)
303 static void ia64_vms_init_libfuncs (void)
305 static void ia64_soft_fp_init_libfuncs (void)
307 static bool ia64_vms_valid_pointer_mode (enum machine_mode mode)
309 static tree ia64_vms_common_object_attribute (tree *, tree, tree, int, bool *)
312 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
313 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
314 static void ia64_encode_section_info (tree, rtx, int);
315 static rtx ia64_struct_value_rtx (tree, int);
316 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
317 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
318 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
319 static bool ia64_cannot_force_const_mem (rtx);
320 static const char *ia64_mangle_type (const_tree);
321 static const char *ia64_invalid_conversion (const_tree, const_tree);
322 static const char *ia64_invalid_unary_op (int, const_tree);
323 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
324 static enum machine_mode ia64_c_mode_for_suffix (char);
325 static enum machine_mode ia64_promote_function_mode (const_tree,
330 static void ia64_trampoline_init (rtx, tree, rtx);
331 static void ia64_override_options_after_change (void);
333 static void ia64_dwarf_handle_frame_unspec (const char *, rtx, int);
334 static tree ia64_builtin_decl (unsigned, bool);
336 static reg_class_t ia64_preferred_reload_class (rtx, reg_class_t);
337 static enum machine_mode ia64_get_reg_raw_mode (int regno);
338 static section * ia64_hpux_function_section (tree, enum node_frequency,
341 /* Table of valid machine attributes. */
342 static const struct attribute_spec ia64_attribute_table[] =
344 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
345 { "syscall_linkage", 0, 0, false, true, true, NULL },
346 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
347 #if TARGET_ABI_OPEN_VMS
348 { "common_object", 1, 1, true, false, false, ia64_vms_common_object_attribute},
350 { "version_id", 1, 1, true, false, false,
351 ia64_handle_version_id_attribute },
352 { NULL, 0, 0, false, false, false, NULL }
355 /* Implement overriding of the optimization options. */
356 static const struct default_options ia64_option_optimization_table[] =
358 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
359 #ifdef SUBTARGET_OPTIMIZATION_OPTIONS
360 SUBTARGET_OPTIMIZATION_OPTIONS,
362 { OPT_LEVELS_NONE, 0, NULL, 0 }
365 /* Initialize the GCC target structure. */
366 #undef TARGET_ATTRIBUTE_TABLE
367 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
369 #undef TARGET_INIT_BUILTINS
370 #define TARGET_INIT_BUILTINS ia64_init_builtins
372 #undef TARGET_EXPAND_BUILTIN
373 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
375 #undef TARGET_BUILTIN_DECL
376 #define TARGET_BUILTIN_DECL ia64_builtin_decl
378 #undef TARGET_ASM_BYTE_OP
379 #define TARGET_ASM_BYTE_OP "\tdata1\t"
380 #undef TARGET_ASM_ALIGNED_HI_OP
381 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
382 #undef TARGET_ASM_ALIGNED_SI_OP
383 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
384 #undef TARGET_ASM_ALIGNED_DI_OP
385 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
386 #undef TARGET_ASM_UNALIGNED_HI_OP
387 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
388 #undef TARGET_ASM_UNALIGNED_SI_OP
389 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
390 #undef TARGET_ASM_UNALIGNED_DI_OP
391 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
392 #undef TARGET_ASM_INTEGER
393 #define TARGET_ASM_INTEGER ia64_assemble_integer
395 #undef TARGET_OPTION_OVERRIDE
396 #define TARGET_OPTION_OVERRIDE ia64_option_override
397 #undef TARGET_OPTION_OPTIMIZATION_TABLE
398 #define TARGET_OPTION_OPTIMIZATION_TABLE ia64_option_optimization_table
399 #undef TARGET_OPTION_DEFAULT_PARAMS
400 #define TARGET_OPTION_DEFAULT_PARAMS ia64_option_default_params
402 #undef TARGET_ASM_FUNCTION_PROLOGUE
403 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
404 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
405 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
406 #undef TARGET_ASM_FUNCTION_EPILOGUE
407 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
409 #undef TARGET_IN_SMALL_DATA_P
410 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
412 #undef TARGET_SCHED_ADJUST_COST_2
413 #define TARGET_SCHED_ADJUST_COST_2 ia64_adjust_cost_2
414 #undef TARGET_SCHED_ISSUE_RATE
415 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
416 #undef TARGET_SCHED_VARIABLE_ISSUE
417 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
418 #undef TARGET_SCHED_INIT
419 #define TARGET_SCHED_INIT ia64_sched_init
420 #undef TARGET_SCHED_FINISH
421 #define TARGET_SCHED_FINISH ia64_sched_finish
422 #undef TARGET_SCHED_INIT_GLOBAL
423 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
424 #undef TARGET_SCHED_FINISH_GLOBAL
425 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
426 #undef TARGET_SCHED_REORDER
427 #define TARGET_SCHED_REORDER ia64_sched_reorder
428 #undef TARGET_SCHED_REORDER2
429 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
431 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
432 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
434 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
435 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
437 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
438 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
439 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
440 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
442 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
443 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
444 ia64_first_cycle_multipass_dfa_lookahead_guard
446 #undef TARGET_SCHED_DFA_NEW_CYCLE
447 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
449 #undef TARGET_SCHED_H_I_D_EXTENDED
450 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
452 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
453 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT ia64_alloc_sched_context
455 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
456 #define TARGET_SCHED_INIT_SCHED_CONTEXT ia64_init_sched_context
458 #undef TARGET_SCHED_SET_SCHED_CONTEXT
459 #define TARGET_SCHED_SET_SCHED_CONTEXT ia64_set_sched_context
461 #undef TARGET_SCHED_CLEAR_SCHED_CONTEXT
462 #define TARGET_SCHED_CLEAR_SCHED_CONTEXT ia64_clear_sched_context
464 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
465 #define TARGET_SCHED_FREE_SCHED_CONTEXT ia64_free_sched_context
467 #undef TARGET_SCHED_SET_SCHED_FLAGS
468 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
470 #undef TARGET_SCHED_GET_INSN_SPEC_DS
471 #define TARGET_SCHED_GET_INSN_SPEC_DS ia64_get_insn_spec_ds
473 #undef TARGET_SCHED_GET_INSN_CHECKED_DS
474 #define TARGET_SCHED_GET_INSN_CHECKED_DS ia64_get_insn_checked_ds
476 #undef TARGET_SCHED_SPECULATE_INSN
477 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
479 #undef TARGET_SCHED_NEEDS_BLOCK_P
480 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
482 #undef TARGET_SCHED_GEN_SPEC_CHECK
483 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_spec_check
485 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
486 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
487 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
489 #undef TARGET_SCHED_SKIP_RTX_P
490 #define TARGET_SCHED_SKIP_RTX_P ia64_skip_rtx_p
492 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
493 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
494 #undef TARGET_ARG_PARTIAL_BYTES
495 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
496 #undef TARGET_FUNCTION_ARG
497 #define TARGET_FUNCTION_ARG ia64_function_arg
498 #undef TARGET_FUNCTION_INCOMING_ARG
499 #define TARGET_FUNCTION_INCOMING_ARG ia64_function_incoming_arg
500 #undef TARGET_FUNCTION_ARG_ADVANCE
501 #define TARGET_FUNCTION_ARG_ADVANCE ia64_function_arg_advance
502 #undef TARGET_FUNCTION_ARG_BOUNDARY
503 #define TARGET_FUNCTION_ARG_BOUNDARY ia64_function_arg_boundary
505 #undef TARGET_ASM_OUTPUT_MI_THUNK
506 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
507 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
508 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
510 #undef TARGET_ASM_FILE_START
511 #define TARGET_ASM_FILE_START ia64_file_start
513 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
514 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
516 #undef TARGET_REGISTER_MOVE_COST
517 #define TARGET_REGISTER_MOVE_COST ia64_register_move_cost
518 #undef TARGET_MEMORY_MOVE_COST
519 #define TARGET_MEMORY_MOVE_COST ia64_memory_move_cost
520 #undef TARGET_RTX_COSTS
521 #define TARGET_RTX_COSTS ia64_rtx_costs
522 #undef TARGET_ADDRESS_COST
523 #define TARGET_ADDRESS_COST hook_int_rtx_bool_0
525 #undef TARGET_UNSPEC_MAY_TRAP_P
526 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
528 #undef TARGET_MACHINE_DEPENDENT_REORG
529 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
531 #undef TARGET_ENCODE_SECTION_INFO
532 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
534 #undef TARGET_SECTION_TYPE_FLAGS
535 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
538 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
539 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
542 #undef TARGET_PROMOTE_FUNCTION_MODE
543 #define TARGET_PROMOTE_FUNCTION_MODE ia64_promote_function_mode
545 /* ??? Investigate. */
547 #undef TARGET_PROMOTE_PROTOTYPES
548 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
551 #undef TARGET_FUNCTION_VALUE
552 #define TARGET_FUNCTION_VALUE ia64_function_value
553 #undef TARGET_LIBCALL_VALUE
554 #define TARGET_LIBCALL_VALUE ia64_libcall_value
555 #undef TARGET_FUNCTION_VALUE_REGNO_P
556 #define TARGET_FUNCTION_VALUE_REGNO_P ia64_function_value_regno_p
558 #undef TARGET_STRUCT_VALUE_RTX
559 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
560 #undef TARGET_RETURN_IN_MEMORY
561 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
562 #undef TARGET_SETUP_INCOMING_VARARGS
563 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
564 #undef TARGET_STRICT_ARGUMENT_NAMING
565 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
566 #undef TARGET_MUST_PASS_IN_STACK
567 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
568 #undef TARGET_GET_RAW_RESULT_MODE
569 #define TARGET_GET_RAW_RESULT_MODE ia64_get_reg_raw_mode
570 #undef TARGET_GET_RAW_ARG_MODE
571 #define TARGET_GET_RAW_ARG_MODE ia64_get_reg_raw_mode
573 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
574 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
576 #undef TARGET_DWARF_HANDLE_FRAME_UNSPEC
577 #define TARGET_DWARF_HANDLE_FRAME_UNSPEC ia64_dwarf_handle_frame_unspec
578 #undef TARGET_ASM_UNWIND_EMIT
579 #define TARGET_ASM_UNWIND_EMIT ia64_asm_unwind_emit
580 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
581 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY ia64_asm_emit_except_personality
582 #undef TARGET_ASM_INIT_SECTIONS
583 #define TARGET_ASM_INIT_SECTIONS ia64_asm_init_sections
585 #undef TARGET_DEBUG_UNWIND_INFO
586 #define TARGET_DEBUG_UNWIND_INFO ia64_debug_unwind_info
587 #undef TARGET_EXCEPT_UNWIND_INFO
588 #define TARGET_EXCEPT_UNWIND_INFO ia64_except_unwind_info
590 #undef TARGET_SCALAR_MODE_SUPPORTED_P
591 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
592 #undef TARGET_VECTOR_MODE_SUPPORTED_P
593 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
595 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
596 in an order different from the specified program order. */
597 #undef TARGET_RELAXED_ORDERING
598 #define TARGET_RELAXED_ORDERING true
600 #undef TARGET_DEFAULT_TARGET_FLAGS
601 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
602 #undef TARGET_HANDLE_OPTION
603 #define TARGET_HANDLE_OPTION ia64_handle_option
605 #undef TARGET_CANNOT_FORCE_CONST_MEM
606 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
608 #undef TARGET_MANGLE_TYPE
609 #define TARGET_MANGLE_TYPE ia64_mangle_type
611 #undef TARGET_INVALID_CONVERSION
612 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
613 #undef TARGET_INVALID_UNARY_OP
614 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
615 #undef TARGET_INVALID_BINARY_OP
616 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
618 #undef TARGET_C_MODE_FOR_SUFFIX
619 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
621 #undef TARGET_CAN_ELIMINATE
622 #define TARGET_CAN_ELIMINATE ia64_can_eliminate
624 #undef TARGET_TRAMPOLINE_INIT
625 #define TARGET_TRAMPOLINE_INIT ia64_trampoline_init
627 #undef TARGET_INVALID_WITHIN_DOLOOP
628 #define TARGET_INVALID_WITHIN_DOLOOP hook_constcharptr_const_rtx_null
630 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
631 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ia64_override_options_after_change
633 #undef TARGET_PREFERRED_RELOAD_CLASS
634 #define TARGET_PREFERRED_RELOAD_CLASS ia64_preferred_reload_class
636 struct gcc_target targetm = TARGET_INITIALIZER;
640 ADDR_AREA_NORMAL, /* normal address area */
641 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
645 static GTY(()) tree small_ident1;
646 static GTY(()) tree small_ident2;
651 if (small_ident1 == 0)
653 small_ident1 = get_identifier ("small");
654 small_ident2 = get_identifier ("__small__");
658 /* Retrieve the address area that has been chosen for the given decl. */
660 static ia64_addr_area
661 ia64_get_addr_area (tree decl)
665 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
671 id = TREE_VALUE (TREE_VALUE (model_attr));
672 if (id == small_ident1 || id == small_ident2)
673 return ADDR_AREA_SMALL;
675 return ADDR_AREA_NORMAL;
679 ia64_handle_model_attribute (tree *node, tree name, tree args,
680 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
682 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
684 tree arg, decl = *node;
687 arg = TREE_VALUE (args);
688 if (arg == small_ident1 || arg == small_ident2)
690 addr_area = ADDR_AREA_SMALL;
694 warning (OPT_Wattributes, "invalid argument of %qE attribute",
696 *no_add_attrs = true;
699 switch (TREE_CODE (decl))
702 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
704 && !TREE_STATIC (decl))
706 error_at (DECL_SOURCE_LOCATION (decl),
707 "an address area attribute cannot be specified for "
709 *no_add_attrs = true;
711 area = ia64_get_addr_area (decl);
712 if (area != ADDR_AREA_NORMAL && addr_area != area)
714 error ("address area of %q+D conflicts with previous "
715 "declaration", decl);
716 *no_add_attrs = true;
721 error_at (DECL_SOURCE_LOCATION (decl),
722 "address area attribute cannot be specified for "
724 *no_add_attrs = true;
728 warning (OPT_Wattributes, "%qE attribute ignored",
730 *no_add_attrs = true;
737 /* The section must have global and overlaid attributes. */
738 #define SECTION_VMS_OVERLAY SECTION_MACH_DEP
740 /* Part of the low level implementation of DEC Ada pragma Common_Object which
741 enables the shared use of variables stored in overlaid linker areas
742 corresponding to the use of Fortran COMMON. */
745 ia64_vms_common_object_attribute (tree *node, tree name, tree args,
746 int flags ATTRIBUTE_UNUSED,
754 DECL_COMMON (decl) = 1;
755 id = TREE_VALUE (args);
756 if (TREE_CODE (id) == IDENTIFIER_NODE)
757 val = build_string (IDENTIFIER_LENGTH (id), IDENTIFIER_POINTER (id));
758 else if (TREE_CODE (id) == STRING_CST)
762 warning (OPT_Wattributes,
763 "%qE attribute requires a string constant argument", name);
764 *no_add_attrs = true;
767 DECL_SECTION_NAME (decl) = val;
771 /* Part of the low level implementation of DEC Ada pragma Common_Object. */
774 ia64_vms_output_aligned_decl_common (FILE *file, tree decl, const char *name,
775 unsigned HOST_WIDE_INT size,
778 tree attr = DECL_ATTRIBUTES (decl);
780 /* As common_object attribute set DECL_SECTION_NAME check it before
781 looking up the attribute. */
782 if (DECL_SECTION_NAME (decl) && attr)
783 attr = lookup_attribute ("common_object", attr);
789 /* Code from elfos.h. */
790 fprintf (file, "%s", COMMON_ASM_OP);
791 assemble_name (file, name);
792 fprintf (file, ","HOST_WIDE_INT_PRINT_UNSIGNED",%u\n",
793 size, align / BITS_PER_UNIT);
797 ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT));
798 ASM_OUTPUT_LABEL (file, name);
799 ASM_OUTPUT_SKIP (file, size ? size : 1);
803 /* Definition of TARGET_ASM_NAMED_SECTION for VMS. */
806 ia64_vms_elf_asm_named_section (const char *name, unsigned int flags,
809 if (!(flags & SECTION_VMS_OVERLAY))
811 default_elf_asm_named_section (name, flags, decl);
814 if (flags != (SECTION_VMS_OVERLAY | SECTION_WRITE))
817 if (flags & SECTION_DECLARED)
819 fprintf (asm_out_file, "\t.section\t%s\n", name);
823 fprintf (asm_out_file, "\t.section\t%s,\"awgO\"\n", name);
827 ia64_encode_addr_area (tree decl, rtx symbol)
831 flags = SYMBOL_REF_FLAGS (symbol);
832 switch (ia64_get_addr_area (decl))
834 case ADDR_AREA_NORMAL: break;
835 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
836 default: gcc_unreachable ();
838 SYMBOL_REF_FLAGS (symbol) = flags;
842 ia64_encode_section_info (tree decl, rtx rtl, int first)
844 default_encode_section_info (decl, rtl, first);
846 /* Careful not to prod global register variables. */
847 if (TREE_CODE (decl) == VAR_DECL
848 && GET_CODE (DECL_RTL (decl)) == MEM
849 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
850 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
851 ia64_encode_addr_area (decl, XEXP (rtl, 0));
854 /* Return 1 if the operands of a move are ok. */
857 ia64_move_ok (rtx dst, rtx src)
859 /* If we're under init_recog_no_volatile, we'll not be able to use
860 memory_operand. So check the code directly and don't worry about
861 the validity of the underlying address, which should have been
862 checked elsewhere anyway. */
863 if (GET_CODE (dst) != MEM)
865 if (GET_CODE (src) == MEM)
867 if (register_operand (src, VOIDmode))
870 /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */
871 if (INTEGRAL_MODE_P (GET_MODE (dst)))
872 return src == const0_rtx;
874 return satisfies_constraint_G (src);
877 /* Return 1 if the operands are ok for a floating point load pair. */
880 ia64_load_pair_ok (rtx dst, rtx src)
882 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
884 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
886 switch (GET_CODE (XEXP (src, 0)))
895 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
897 if (GET_CODE (adjust) != CONST_INT
898 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
909 addp4_optimize_ok (rtx op1, rtx op2)
911 return (basereg_operand (op1, GET_MODE(op1)) !=
912 basereg_operand (op2, GET_MODE(op2)));
915 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
916 Return the length of the field, or <= 0 on failure. */
919 ia64_depz_field_mask (rtx rop, rtx rshift)
921 unsigned HOST_WIDE_INT op = INTVAL (rop);
922 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
924 /* Get rid of the zero bits we're shifting in. */
927 /* We must now have a solid block of 1's at bit 0. */
928 return exact_log2 (op + 1);
931 /* Return the TLS model to use for ADDR. */
933 static enum tls_model
934 tls_symbolic_operand_type (rtx addr)
936 enum tls_model tls_kind = TLS_MODEL_NONE;
938 if (GET_CODE (addr) == CONST)
940 if (GET_CODE (XEXP (addr, 0)) == PLUS
941 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
942 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
944 else if (GET_CODE (addr) == SYMBOL_REF)
945 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
950 /* Return true if X is a constant that is valid for some immediate
951 field in an instruction. */
954 ia64_legitimate_constant_p (rtx x)
956 switch (GET_CODE (x))
963 if (GET_MODE (x) == VOIDmode || GET_MODE (x) == SFmode
964 || GET_MODE (x) == DFmode)
966 return satisfies_constraint_G (x);
970 /* ??? Short term workaround for PR 28490. We must make the code here
971 match the code in ia64_expand_move and move_operand, even though they
972 are both technically wrong. */
973 if (tls_symbolic_operand_type (x) == 0)
975 HOST_WIDE_INT addend = 0;
978 if (GET_CODE (op) == CONST
979 && GET_CODE (XEXP (op, 0)) == PLUS
980 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
982 addend = INTVAL (XEXP (XEXP (op, 0), 1));
983 op = XEXP (XEXP (op, 0), 0);
986 if (any_offset_symbol_operand (op, GET_MODE (op))
987 || function_operand (op, GET_MODE (op)))
989 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
990 return (addend & 0x3fff) == 0;
997 enum machine_mode mode = GET_MODE (x);
999 if (mode == V2SFmode)
1000 return satisfies_constraint_Y (x);
1002 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1003 && GET_MODE_SIZE (mode) <= 8);
1011 /* Don't allow TLS addresses to get spilled to memory. */
1014 ia64_cannot_force_const_mem (rtx x)
1016 if (GET_MODE (x) == RFmode)
1018 return tls_symbolic_operand_type (x) != 0;
1021 /* Expand a symbolic constant load. */
1024 ia64_expand_load_address (rtx dest, rtx src)
1026 gcc_assert (GET_CODE (dest) == REG);
1028 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
1029 having to pointer-extend the value afterward. Other forms of address
1030 computation below are also more natural to compute as 64-bit quantities.
1031 If we've been given an SImode destination register, change it. */
1032 if (GET_MODE (dest) != Pmode)
1033 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
1034 byte_lowpart_offset (Pmode, GET_MODE (dest)));
1038 if (small_addr_symbolic_operand (src, VOIDmode))
1041 if (TARGET_AUTO_PIC)
1042 emit_insn (gen_load_gprel64 (dest, src));
1043 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
1044 emit_insn (gen_load_fptr (dest, src));
1045 else if (sdata_symbolic_operand (src, VOIDmode))
1046 emit_insn (gen_load_gprel (dest, src));
1049 HOST_WIDE_INT addend = 0;
1052 /* We did split constant offsets in ia64_expand_move, and we did try
1053 to keep them split in move_operand, but we also allowed reload to
1054 rematerialize arbitrary constants rather than spill the value to
1055 the stack and reload it. So we have to be prepared here to split
1056 them apart again. */
1057 if (GET_CODE (src) == CONST)
1059 HOST_WIDE_INT hi, lo;
1061 hi = INTVAL (XEXP (XEXP (src, 0), 1));
1062 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
1068 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
1072 tmp = gen_rtx_HIGH (Pmode, src);
1073 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
1074 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1076 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
1077 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1081 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
1082 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
1089 static GTY(()) rtx gen_tls_tga;
1091 gen_tls_get_addr (void)
1094 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
1098 static GTY(()) rtx thread_pointer_rtx;
1100 gen_thread_pointer (void)
1102 if (!thread_pointer_rtx)
1103 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
1104 return thread_pointer_rtx;
1108 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
1109 rtx orig_op1, HOST_WIDE_INT addend)
1111 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
1113 HOST_WIDE_INT addend_lo, addend_hi;
1117 case TLS_MODEL_GLOBAL_DYNAMIC:
1120 tga_op1 = gen_reg_rtx (Pmode);
1121 emit_insn (gen_load_dtpmod (tga_op1, op1));
1123 tga_op2 = gen_reg_rtx (Pmode);
1124 emit_insn (gen_load_dtprel (tga_op2, op1));
1126 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1127 LCT_CONST, Pmode, 2, tga_op1,
1128 Pmode, tga_op2, Pmode);
1130 insns = get_insns ();
1133 if (GET_MODE (op0) != Pmode)
1135 emit_libcall_block (insns, op0, tga_ret, op1);
1138 case TLS_MODEL_LOCAL_DYNAMIC:
1139 /* ??? This isn't the completely proper way to do local-dynamic
1140 If the call to __tls_get_addr is used only by a single symbol,
1141 then we should (somehow) move the dtprel to the second arg
1142 to avoid the extra add. */
1145 tga_op1 = gen_reg_rtx (Pmode);
1146 emit_insn (gen_load_dtpmod (tga_op1, op1));
1148 tga_op2 = const0_rtx;
1150 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
1151 LCT_CONST, Pmode, 2, tga_op1,
1152 Pmode, tga_op2, Pmode);
1154 insns = get_insns ();
1157 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1159 tmp = gen_reg_rtx (Pmode);
1160 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
1162 if (!register_operand (op0, Pmode))
1163 op0 = gen_reg_rtx (Pmode);
1166 emit_insn (gen_load_dtprel (op0, op1));
1167 emit_insn (gen_adddi3 (op0, tmp, op0));
1170 emit_insn (gen_add_dtprel (op0, op1, tmp));
1173 case TLS_MODEL_INITIAL_EXEC:
1174 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1175 addend_hi = addend - addend_lo;
1177 op1 = plus_constant (op1, addend_hi);
1180 tmp = gen_reg_rtx (Pmode);
1181 emit_insn (gen_load_tprel (tmp, op1));
1183 if (!register_operand (op0, Pmode))
1184 op0 = gen_reg_rtx (Pmode);
1185 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
1188 case TLS_MODEL_LOCAL_EXEC:
1189 if (!register_operand (op0, Pmode))
1190 op0 = gen_reg_rtx (Pmode);
1196 emit_insn (gen_load_tprel (op0, op1));
1197 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
1200 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
1208 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
1209 orig_op0, 1, OPTAB_DIRECT);
1210 if (orig_op0 == op0)
1212 if (GET_MODE (orig_op0) == Pmode)
1214 return gen_lowpart (GET_MODE (orig_op0), op0);
1218 ia64_expand_move (rtx op0, rtx op1)
1220 enum machine_mode mode = GET_MODE (op0);
1222 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1223 op1 = force_reg (mode, op1);
1225 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1227 HOST_WIDE_INT addend = 0;
1228 enum tls_model tls_kind;
1231 if (GET_CODE (op1) == CONST
1232 && GET_CODE (XEXP (op1, 0)) == PLUS
1233 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1235 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1236 sym = XEXP (XEXP (op1, 0), 0);
1239 tls_kind = tls_symbolic_operand_type (sym);
1241 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1243 if (any_offset_symbol_operand (sym, mode))
1245 else if (aligned_offset_symbol_operand (sym, mode))
1247 HOST_WIDE_INT addend_lo, addend_hi;
1249 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1250 addend_hi = addend - addend_lo;
1254 op1 = plus_constant (sym, addend_hi);
1263 if (reload_completed)
1265 /* We really should have taken care of this offset earlier. */
1266 gcc_assert (addend == 0);
1267 if (ia64_expand_load_address (op0, op1))
1273 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1275 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1277 op1 = expand_simple_binop (mode, PLUS, subtarget,
1278 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1287 /* Split a move from OP1 to OP0 conditional on COND. */
1290 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1292 rtx insn, first = get_last_insn ();
1294 emit_move_insn (op0, op1);
1296 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1298 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1302 /* Split a post-reload TImode or TFmode reference into two DImode
1303 components. This is made extra difficult by the fact that we do
1304 not get any scratch registers to work with, because reload cannot
1305 be prevented from giving us a scratch that overlaps the register
1306 pair involved. So instead, when addressing memory, we tweak the
1307 pointer register up and back down with POST_INCs. Or up and not
1308 back down when we can get away with it.
1310 REVERSED is true when the loads must be done in reversed order
1311 (high word first) for correctness. DEAD is true when the pointer
1312 dies with the second insn we generate and therefore the second
1313 address must not carry a postmodify.
1315 May return an insn which is to be emitted after the moves. */
1318 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1322 switch (GET_CODE (in))
1325 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1326 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1331 /* Cannot occur reversed. */
1332 gcc_assert (!reversed);
1334 if (GET_MODE (in) != TFmode)
1335 split_double (in, &out[0], &out[1]);
1337 /* split_double does not understand how to split a TFmode
1338 quantity into a pair of DImode constants. */
1341 unsigned HOST_WIDE_INT p[2];
1342 long l[4]; /* TFmode is 128 bits */
1344 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1345 real_to_target (l, &r, TFmode);
1347 if (FLOAT_WORDS_BIG_ENDIAN)
1349 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1350 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1354 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1355 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1357 out[0] = GEN_INT (p[0]);
1358 out[1] = GEN_INT (p[1]);
1364 rtx base = XEXP (in, 0);
1367 switch (GET_CODE (base))
1372 out[0] = adjust_automodify_address
1373 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1374 out[1] = adjust_automodify_address
1375 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1379 /* Reversal requires a pre-increment, which can only
1380 be done as a separate insn. */
1381 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1382 out[0] = adjust_automodify_address
1383 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1384 out[1] = adjust_address (in, DImode, 0);
1389 gcc_assert (!reversed && !dead);
1391 /* Just do the increment in two steps. */
1392 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1393 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1397 gcc_assert (!reversed && !dead);
1399 /* Add 8, subtract 24. */
1400 base = XEXP (base, 0);
1401 out[0] = adjust_automodify_address
1402 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1403 out[1] = adjust_automodify_address
1405 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1410 gcc_assert (!reversed && !dead);
1412 /* Extract and adjust the modification. This case is
1413 trickier than the others, because we might have an
1414 index register, or we might have a combined offset that
1415 doesn't fit a signed 9-bit displacement field. We can
1416 assume the incoming expression is already legitimate. */
1417 offset = XEXP (base, 1);
1418 base = XEXP (base, 0);
1420 out[0] = adjust_automodify_address
1421 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1423 if (GET_CODE (XEXP (offset, 1)) == REG)
1425 /* Can't adjust the postmodify to match. Emit the
1426 original, then a separate addition insn. */
1427 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1428 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1432 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1433 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1435 /* Again the postmodify cannot be made to match,
1436 but in this case it's more efficient to get rid
1437 of the postmodify entirely and fix up with an
1439 out[1] = adjust_automodify_address (in, DImode, base, 8);
1441 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1445 /* Combined offset still fits in the displacement field.
1446 (We cannot overflow it at the high end.) */
1447 out[1] = adjust_automodify_address
1448 (in, DImode, gen_rtx_POST_MODIFY
1449 (Pmode, base, gen_rtx_PLUS
1451 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1470 /* Split a TImode or TFmode move instruction after reload.
1471 This is used by *movtf_internal and *movti_internal. */
1473 ia64_split_tmode_move (rtx operands[])
1475 rtx in[2], out[2], insn;
1478 bool reversed = false;
1480 /* It is possible for reload to decide to overwrite a pointer with
1481 the value it points to. In that case we have to do the loads in
1482 the appropriate order so that the pointer is not destroyed too
1483 early. Also we must not generate a postmodify for that second
1484 load, or rws_access_regno will die. */
1485 if (GET_CODE (operands[1]) == MEM
1486 && reg_overlap_mentioned_p (operands[0], operands[1]))
1488 rtx base = XEXP (operands[1], 0);
1489 while (GET_CODE (base) != REG)
1490 base = XEXP (base, 0);
1492 if (REGNO (base) == REGNO (operands[0]))
1496 /* Another reason to do the moves in reversed order is if the first
1497 element of the target register pair is also the second element of
1498 the source register pair. */
1499 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1500 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1503 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1504 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1506 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1507 if (GET_CODE (EXP) == MEM \
1508 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1509 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1510 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1511 add_reg_note (insn, REG_INC, XEXP (XEXP (EXP, 0), 0))
1513 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1514 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1515 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1517 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1518 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1519 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1522 emit_insn (fixup[0]);
1524 emit_insn (fixup[1]);
1526 #undef MAYBE_ADD_REG_INC_NOTE
1529 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1530 through memory plus an extra GR scratch register. Except that you can
1531 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1532 SECONDARY_RELOAD_CLASS, but not both.
1534 We got into problems in the first place by allowing a construct like
1535 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1536 This solution attempts to prevent this situation from occurring. When
1537 we see something like the above, we spill the inner register to memory. */
1540 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1542 if (GET_CODE (in) == SUBREG
1543 && GET_MODE (SUBREG_REG (in)) == TImode
1544 && GET_CODE (SUBREG_REG (in)) == REG)
1546 rtx memt = assign_stack_temp (TImode, 16, 0);
1547 emit_move_insn (memt, SUBREG_REG (in));
1548 return adjust_address (memt, mode, 0);
1550 else if (force && GET_CODE (in) == REG)
1552 rtx memx = assign_stack_temp (mode, 16, 0);
1553 emit_move_insn (memx, in);
1560 /* Expand the movxf or movrf pattern (MODE says which) with the given
1561 OPERANDS, returning true if the pattern should then invoke
1565 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1567 rtx op0 = operands[0];
1569 if (GET_CODE (op0) == SUBREG)
1570 op0 = SUBREG_REG (op0);
1572 /* We must support XFmode loads into general registers for stdarg/vararg,
1573 unprototyped calls, and a rare case where a long double is passed as
1574 an argument after a float HFA fills the FP registers. We split them into
1575 DImode loads for convenience. We also need to support XFmode stores
1576 for the last case. This case does not happen for stdarg/vararg routines,
1577 because we do a block store to memory of unnamed arguments. */
1579 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1583 /* We're hoping to transform everything that deals with XFmode
1584 quantities and GR registers early in the compiler. */
1585 gcc_assert (can_create_pseudo_p ());
1587 /* Struct to register can just use TImode instead. */
1588 if ((GET_CODE (operands[1]) == SUBREG
1589 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1590 || (GET_CODE (operands[1]) == REG
1591 && GR_REGNO_P (REGNO (operands[1]))))
1593 rtx op1 = operands[1];
1595 if (GET_CODE (op1) == SUBREG)
1596 op1 = SUBREG_REG (op1);
1598 op1 = gen_rtx_REG (TImode, REGNO (op1));
1600 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1604 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1606 /* Don't word-swap when reading in the constant. */
1607 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1608 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1610 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1611 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1616 /* If the quantity is in a register not known to be GR, spill it. */
1617 if (register_operand (operands[1], mode))
1618 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1620 gcc_assert (GET_CODE (operands[1]) == MEM);
1622 /* Don't word-swap when reading in the value. */
1623 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1624 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1626 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1627 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1631 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1633 /* We're hoping to transform everything that deals with XFmode
1634 quantities and GR registers early in the compiler. */
1635 gcc_assert (can_create_pseudo_p ());
1637 /* Op0 can't be a GR_REG here, as that case is handled above.
1638 If op0 is a register, then we spill op1, so that we now have a
1639 MEM operand. This requires creating an XFmode subreg of a TImode reg
1640 to force the spill. */
1641 if (register_operand (operands[0], mode))
1643 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1644 op1 = gen_rtx_SUBREG (mode, op1, 0);
1645 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1652 gcc_assert (GET_CODE (operands[0]) == MEM);
1654 /* Don't word-swap when writing out the value. */
1655 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1656 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1658 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1659 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1664 if (!reload_in_progress && !reload_completed)
1666 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1668 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1670 rtx memt, memx, in = operands[1];
1671 if (CONSTANT_P (in))
1672 in = validize_mem (force_const_mem (mode, in));
1673 if (GET_CODE (in) == MEM)
1674 memt = adjust_address (in, TImode, 0);
1677 memt = assign_stack_temp (TImode, 16, 0);
1678 memx = adjust_address (memt, mode, 0);
1679 emit_move_insn (memx, in);
1681 emit_move_insn (op0, memt);
1685 if (!ia64_move_ok (operands[0], operands[1]))
1686 operands[1] = force_reg (mode, operands[1]);
1692 /* Emit comparison instruction if necessary, replacing *EXPR, *OP0, *OP1
1693 with the expression that holds the compare result (in VOIDmode). */
1695 static GTY(()) rtx cmptf_libfunc;
1698 ia64_expand_compare (rtx *expr, rtx *op0, rtx *op1)
1700 enum rtx_code code = GET_CODE (*expr);
1703 /* If we have a BImode input, then we already have a compare result, and
1704 do not need to emit another comparison. */
1705 if (GET_MODE (*op0) == BImode)
1707 gcc_assert ((code == NE || code == EQ) && *op1 == const0_rtx);
1710 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1711 magic number as its third argument, that indicates what to do.
1712 The return value is an integer to be compared against zero. */
1713 else if (TARGET_HPUX && GET_MODE (*op0) == TFmode)
1716 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1723 enum rtx_code ncode;
1726 gcc_assert (cmptf_libfunc && GET_MODE (*op1) == TFmode);
1729 /* 1 = equal, 0 = not equal. Equality operators do
1730 not raise FP_INVALID when given an SNaN operand. */
1731 case EQ: magic = QCMP_EQ; ncode = NE; break;
1732 case NE: magic = QCMP_EQ; ncode = EQ; break;
1733 /* isunordered() from C99. */
1734 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1735 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1736 /* Relational operators raise FP_INVALID when given
1738 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1739 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1740 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1741 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1742 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1743 Expanders for buneq etc. weuld have to be added to ia64.md
1744 for this to be useful. */
1745 default: gcc_unreachable ();
1750 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1751 *op0, TFmode, *op1, TFmode,
1752 GEN_INT (magic), DImode);
1753 cmp = gen_reg_rtx (BImode);
1754 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1755 gen_rtx_fmt_ee (ncode, BImode,
1758 insns = get_insns ();
1761 emit_libcall_block (insns, cmp, cmp,
1762 gen_rtx_fmt_ee (code, BImode, *op0, *op1));
1767 cmp = gen_reg_rtx (BImode);
1768 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1769 gen_rtx_fmt_ee (code, BImode, *op0, *op1)));
1773 *expr = gen_rtx_fmt_ee (code, VOIDmode, cmp, const0_rtx);
1778 /* Generate an integral vector comparison. Return true if the condition has
1779 been reversed, and so the sense of the comparison should be inverted. */
1782 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1783 rtx dest, rtx op0, rtx op1)
1785 bool negate = false;
1788 /* Canonicalize the comparison to EQ, GT, GTU. */
1799 code = reverse_condition (code);
1805 code = reverse_condition (code);
1811 code = swap_condition (code);
1812 x = op0, op0 = op1, op1 = x;
1819 /* Unsigned parallel compare is not supported by the hardware. Play some
1820 tricks to turn this into a signed comparison against 0. */
1829 /* Subtract (-(INT MAX) - 1) from both operands to make
1831 mask = GEN_INT (0x80000000);
1832 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1833 mask = force_reg (mode, mask);
1834 t1 = gen_reg_rtx (mode);
1835 emit_insn (gen_subv2si3 (t1, op0, mask));
1836 t2 = gen_reg_rtx (mode);
1837 emit_insn (gen_subv2si3 (t2, op1, mask));
1846 /* Perform a parallel unsigned saturating subtraction. */
1847 x = gen_reg_rtx (mode);
1848 emit_insn (gen_rtx_SET (VOIDmode, x,
1849 gen_rtx_US_MINUS (mode, op0, op1)));
1853 op1 = CONST0_RTX (mode);
1862 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1863 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1868 /* Emit an integral vector conditional move. */
1871 ia64_expand_vecint_cmov (rtx operands[])
1873 enum machine_mode mode = GET_MODE (operands[0]);
1874 enum rtx_code code = GET_CODE (operands[3]);
1878 cmp = gen_reg_rtx (mode);
1879 negate = ia64_expand_vecint_compare (code, mode, cmp,
1880 operands[4], operands[5]);
1882 ot = operands[1+negate];
1883 of = operands[2-negate];
1885 if (ot == CONST0_RTX (mode))
1887 if (of == CONST0_RTX (mode))
1889 emit_move_insn (operands[0], ot);
1893 x = gen_rtx_NOT (mode, cmp);
1894 x = gen_rtx_AND (mode, x, of);
1895 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1897 else if (of == CONST0_RTX (mode))
1899 x = gen_rtx_AND (mode, cmp, ot);
1900 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1906 t = gen_reg_rtx (mode);
1907 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1908 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1910 f = gen_reg_rtx (mode);
1911 x = gen_rtx_NOT (mode, cmp);
1912 x = gen_rtx_AND (mode, x, operands[2-negate]);
1913 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1915 x = gen_rtx_IOR (mode, t, f);
1916 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1920 /* Emit an integral vector min or max operation. Return true if all done. */
1923 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1928 /* These four combinations are supported directly. */
1929 if (mode == V8QImode && (code == UMIN || code == UMAX))
1931 if (mode == V4HImode && (code == SMIN || code == SMAX))
1934 /* This combination can be implemented with only saturating subtraction. */
1935 if (mode == V4HImode && code == UMAX)
1937 rtx x, tmp = gen_reg_rtx (mode);
1939 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1940 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1942 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1946 /* Everything else implemented via vector comparisons. */
1947 xops[0] = operands[0];
1948 xops[4] = xops[1] = operands[1];
1949 xops[5] = xops[2] = operands[2];
1968 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1970 ia64_expand_vecint_cmov (xops);
1974 /* Emit an integral vector unpack operation. */
1977 ia64_expand_unpack (rtx operands[3], bool unsignedp, bool highp)
1979 enum machine_mode mode = GET_MODE (operands[1]);
1980 rtx (*gen) (rtx, rtx, rtx);
1986 gen = highp ? gen_vec_interleave_highv8qi : gen_vec_interleave_lowv8qi;
1989 gen = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
1995 /* Fill in x with the sign extension of each element in op1. */
1997 x = CONST0_RTX (mode);
2002 x = gen_reg_rtx (mode);
2004 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
2009 emit_insn (gen (gen_lowpart (mode, operands[0]), operands[1], x));
2012 /* Emit an integral vector widening sum operations. */
2015 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
2018 enum machine_mode wmode, mode;
2019 rtx (*unpack_l) (rtx, rtx, rtx);
2020 rtx (*unpack_h) (rtx, rtx, rtx);
2021 rtx (*plus) (rtx, rtx, rtx);
2023 wmode = GET_MODE (operands[0]);
2024 mode = GET_MODE (operands[1]);
2029 unpack_l = gen_vec_interleave_lowv8qi;
2030 unpack_h = gen_vec_interleave_highv8qi;
2031 plus = gen_addv4hi3;
2034 unpack_l = gen_vec_interleave_lowv4hi;
2035 unpack_h = gen_vec_interleave_highv4hi;
2036 plus = gen_addv2si3;
2042 /* Fill in x with the sign extension of each element in op1. */
2044 x = CONST0_RTX (mode);
2049 x = gen_reg_rtx (mode);
2051 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
2056 l = gen_reg_rtx (wmode);
2057 h = gen_reg_rtx (wmode);
2058 s = gen_reg_rtx (wmode);
2060 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
2061 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
2062 emit_insn (plus (s, l, operands[2]));
2063 emit_insn (plus (operands[0], h, s));
2067 ia64_expand_widen_mul_v4hi (rtx operands[3], bool unsignedp, bool highp)
2069 rtx l = gen_reg_rtx (V4HImode);
2070 rtx h = gen_reg_rtx (V4HImode);
2071 rtx (*mulhigh)(rtx, rtx, rtx, rtx);
2072 rtx (*interl)(rtx, rtx, rtx);
2074 emit_insn (gen_mulv4hi3 (l, operands[1], operands[2]));
2076 /* For signed, pmpy2.r would appear to more closely match this operation.
2077 However, the vectorizer is more likely to use the LO and HI patterns
2078 in pairs. At which point, with this formulation, the first two insns
2079 of each can be CSEd. */
2080 mulhigh = unsignedp ? gen_pmpyshr2_u : gen_pmpyshr2;
2081 emit_insn (mulhigh (h, operands[1], operands[2], GEN_INT (16)));
2083 interl = highp ? gen_vec_interleave_highv4hi : gen_vec_interleave_lowv4hi;
2084 emit_insn (interl (gen_lowpart (V4HImode, operands[0]), l, h));
2087 /* Emit a signed or unsigned V8QI dot product operation. */
2090 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
2092 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
2094 /* Fill in x1 and x2 with the sign extension of each element. */
2096 x1 = x2 = CONST0_RTX (V8QImode);
2101 x1 = gen_reg_rtx (V8QImode);
2102 x2 = gen_reg_rtx (V8QImode);
2104 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
2105 CONST0_RTX (V8QImode));
2107 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
2108 CONST0_RTX (V8QImode));
2112 l1 = gen_reg_rtx (V4HImode);
2113 l2 = gen_reg_rtx (V4HImode);
2114 h1 = gen_reg_rtx (V4HImode);
2115 h2 = gen_reg_rtx (V4HImode);
2117 emit_insn (gen_vec_interleave_lowv8qi
2118 (gen_lowpart (V8QImode, l1), operands[1], x1));
2119 emit_insn (gen_vec_interleave_lowv8qi
2120 (gen_lowpart (V8QImode, l2), operands[2], x2));
2121 emit_insn (gen_vec_interleave_highv8qi
2122 (gen_lowpart (V8QImode, h1), operands[1], x1));
2123 emit_insn (gen_vec_interleave_highv8qi
2124 (gen_lowpart (V8QImode, h2), operands[2], x2));
2126 p1 = gen_reg_rtx (V2SImode);
2127 p2 = gen_reg_rtx (V2SImode);
2128 p3 = gen_reg_rtx (V2SImode);
2129 p4 = gen_reg_rtx (V2SImode);
2130 emit_insn (gen_pmpy2_r (p1, l1, l2));
2131 emit_insn (gen_pmpy2_l (p2, l1, l2));
2132 emit_insn (gen_pmpy2_r (p3, h1, h2));
2133 emit_insn (gen_pmpy2_l (p4, h1, h2));
2135 s1 = gen_reg_rtx (V2SImode);
2136 s2 = gen_reg_rtx (V2SImode);
2137 s3 = gen_reg_rtx (V2SImode);
2138 emit_insn (gen_addv2si3 (s1, p1, p2));
2139 emit_insn (gen_addv2si3 (s2, p3, p4));
2140 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
2141 emit_insn (gen_addv2si3 (operands[0], s2, s3));
2144 /* Emit the appropriate sequence for a call. */
2147 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
2152 addr = XEXP (addr, 0);
2153 addr = convert_memory_address (DImode, addr);
2154 b0 = gen_rtx_REG (DImode, R_BR (0));
2156 /* ??? Should do this for functions known to bind local too. */
2157 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
2160 insn = gen_sibcall_nogp (addr);
2162 insn = gen_call_nogp (addr, b0);
2164 insn = gen_call_value_nogp (retval, addr, b0);
2165 insn = emit_call_insn (insn);
2170 insn = gen_sibcall_gp (addr);
2172 insn = gen_call_gp (addr, b0);
2174 insn = gen_call_value_gp (retval, addr, b0);
2175 insn = emit_call_insn (insn);
2177 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2181 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
2183 if (TARGET_ABI_OPEN_VMS)
2184 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
2185 gen_rtx_REG (DImode, GR_REG (25)));
2189 reg_emitted (enum ia64_frame_regs r)
2191 if (emitted_frame_related_regs[r] == 0)
2192 emitted_frame_related_regs[r] = current_frame_info.r[r];
2194 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
2198 get_reg (enum ia64_frame_regs r)
2201 return current_frame_info.r[r];
2205 is_emitted (int regno)
2209 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
2210 if (emitted_frame_related_regs[r] == regno)
2216 ia64_reload_gp (void)
2220 if (current_frame_info.r[reg_save_gp])
2222 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
2226 HOST_WIDE_INT offset;
2229 offset = (current_frame_info.spill_cfa_off
2230 + current_frame_info.spill_size);
2231 if (frame_pointer_needed)
2233 tmp = hard_frame_pointer_rtx;
2238 tmp = stack_pointer_rtx;
2239 offset = current_frame_info.total_size - offset;
2242 offset_r = GEN_INT (offset);
2243 if (satisfies_constraint_I (offset_r))
2244 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
2247 emit_move_insn (pic_offset_table_rtx, offset_r);
2248 emit_insn (gen_adddi3 (pic_offset_table_rtx,
2249 pic_offset_table_rtx, tmp));
2252 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
2255 emit_move_insn (pic_offset_table_rtx, tmp);
2259 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
2260 rtx scratch_b, int noreturn_p, int sibcall_p)
2263 bool is_desc = false;
2265 /* If we find we're calling through a register, then we're actually
2266 calling through a descriptor, so load up the values. */
2267 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
2272 /* ??? We are currently constrained to *not* use peep2, because
2273 we can legitimately change the global lifetime of the GP
2274 (in the form of killing where previously live). This is
2275 because a call through a descriptor doesn't use the previous
2276 value of the GP, while a direct call does, and we do not
2277 commit to either form until the split here.
2279 That said, this means that we lack precise life info for
2280 whether ADDR is dead after this call. This is not terribly
2281 important, since we can fix things up essentially for free
2282 with the POST_DEC below, but it's nice to not use it when we
2283 can immediately tell it's not necessary. */
2284 addr_dead_p = ((noreturn_p || sibcall_p
2285 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
2287 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
2289 /* Load the code address into scratch_b. */
2290 tmp = gen_rtx_POST_INC (Pmode, addr);
2291 tmp = gen_rtx_MEM (Pmode, tmp);
2292 emit_move_insn (scratch_r, tmp);
2293 emit_move_insn (scratch_b, scratch_r);
2295 /* Load the GP address. If ADDR is not dead here, then we must
2296 revert the change made above via the POST_INCREMENT. */
2298 tmp = gen_rtx_POST_DEC (Pmode, addr);
2301 tmp = gen_rtx_MEM (Pmode, tmp);
2302 emit_move_insn (pic_offset_table_rtx, tmp);
2309 insn = gen_sibcall_nogp (addr);
2311 insn = gen_call_value_nogp (retval, addr, retaddr);
2313 insn = gen_call_nogp (addr, retaddr);
2314 emit_call_insn (insn);
2316 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2320 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2322 This differs from the generic code in that we know about the zero-extending
2323 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2324 also know that ld.acq+cmpxchg.rel equals a full barrier.
2326 The loop we want to generate looks like
2331 new_reg = cmp_reg op val;
2332 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2333 if (cmp_reg != old_reg)
2336 Note that we only do the plain load from memory once. Subsequent
2337 iterations use the value loaded by the compare-and-swap pattern. */
2340 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2341 rtx old_dst, rtx new_dst)
2343 enum machine_mode mode = GET_MODE (mem);
2344 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2345 enum insn_code icode;
2347 /* Special case for using fetchadd. */
2348 if ((mode == SImode || mode == DImode)
2349 && (code == PLUS || code == MINUS)
2350 && fetchadd_operand (val, mode))
2353 val = GEN_INT (-INTVAL (val));
2356 old_dst = gen_reg_rtx (mode);
2358 emit_insn (gen_memory_barrier ());
2361 icode = CODE_FOR_fetchadd_acq_si;
2363 icode = CODE_FOR_fetchadd_acq_di;
2364 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2368 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2370 if (new_reg != new_dst)
2371 emit_move_insn (new_dst, new_reg);
2376 /* Because of the volatile mem read, we get an ld.acq, which is the
2377 front half of the full barrier. The end half is the cmpxchg.rel. */
2378 gcc_assert (MEM_VOLATILE_P (mem));
2380 old_reg = gen_reg_rtx (DImode);
2381 cmp_reg = gen_reg_rtx (DImode);
2382 label = gen_label_rtx ();
2386 val = simplify_gen_subreg (DImode, val, mode, 0);
2387 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2390 emit_move_insn (cmp_reg, mem);
2394 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2395 emit_move_insn (old_reg, cmp_reg);
2396 emit_move_insn (ar_ccv, cmp_reg);
2399 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2404 new_reg = expand_simple_binop (DImode, AND, new_reg, val, NULL_RTX,
2405 true, OPTAB_DIRECT);
2406 new_reg = expand_simple_unop (DImode, code, new_reg, NULL_RTX, true);
2409 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2410 true, OPTAB_DIRECT);
2413 new_reg = gen_lowpart (mode, new_reg);
2415 emit_move_insn (new_dst, new_reg);
2419 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2420 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2421 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2422 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2427 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2429 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2432 /* Begin the assembly file. */
2435 ia64_file_start (void)
2437 /* Variable tracking should be run after all optimizations which change order
2438 of insns. It also needs a valid CFG. This can't be done in
2439 ia64_option_override, because flag_var_tracking is finalized after
2441 ia64_flag_var_tracking = flag_var_tracking;
2442 flag_var_tracking = 0;
2444 default_file_start ();
2445 emit_safe_across_calls ();
2449 emit_safe_across_calls (void)
2451 unsigned int rs, re;
2458 while (rs < 64 && call_used_regs[PR_REG (rs)])
2462 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2466 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2470 fputc (',', asm_out_file);
2472 fprintf (asm_out_file, "p%u", rs);
2474 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2478 fputc ('\n', asm_out_file);
2481 /* Globalize a declaration. */
2484 ia64_globalize_decl_name (FILE * stream, tree decl)
2486 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2487 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2490 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2491 const char *p = TREE_STRING_POINTER (v);
2492 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2494 targetm.asm_out.globalize_label (stream, name);
2495 if (TREE_CODE (decl) == FUNCTION_DECL)
2496 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
2499 /* Helper function for ia64_compute_frame_size: find an appropriate general
2500 register to spill some special register to. SPECIAL_SPILL_MASK contains
2501 bits in GR0 to GR31 that have already been allocated by this routine.
2502 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2505 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2509 if (emitted_frame_related_regs[r] != 0)
2511 regno = emitted_frame_related_regs[r];
2512 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2513 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2514 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2515 else if (current_function_is_leaf
2516 && regno >= GR_REG (1) && regno <= GR_REG (31))
2517 current_frame_info.gr_used_mask |= 1 << regno;
2522 /* If this is a leaf function, first try an otherwise unused
2523 call-clobbered register. */
2524 if (current_function_is_leaf)
2526 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2527 if (! df_regs_ever_live_p (regno)
2528 && call_used_regs[regno]
2529 && ! fixed_regs[regno]
2530 && ! global_regs[regno]
2531 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2532 && ! is_emitted (regno))
2534 current_frame_info.gr_used_mask |= 1 << regno;
2541 regno = current_frame_info.n_local_regs;
2542 /* If there is a frame pointer, then we can't use loc79, because
2543 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2544 reg_name switching code in ia64_expand_prologue. */
2545 while (regno < (80 - frame_pointer_needed))
2546 if (! is_emitted (LOC_REG (regno++)))
2548 current_frame_info.n_local_regs = regno;
2549 return LOC_REG (regno - 1);
2553 /* Failed to find a general register to spill to. Must use stack. */
2557 /* In order to make for nice schedules, we try to allocate every temporary
2558 to a different register. We must of course stay away from call-saved,
2559 fixed, and global registers. We must also stay away from registers
2560 allocated in current_frame_info.gr_used_mask, since those include regs
2561 used all through the prologue.
2563 Any register allocated here must be used immediately. The idea is to
2564 aid scheduling, not to solve data flow problems. */
2566 static int last_scratch_gr_reg;
2569 next_scratch_gr_reg (void)
2573 for (i = 0; i < 32; ++i)
2575 regno = (last_scratch_gr_reg + i + 1) & 31;
2576 if (call_used_regs[regno]
2577 && ! fixed_regs[regno]
2578 && ! global_regs[regno]
2579 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2581 last_scratch_gr_reg = regno;
2586 /* There must be _something_ available. */
2590 /* Helper function for ia64_compute_frame_size, called through
2591 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2594 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2596 unsigned int regno = REGNO (reg);
2599 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2600 for (i = 0; i < n; ++i)
2601 current_frame_info.gr_used_mask |= 1 << (regno + i);
2606 /* Returns the number of bytes offset between the frame pointer and the stack
2607 pointer for the current function. SIZE is the number of bytes of space
2608 needed for local variables. */
2611 ia64_compute_frame_size (HOST_WIDE_INT size)
2613 HOST_WIDE_INT total_size;
2614 HOST_WIDE_INT spill_size = 0;
2615 HOST_WIDE_INT extra_spill_size = 0;
2616 HOST_WIDE_INT pretend_args_size;
2619 int spilled_gr_p = 0;
2620 int spilled_fr_p = 0;
2626 if (current_frame_info.initialized)
2629 memset (¤t_frame_info, 0, sizeof current_frame_info);
2630 CLEAR_HARD_REG_SET (mask);
2632 /* Don't allocate scratches to the return register. */
2633 diddle_return_value (mark_reg_gr_used_mask, NULL);
2635 /* Don't allocate scratches to the EH scratch registers. */
2636 if (cfun->machine->ia64_eh_epilogue_sp)
2637 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2638 if (cfun->machine->ia64_eh_epilogue_bsp)
2639 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2641 /* Find the size of the register stack frame. We have only 80 local
2642 registers, because we reserve 8 for the inputs and 8 for the
2645 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2646 since we'll be adjusting that down later. */
2647 regno = LOC_REG (78) + ! frame_pointer_needed;
2648 for (; regno >= LOC_REG (0); regno--)
2649 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2651 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2653 /* For functions marked with the syscall_linkage attribute, we must mark
2654 all eight input registers as in use, so that locals aren't visible to
2657 if (cfun->machine->n_varargs > 0
2658 || lookup_attribute ("syscall_linkage",
2659 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2660 current_frame_info.n_input_regs = 8;
2663 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2664 if (df_regs_ever_live_p (regno))
2666 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2669 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2670 if (df_regs_ever_live_p (regno))
2672 i = regno - OUT_REG (0) + 1;
2674 #ifndef PROFILE_HOOK
2675 /* When -p profiling, we need one output register for the mcount argument.
2676 Likewise for -a profiling for the bb_init_func argument. For -ax
2677 profiling, we need two output registers for the two bb_init_trace_func
2682 current_frame_info.n_output_regs = i;
2684 /* ??? No rotating register support yet. */
2685 current_frame_info.n_rotate_regs = 0;
2687 /* Discover which registers need spilling, and how much room that
2688 will take. Begin with floating point and general registers,
2689 which will always wind up on the stack. */
2691 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2692 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2694 SET_HARD_REG_BIT (mask, regno);
2700 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2701 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2703 SET_HARD_REG_BIT (mask, regno);
2709 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2710 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2712 SET_HARD_REG_BIT (mask, regno);
2717 /* Now come all special registers that might get saved in other
2718 general registers. */
2720 if (frame_pointer_needed)
2722 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2723 /* If we did not get a register, then we take LOC79. This is guaranteed
2724 to be free, even if regs_ever_live is already set, because this is
2725 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2726 as we don't count loc79 above. */
2727 if (current_frame_info.r[reg_fp] == 0)
2729 current_frame_info.r[reg_fp] = LOC_REG (79);
2730 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2734 if (! current_function_is_leaf)
2736 /* Emit a save of BR0 if we call other functions. Do this even
2737 if this function doesn't return, as EH depends on this to be
2738 able to unwind the stack. */
2739 SET_HARD_REG_BIT (mask, BR_REG (0));
2741 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2742 if (current_frame_info.r[reg_save_b0] == 0)
2744 extra_spill_size += 8;
2748 /* Similarly for ar.pfs. */
2749 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2750 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2751 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2753 extra_spill_size += 8;
2757 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2758 registers are clobbered, so we fall back to the stack. */
2759 current_frame_info.r[reg_save_gp]
2760 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2761 if (current_frame_info.r[reg_save_gp] == 0)
2763 SET_HARD_REG_BIT (mask, GR_REG (1));
2770 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2772 SET_HARD_REG_BIT (mask, BR_REG (0));
2773 extra_spill_size += 8;
2777 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2779 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2780 current_frame_info.r[reg_save_ar_pfs]
2781 = find_gr_spill (reg_save_ar_pfs, 1);
2782 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2784 extra_spill_size += 8;
2790 /* Unwind descriptor hackery: things are most efficient if we allocate
2791 consecutive GR save registers for RP, PFS, FP in that order. However,
2792 it is absolutely critical that FP get the only hard register that's
2793 guaranteed to be free, so we allocated it first. If all three did
2794 happen to be allocated hard regs, and are consecutive, rearrange them
2795 into the preferred order now.
2797 If we have already emitted code for any of those registers,
2798 then it's already too late to change. */
2799 min_regno = MIN (current_frame_info.r[reg_fp],
2800 MIN (current_frame_info.r[reg_save_b0],
2801 current_frame_info.r[reg_save_ar_pfs]));
2802 max_regno = MAX (current_frame_info.r[reg_fp],
2803 MAX (current_frame_info.r[reg_save_b0],
2804 current_frame_info.r[reg_save_ar_pfs]));
2806 && min_regno + 2 == max_regno
2807 && (current_frame_info.r[reg_fp] == min_regno + 1
2808 || current_frame_info.r[reg_save_b0] == min_regno + 1
2809 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2810 && (emitted_frame_related_regs[reg_save_b0] == 0
2811 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2812 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2813 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2814 && (emitted_frame_related_regs[reg_fp] == 0
2815 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2817 current_frame_info.r[reg_save_b0] = min_regno;
2818 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2819 current_frame_info.r[reg_fp] = min_regno + 2;
2822 /* See if we need to store the predicate register block. */
2823 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2824 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2826 if (regno <= PR_REG (63))
2828 SET_HARD_REG_BIT (mask, PR_REG (0));
2829 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2830 if (current_frame_info.r[reg_save_pr] == 0)
2832 extra_spill_size += 8;
2836 /* ??? Mark them all as used so that register renaming and such
2837 are free to use them. */
2838 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2839 df_set_regs_ever_live (regno, true);
2842 /* If we're forced to use st8.spill, we're forced to save and restore
2843 ar.unat as well. The check for existing liveness allows inline asm
2844 to touch ar.unat. */
2845 if (spilled_gr_p || cfun->machine->n_varargs
2846 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2848 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2849 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2850 current_frame_info.r[reg_save_ar_unat]
2851 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2852 if (current_frame_info.r[reg_save_ar_unat] == 0)
2854 extra_spill_size += 8;
2859 if (df_regs_ever_live_p (AR_LC_REGNUM))
2861 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2862 current_frame_info.r[reg_save_ar_lc]
2863 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2864 if (current_frame_info.r[reg_save_ar_lc] == 0)
2866 extra_spill_size += 8;
2871 /* If we have an odd number of words of pretend arguments written to
2872 the stack, then the FR save area will be unaligned. We round the
2873 size of this area up to keep things 16 byte aligned. */
2875 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2877 pretend_args_size = crtl->args.pretend_args_size;
2879 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2880 + crtl->outgoing_args_size);
2881 total_size = IA64_STACK_ALIGN (total_size);
2883 /* We always use the 16-byte scratch area provided by the caller, but
2884 if we are a leaf function, there's no one to which we need to provide
2886 if (current_function_is_leaf)
2887 total_size = MAX (0, total_size - 16);
2889 current_frame_info.total_size = total_size;
2890 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2891 current_frame_info.spill_size = spill_size;
2892 current_frame_info.extra_spill_size = extra_spill_size;
2893 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2894 current_frame_info.n_spilled = n_spilled;
2895 current_frame_info.initialized = reload_completed;
2898 /* Worker function for TARGET_CAN_ELIMINATE. */
2901 ia64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
2903 return (to == BR_REG (0) ? current_function_is_leaf : true);
2906 /* Compute the initial difference between the specified pair of registers. */
2909 ia64_initial_elimination_offset (int from, int to)
2911 HOST_WIDE_INT offset;
2913 ia64_compute_frame_size (get_frame_size ());
2916 case FRAME_POINTER_REGNUM:
2919 case HARD_FRAME_POINTER_REGNUM:
2920 if (current_function_is_leaf)
2921 offset = -current_frame_info.total_size;
2923 offset = -(current_frame_info.total_size
2924 - crtl->outgoing_args_size - 16);
2927 case STACK_POINTER_REGNUM:
2928 if (current_function_is_leaf)
2931 offset = 16 + crtl->outgoing_args_size;
2939 case ARG_POINTER_REGNUM:
2940 /* Arguments start above the 16 byte save area, unless stdarg
2941 in which case we store through the 16 byte save area. */
2944 case HARD_FRAME_POINTER_REGNUM:
2945 offset = 16 - crtl->args.pretend_args_size;
2948 case STACK_POINTER_REGNUM:
2949 offset = (current_frame_info.total_size
2950 + 16 - crtl->args.pretend_args_size);
2965 /* If there are more than a trivial number of register spills, we use
2966 two interleaved iterators so that we can get two memory references
2969 In order to simplify things in the prologue and epilogue expanders,
2970 we use helper functions to fix up the memory references after the
2971 fact with the appropriate offsets to a POST_MODIFY memory mode.
2972 The following data structure tracks the state of the two iterators
2973 while insns are being emitted. */
2975 struct spill_fill_data
2977 rtx init_after; /* point at which to emit initializations */
2978 rtx init_reg[2]; /* initial base register */
2979 rtx iter_reg[2]; /* the iterator registers */
2980 rtx *prev_addr[2]; /* address of last memory use */
2981 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2982 HOST_WIDE_INT prev_off[2]; /* last offset */
2983 int n_iter; /* number of iterators in use */
2984 int next_iter; /* next iterator to use */
2985 unsigned int save_gr_used_mask;
2988 static struct spill_fill_data spill_fill_data;
2991 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2995 spill_fill_data.init_after = get_last_insn ();
2996 spill_fill_data.init_reg[0] = init_reg;
2997 spill_fill_data.init_reg[1] = init_reg;
2998 spill_fill_data.prev_addr[0] = NULL;
2999 spill_fill_data.prev_addr[1] = NULL;
3000 spill_fill_data.prev_insn[0] = NULL;
3001 spill_fill_data.prev_insn[1] = NULL;
3002 spill_fill_data.prev_off[0] = cfa_off;
3003 spill_fill_data.prev_off[1] = cfa_off;
3004 spill_fill_data.next_iter = 0;
3005 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
3007 spill_fill_data.n_iter = 1 + (n_spills > 2);
3008 for (i = 0; i < spill_fill_data.n_iter; ++i)
3010 int regno = next_scratch_gr_reg ();
3011 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
3012 current_frame_info.gr_used_mask |= 1 << regno;
3017 finish_spill_pointers (void)
3019 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
3023 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
3025 int iter = spill_fill_data.next_iter;
3026 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
3027 rtx disp_rtx = GEN_INT (disp);
3030 if (spill_fill_data.prev_addr[iter])
3032 if (satisfies_constraint_N (disp_rtx))
3034 *spill_fill_data.prev_addr[iter]
3035 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
3036 gen_rtx_PLUS (DImode,
3037 spill_fill_data.iter_reg[iter],
3039 add_reg_note (spill_fill_data.prev_insn[iter],
3040 REG_INC, spill_fill_data.iter_reg[iter]);
3044 /* ??? Could use register post_modify for loads. */
3045 if (!satisfies_constraint_I (disp_rtx))
3047 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3048 emit_move_insn (tmp, disp_rtx);
3051 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3052 spill_fill_data.iter_reg[iter], disp_rtx));
3055 /* Micro-optimization: if we've created a frame pointer, it's at
3056 CFA 0, which may allow the real iterator to be initialized lower,
3057 slightly increasing parallelism. Also, if there are few saves
3058 it may eliminate the iterator entirely. */
3060 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
3061 && frame_pointer_needed)
3063 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
3064 set_mem_alias_set (mem, get_varargs_alias_set ());
3072 seq = gen_movdi (spill_fill_data.iter_reg[iter],
3073 spill_fill_data.init_reg[iter]);
3078 if (!satisfies_constraint_I (disp_rtx))
3080 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
3081 emit_move_insn (tmp, disp_rtx);
3085 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
3086 spill_fill_data.init_reg[iter],
3093 /* Careful for being the first insn in a sequence. */
3094 if (spill_fill_data.init_after)
3095 insn = emit_insn_after (seq, spill_fill_data.init_after);
3098 rtx first = get_insns ();
3100 insn = emit_insn_before (seq, first);
3102 insn = emit_insn (seq);
3104 spill_fill_data.init_after = insn;
3107 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
3109 /* ??? Not all of the spills are for varargs, but some of them are.
3110 The rest of the spills belong in an alias set of their own. But
3111 it doesn't actually hurt to include them here. */
3112 set_mem_alias_set (mem, get_varargs_alias_set ());
3114 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
3115 spill_fill_data.prev_off[iter] = cfa_off;
3117 if (++iter >= spill_fill_data.n_iter)
3119 spill_fill_data.next_iter = iter;
3125 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
3128 int iter = spill_fill_data.next_iter;
3131 mem = spill_restore_mem (reg, cfa_off);
3132 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
3133 spill_fill_data.prev_insn[iter] = insn;
3140 RTX_FRAME_RELATED_P (insn) = 1;
3142 /* Don't even pretend that the unwind code can intuit its way
3143 through a pair of interleaved post_modify iterators. Just
3144 provide the correct answer. */
3146 if (frame_pointer_needed)
3148 base = hard_frame_pointer_rtx;
3153 base = stack_pointer_rtx;
3154 off = current_frame_info.total_size - cfa_off;
3157 add_reg_note (insn, REG_CFA_OFFSET,
3158 gen_rtx_SET (VOIDmode,
3159 gen_rtx_MEM (GET_MODE (reg),
3160 plus_constant (base, off)),
3166 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
3168 int iter = spill_fill_data.next_iter;
3171 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
3172 GEN_INT (cfa_off)));
3173 spill_fill_data.prev_insn[iter] = insn;
3176 /* Wrapper functions that discards the CONST_INT spill offset. These
3177 exist so that we can give gr_spill/gr_fill the offset they need and
3178 use a consistent function interface. */
3181 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3183 return gen_movdi (dest, src);
3187 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3189 return gen_fr_spill (dest, src);
3193 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
3195 return gen_fr_restore (dest, src);
3198 /* Called after register allocation to add any instructions needed for the
3199 prologue. Using a prologue insn is favored compared to putting all of the
3200 instructions in output_function_prologue(), since it allows the scheduler
3201 to intermix instructions with the saves of the caller saved registers. In
3202 some cases, it might be necessary to emit a barrier instruction as the last
3203 insn to prevent such scheduling.
3205 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
3206 so that the debug info generation code can handle them properly.
3208 The register save area is layed out like so:
3210 [ varargs spill area ]
3211 [ fr register spill area ]
3212 [ br register spill area ]
3213 [ ar register spill area ]
3214 [ pr register spill area ]
3215 [ gr register spill area ] */
3217 /* ??? Get inefficient code when the frame size is larger than can fit in an
3218 adds instruction. */
3221 ia64_expand_prologue (void)
3223 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
3224 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
3227 ia64_compute_frame_size (get_frame_size ());
3228 last_scratch_gr_reg = 15;
3230 if (flag_stack_usage)
3231 current_function_static_stack_size = current_frame_info.total_size;
3235 fprintf (dump_file, "ia64 frame related registers "
3236 "recorded in current_frame_info.r[]:\n");
3237 #define PRINTREG(a) if (current_frame_info.r[a]) \
3238 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
3240 PRINTREG(reg_save_b0);
3241 PRINTREG(reg_save_pr);
3242 PRINTREG(reg_save_ar_pfs);
3243 PRINTREG(reg_save_ar_unat);
3244 PRINTREG(reg_save_ar_lc);
3245 PRINTREG(reg_save_gp);
3249 /* If there is no epilogue, then we don't need some prologue insns.
3250 We need to avoid emitting the dead prologue insns, because flow
3251 will complain about them. */
3257 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
3258 if ((e->flags & EDGE_FAKE) == 0
3259 && (e->flags & EDGE_FALLTHRU) != 0)
3261 epilogue_p = (e != NULL);
3266 /* Set the local, input, and output register names. We need to do this
3267 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
3268 half. If we use in/loc/out register names, then we get assembler errors
3269 in crtn.S because there is no alloc insn or regstk directive in there. */
3270 if (! TARGET_REG_NAMES)
3272 int inputs = current_frame_info.n_input_regs;
3273 int locals = current_frame_info.n_local_regs;
3274 int outputs = current_frame_info.n_output_regs;
3276 for (i = 0; i < inputs; i++)
3277 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
3278 for (i = 0; i < locals; i++)
3279 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
3280 for (i = 0; i < outputs; i++)
3281 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
3284 /* Set the frame pointer register name. The regnum is logically loc79,
3285 but of course we'll not have allocated that many locals. Rather than
3286 worrying about renumbering the existing rtxs, we adjust the name. */
3287 /* ??? This code means that we can never use one local register when
3288 there is a frame pointer. loc79 gets wasted in this case, as it is
3289 renamed to a register that will never be used. See also the try_locals
3290 code in find_gr_spill. */
3291 if (current_frame_info.r[reg_fp])
3293 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3294 reg_names[HARD_FRAME_POINTER_REGNUM]
3295 = reg_names[current_frame_info.r[reg_fp]];
3296 reg_names[current_frame_info.r[reg_fp]] = tmp;
3299 /* We don't need an alloc instruction if we've used no outputs or locals. */
3300 if (current_frame_info.n_local_regs == 0
3301 && current_frame_info.n_output_regs == 0
3302 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3303 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3305 /* If there is no alloc, but there are input registers used, then we
3306 need a .regstk directive. */
3307 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3308 ar_pfs_save_reg = NULL_RTX;
3312 current_frame_info.need_regstk = 0;
3314 if (current_frame_info.r[reg_save_ar_pfs])
3316 regno = current_frame_info.r[reg_save_ar_pfs];
3317 reg_emitted (reg_save_ar_pfs);
3320 regno = next_scratch_gr_reg ();
3321 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3323 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3324 GEN_INT (current_frame_info.n_input_regs),
3325 GEN_INT (current_frame_info.n_local_regs),
3326 GEN_INT (current_frame_info.n_output_regs),
3327 GEN_INT (current_frame_info.n_rotate_regs)));
3328 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3331 /* Set up frame pointer, stack pointer, and spill iterators. */
3333 n_varargs = cfun->machine->n_varargs;
3334 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3335 stack_pointer_rtx, 0);
3337 if (frame_pointer_needed)
3339 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3340 RTX_FRAME_RELATED_P (insn) = 1;
3342 /* Force the unwind info to recognize this as defining a new CFA,
3343 rather than some temp register setup. */
3344 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX);
3347 if (current_frame_info.total_size != 0)
3349 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3352 if (satisfies_constraint_I (frame_size_rtx))
3353 offset = frame_size_rtx;
3356 regno = next_scratch_gr_reg ();
3357 offset = gen_rtx_REG (DImode, regno);
3358 emit_move_insn (offset, frame_size_rtx);
3361 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3362 stack_pointer_rtx, offset));
3364 if (! frame_pointer_needed)
3366 RTX_FRAME_RELATED_P (insn) = 1;
3367 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3368 gen_rtx_SET (VOIDmode,
3370 gen_rtx_PLUS (DImode,
3375 /* ??? At this point we must generate a magic insn that appears to
3376 modify the stack pointer, the frame pointer, and all spill
3377 iterators. This would allow the most scheduling freedom. For
3378 now, just hard stop. */
3379 emit_insn (gen_blockage ());
3382 /* Must copy out ar.unat before doing any integer spills. */
3383 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3385 if (current_frame_info.r[reg_save_ar_unat])
3388 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3389 reg_emitted (reg_save_ar_unat);
3393 alt_regno = next_scratch_gr_reg ();
3394 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3395 current_frame_info.gr_used_mask |= 1 << alt_regno;
3398 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3399 insn = emit_move_insn (ar_unat_save_reg, reg);
3400 if (current_frame_info.r[reg_save_ar_unat])
3402 RTX_FRAME_RELATED_P (insn) = 1;
3403 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3406 /* Even if we're not going to generate an epilogue, we still
3407 need to save the register so that EH works. */
3408 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3409 emit_insn (gen_prologue_use (ar_unat_save_reg));
3412 ar_unat_save_reg = NULL_RTX;
3414 /* Spill all varargs registers. Do this before spilling any GR registers,
3415 since we want the UNAT bits for the GR registers to override the UNAT
3416 bits from varargs, which we don't care about. */
3419 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3421 reg = gen_rtx_REG (DImode, regno);
3422 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3425 /* Locate the bottom of the register save area. */
3426 cfa_off = (current_frame_info.spill_cfa_off
3427 + current_frame_info.spill_size
3428 + current_frame_info.extra_spill_size);
3430 /* Save the predicate register block either in a register or in memory. */
3431 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3433 reg = gen_rtx_REG (DImode, PR_REG (0));
3434 if (current_frame_info.r[reg_save_pr] != 0)
3436 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3437 reg_emitted (reg_save_pr);
3438 insn = emit_move_insn (alt_reg, reg);
3440 /* ??? Denote pr spill/fill by a DImode move that modifies all
3441 64 hard registers. */
3442 RTX_FRAME_RELATED_P (insn) = 1;
3443 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3445 /* Even if we're not going to generate an epilogue, we still
3446 need to save the register so that EH works. */
3448 emit_insn (gen_prologue_use (alt_reg));
3452 alt_regno = next_scratch_gr_reg ();
3453 alt_reg = gen_rtx_REG (DImode, alt_regno);
3454 insn = emit_move_insn (alt_reg, reg);
3455 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3460 /* Handle AR regs in numerical order. All of them get special handling. */
3461 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3462 && current_frame_info.r[reg_save_ar_unat] == 0)
3464 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3465 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3469 /* The alloc insn already copied ar.pfs into a general register. The
3470 only thing we have to do now is copy that register to a stack slot
3471 if we'd not allocated a local register for the job. */
3472 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3473 && current_frame_info.r[reg_save_ar_pfs] == 0)
3475 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3476 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3480 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3482 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3483 if (current_frame_info.r[reg_save_ar_lc] != 0)
3485 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3486 reg_emitted (reg_save_ar_lc);
3487 insn = emit_move_insn (alt_reg, reg);
3488 RTX_FRAME_RELATED_P (insn) = 1;
3489 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3491 /* Even if we're not going to generate an epilogue, we still
3492 need to save the register so that EH works. */
3494 emit_insn (gen_prologue_use (alt_reg));
3498 alt_regno = next_scratch_gr_reg ();
3499 alt_reg = gen_rtx_REG (DImode, alt_regno);
3500 emit_move_insn (alt_reg, reg);
3501 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3506 /* Save the return pointer. */
3507 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3509 reg = gen_rtx_REG (DImode, BR_REG (0));
3510 if (current_frame_info.r[reg_save_b0] != 0)
3512 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3513 reg_emitted (reg_save_b0);
3514 insn = emit_move_insn (alt_reg, reg);
3515 RTX_FRAME_RELATED_P (insn) = 1;
3516 add_reg_note (insn, REG_CFA_REGISTER, NULL_RTX);
3518 /* Even if we're not going to generate an epilogue, we still
3519 need to save the register so that EH works. */
3521 emit_insn (gen_prologue_use (alt_reg));
3525 alt_regno = next_scratch_gr_reg ();
3526 alt_reg = gen_rtx_REG (DImode, alt_regno);
3527 emit_move_insn (alt_reg, reg);
3528 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3533 if (current_frame_info.r[reg_save_gp])
3535 reg_emitted (reg_save_gp);
3536 insn = emit_move_insn (gen_rtx_REG (DImode,
3537 current_frame_info.r[reg_save_gp]),
3538 pic_offset_table_rtx);
3541 /* We should now be at the base of the gr/br/fr spill area. */
3542 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3543 + current_frame_info.spill_size));
3545 /* Spill all general registers. */
3546 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3547 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3549 reg = gen_rtx_REG (DImode, regno);
3550 do_spill (gen_gr_spill, reg, cfa_off, reg);
3554 /* Spill the rest of the BR registers. */
3555 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3556 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3558 alt_regno = next_scratch_gr_reg ();
3559 alt_reg = gen_rtx_REG (DImode, alt_regno);
3560 reg = gen_rtx_REG (DImode, regno);
3561 emit_move_insn (alt_reg, reg);
3562 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3566 /* Align the frame and spill all FR registers. */
3567 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3568 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3570 gcc_assert (!(cfa_off & 15));
3571 reg = gen_rtx_REG (XFmode, regno);
3572 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3576 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3578 finish_spill_pointers ();
3581 /* Output the textual info surrounding the prologue. */
3584 ia64_start_function (FILE *file, const char *fnname,
3585 tree decl ATTRIBUTE_UNUSED)
3587 #if VMS_DEBUGGING_INFO
3589 && strncmp (vms_debug_main, fnname, strlen (vms_debug_main)) == 0)
3591 targetm.asm_out.globalize_label (asm_out_file, VMS_DEBUG_MAIN_POINTER);
3592 ASM_OUTPUT_DEF (asm_out_file, VMS_DEBUG_MAIN_POINTER, fnname);
3593 dwarf2out_vms_debug_main_pointer ();
3598 fputs ("\t.proc ", file);
3599 assemble_name (file, fnname);
3601 ASM_OUTPUT_LABEL (file, fnname);
3604 /* Called after register allocation to add any instructions needed for the
3605 epilogue. Using an epilogue insn is favored compared to putting all of the
3606 instructions in output_function_prologue(), since it allows the scheduler
3607 to intermix instructions with the saves of the caller saved registers. In
3608 some cases, it might be necessary to emit a barrier instruction as the last
3609 insn to prevent such scheduling. */
3612 ia64_expand_epilogue (int sibcall_p)
3614 rtx insn, reg, alt_reg, ar_unat_save_reg;
3615 int regno, alt_regno, cfa_off;
3617 ia64_compute_frame_size (get_frame_size ());
3619 /* If there is a frame pointer, then we use it instead of the stack
3620 pointer, so that the stack pointer does not need to be valid when
3621 the epilogue starts. See EXIT_IGNORE_STACK. */
3622 if (frame_pointer_needed)
3623 setup_spill_pointers (current_frame_info.n_spilled,
3624 hard_frame_pointer_rtx, 0);
3626 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3627 current_frame_info.total_size);
3629 if (current_frame_info.total_size != 0)
3631 /* ??? At this point we must generate a magic insn that appears to
3632 modify the spill iterators and the frame pointer. This would
3633 allow the most scheduling freedom. For now, just hard stop. */
3634 emit_insn (gen_blockage ());
3637 /* Locate the bottom of the register save area. */
3638 cfa_off = (current_frame_info.spill_cfa_off
3639 + current_frame_info.spill_size
3640 + current_frame_info.extra_spill_size);
3642 /* Restore the predicate registers. */
3643 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3645 if (current_frame_info.r[reg_save_pr] != 0)
3647 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3648 reg_emitted (reg_save_pr);
3652 alt_regno = next_scratch_gr_reg ();
3653 alt_reg = gen_rtx_REG (DImode, alt_regno);
3654 do_restore (gen_movdi_x, alt_reg, cfa_off);
3657 reg = gen_rtx_REG (DImode, PR_REG (0));
3658 emit_move_insn (reg, alt_reg);
3661 /* Restore the application registers. */
3663 /* Load the saved unat from the stack, but do not restore it until
3664 after the GRs have been restored. */
3665 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3667 if (current_frame_info.r[reg_save_ar_unat] != 0)
3670 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3671 reg_emitted (reg_save_ar_unat);
3675 alt_regno = next_scratch_gr_reg ();
3676 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3677 current_frame_info.gr_used_mask |= 1 << alt_regno;
3678 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3683 ar_unat_save_reg = NULL_RTX;
3685 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3687 reg_emitted (reg_save_ar_pfs);
3688 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3689 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3690 emit_move_insn (reg, alt_reg);
3692 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3694 alt_regno = next_scratch_gr_reg ();
3695 alt_reg = gen_rtx_REG (DImode, alt_regno);
3696 do_restore (gen_movdi_x, alt_reg, cfa_off);
3698 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3699 emit_move_insn (reg, alt_reg);
3702 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3704 if (current_frame_info.r[reg_save_ar_lc] != 0)
3706 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3707 reg_emitted (reg_save_ar_lc);
3711 alt_regno = next_scratch_gr_reg ();
3712 alt_reg = gen_rtx_REG (DImode, alt_regno);
3713 do_restore (gen_movdi_x, alt_reg, cfa_off);
3716 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3717 emit_move_insn (reg, alt_reg);
3720 /* Restore the return pointer. */
3721 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3723 if (current_frame_info.r[reg_save_b0] != 0)
3725 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3726 reg_emitted (reg_save_b0);
3730 alt_regno = next_scratch_gr_reg ();
3731 alt_reg = gen_rtx_REG (DImode, alt_regno);
3732 do_restore (gen_movdi_x, alt_reg, cfa_off);
3735 reg = gen_rtx_REG (DImode, BR_REG (0));
3736 emit_move_insn (reg, alt_reg);
3739 /* We should now be at the base of the gr/br/fr spill area. */
3740 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3741 + current_frame_info.spill_size));
3743 /* The GP may be stored on the stack in the prologue, but it's
3744 never restored in the epilogue. Skip the stack slot. */
3745 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3748 /* Restore all general registers. */
3749 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3750 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3752 reg = gen_rtx_REG (DImode, regno);
3753 do_restore (gen_gr_restore, reg, cfa_off);
3757 /* Restore the branch registers. */
3758 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3759 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3761 alt_regno = next_scratch_gr_reg ();
3762 alt_reg = gen_rtx_REG (DImode, alt_regno);
3763 do_restore (gen_movdi_x, alt_reg, cfa_off);
3765 reg = gen_rtx_REG (DImode, regno);
3766 emit_move_insn (reg, alt_reg);
3769 /* Restore floating point registers. */
3770 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3771 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3773 gcc_assert (!(cfa_off & 15));
3774 reg = gen_rtx_REG (XFmode, regno);
3775 do_restore (gen_fr_restore_x, reg, cfa_off);
3779 /* Restore ar.unat for real. */
3780 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3782 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3783 emit_move_insn (reg, ar_unat_save_reg);
3786 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3788 finish_spill_pointers ();
3790 if (current_frame_info.total_size
3791 || cfun->machine->ia64_eh_epilogue_sp
3792 || frame_pointer_needed)
3794 /* ??? At this point we must generate a magic insn that appears to
3795 modify the spill iterators, the stack pointer, and the frame
3796 pointer. This would allow the most scheduling freedom. For now,
3798 emit_insn (gen_blockage ());
3801 if (cfun->machine->ia64_eh_epilogue_sp)
3802 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3803 else if (frame_pointer_needed)
3805 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3806 RTX_FRAME_RELATED_P (insn) = 1;
3807 add_reg_note (insn, REG_CFA_ADJUST_CFA, NULL);
3809 else if (current_frame_info.total_size)
3811 rtx offset, frame_size_rtx;
3813 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3814 if (satisfies_constraint_I (frame_size_rtx))
3815 offset = frame_size_rtx;
3818 regno = next_scratch_gr_reg ();
3819 offset = gen_rtx_REG (DImode, regno);
3820 emit_move_insn (offset, frame_size_rtx);
3823 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3826 RTX_FRAME_RELATED_P (insn) = 1;
3827 add_reg_note (insn, REG_CFA_ADJUST_CFA,
3828 gen_rtx_SET (VOIDmode,
3830 gen_rtx_PLUS (DImode,
3835 if (cfun->machine->ia64_eh_epilogue_bsp)
3836 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3839 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3842 int fp = GR_REG (2);
3843 /* We need a throw away register here, r0 and r1 are reserved,
3844 so r2 is the first available call clobbered register. If
3845 there was a frame_pointer register, we may have swapped the
3846 names of r2 and HARD_FRAME_POINTER_REGNUM, so we have to make
3847 sure we're using the string "r2" when emitting the register
3848 name for the assembler. */
3849 if (current_frame_info.r[reg_fp]
3850 && current_frame_info.r[reg_fp] == GR_REG (2))
3851 fp = HARD_FRAME_POINTER_REGNUM;
3853 /* We must emit an alloc to force the input registers to become output
3854 registers. Otherwise, if the callee tries to pass its parameters
3855 through to another call without an intervening alloc, then these
3857 /* ??? We don't need to preserve all input registers. We only need to
3858 preserve those input registers used as arguments to the sibling call.
3859 It is unclear how to compute that number here. */
3860 if (current_frame_info.n_input_regs != 0)
3862 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3863 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3864 const0_rtx, const0_rtx,
3865 n_inputs, const0_rtx));
3866 RTX_FRAME_RELATED_P (insn) = 1;
3871 /* Return 1 if br.ret can do all the work required to return from a
3875 ia64_direct_return (void)
3877 if (reload_completed && ! frame_pointer_needed)
3879 ia64_compute_frame_size (get_frame_size ());
3881 return (current_frame_info.total_size == 0
3882 && current_frame_info.n_spilled == 0
3883 && current_frame_info.r[reg_save_b0] == 0
3884 && current_frame_info.r[reg_save_pr] == 0
3885 && current_frame_info.r[reg_save_ar_pfs] == 0
3886 && current_frame_info.r[reg_save_ar_unat] == 0
3887 && current_frame_info.r[reg_save_ar_lc] == 0);
3892 /* Return the magic cookie that we use to hold the return address
3893 during early compilation. */
3896 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3900 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3903 /* Split this value after reload, now that we know where the return
3904 address is saved. */
3907 ia64_split_return_addr_rtx (rtx dest)
3911 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3913 if (current_frame_info.r[reg_save_b0] != 0)
3915 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3916 reg_emitted (reg_save_b0);
3924 /* Compute offset from CFA for BR0. */
3925 /* ??? Must be kept in sync with ia64_expand_prologue. */
3926 off = (current_frame_info.spill_cfa_off
3927 + current_frame_info.spill_size);
3928 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3929 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3932 /* Convert CFA offset to a register based offset. */
3933 if (frame_pointer_needed)
3934 src = hard_frame_pointer_rtx;
3937 src = stack_pointer_rtx;
3938 off += current_frame_info.total_size;
3941 /* Load address into scratch register. */
3942 off_r = GEN_INT (off);
3943 if (satisfies_constraint_I (off_r))
3944 emit_insn (gen_adddi3 (dest, src, off_r));
3947 emit_move_insn (dest, off_r);
3948 emit_insn (gen_adddi3 (dest, src, dest));
3951 src = gen_rtx_MEM (Pmode, dest);
3955 src = gen_rtx_REG (DImode, BR_REG (0));
3957 emit_move_insn (dest, src);
3961 ia64_hard_regno_rename_ok (int from, int to)
3963 /* Don't clobber any of the registers we reserved for the prologue. */
3966 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3967 if (to == current_frame_info.r[r]
3968 || from == current_frame_info.r[r]
3969 || to == emitted_frame_related_regs[r]
3970 || from == emitted_frame_related_regs[r])
3973 /* Don't use output registers outside the register frame. */
3974 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3977 /* Retain even/oddness on predicate register pairs. */
3978 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3979 return (from & 1) == (to & 1);
3984 /* Target hook for assembling integer objects. Handle word-sized
3985 aligned objects and detect the cases when @fptr is needed. */
3988 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3990 if (size == POINTER_SIZE / BITS_PER_UNIT
3991 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3992 && GET_CODE (x) == SYMBOL_REF
3993 && SYMBOL_REF_FUNCTION_P (x))
3995 static const char * const directive[2][2] = {
3996 /* 64-bit pointer */ /* 32-bit pointer */
3997 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3998 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
4000 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
4001 output_addr_const (asm_out_file, x);
4002 fputs (")\n", asm_out_file);
4005 return default_assemble_integer (x, size, aligned_p);
4008 /* Emit the function prologue. */
4011 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4013 int mask, grsave, grsave_prev;
4015 if (current_frame_info.need_regstk)
4016 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
4017 current_frame_info.n_input_regs,
4018 current_frame_info.n_local_regs,
4019 current_frame_info.n_output_regs,
4020 current_frame_info.n_rotate_regs);
4022 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4025 /* Emit the .prologue directive. */
4028 grsave = grsave_prev = 0;
4029 if (current_frame_info.r[reg_save_b0] != 0)
4032 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
4034 if (current_frame_info.r[reg_save_ar_pfs] != 0
4035 && (grsave_prev == 0
4036 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
4039 if (grsave_prev == 0)
4040 grsave = current_frame_info.r[reg_save_ar_pfs];
4041 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
4043 if (current_frame_info.r[reg_fp] != 0
4044 && (grsave_prev == 0
4045 || current_frame_info.r[reg_fp] == grsave_prev + 1))
4048 if (grsave_prev == 0)
4049 grsave = HARD_FRAME_POINTER_REGNUM;
4050 grsave_prev = current_frame_info.r[reg_fp];
4052 if (current_frame_info.r[reg_save_pr] != 0
4053 && (grsave_prev == 0
4054 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
4057 if (grsave_prev == 0)
4058 grsave = current_frame_info.r[reg_save_pr];
4061 if (mask && TARGET_GNU_AS)
4062 fprintf (file, "\t.prologue %d, %d\n", mask,
4063 ia64_dbx_register_number (grsave));
4065 fputs ("\t.prologue\n", file);
4067 /* Emit a .spill directive, if necessary, to relocate the base of
4068 the register spill area. */
4069 if (current_frame_info.spill_cfa_off != -16)
4070 fprintf (file, "\t.spill %ld\n",
4071 (long) (current_frame_info.spill_cfa_off
4072 + current_frame_info.spill_size));
4075 /* Emit the .body directive at the scheduled end of the prologue. */
4078 ia64_output_function_end_prologue (FILE *file)
4080 if (ia64_except_unwind_info (&global_options) != UI_TARGET)
4083 fputs ("\t.body\n", file);
4086 /* Emit the function epilogue. */
4089 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
4090 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4094 if (current_frame_info.r[reg_fp])
4096 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
4097 reg_names[HARD_FRAME_POINTER_REGNUM]
4098 = reg_names[current_frame_info.r[reg_fp]];
4099 reg_names[current_frame_info.r[reg_fp]] = tmp;
4100 reg_emitted (reg_fp);
4102 if (! TARGET_REG_NAMES)
4104 for (i = 0; i < current_frame_info.n_input_regs; i++)
4105 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
4106 for (i = 0; i < current_frame_info.n_local_regs; i++)
4107 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
4108 for (i = 0; i < current_frame_info.n_output_regs; i++)
4109 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
4112 current_frame_info.initialized = 0;
4116 ia64_dbx_register_number (int regno)
4118 /* In ia64_expand_prologue we quite literally renamed the frame pointer
4119 from its home at loc79 to something inside the register frame. We
4120 must perform the same renumbering here for the debug info. */
4121 if (current_frame_info.r[reg_fp])
4123 if (regno == HARD_FRAME_POINTER_REGNUM)
4124 regno = current_frame_info.r[reg_fp];
4125 else if (regno == current_frame_info.r[reg_fp])
4126 regno = HARD_FRAME_POINTER_REGNUM;
4129 if (IN_REGNO_P (regno))
4130 return 32 + regno - IN_REG (0);
4131 else if (LOC_REGNO_P (regno))
4132 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
4133 else if (OUT_REGNO_P (regno))
4134 return (32 + current_frame_info.n_input_regs
4135 + current_frame_info.n_local_regs + regno - OUT_REG (0));
4140 /* Implement TARGET_TRAMPOLINE_INIT.
4142 The trampoline should set the static chain pointer to value placed
4143 into the trampoline and should branch to the specified routine.
4144 To make the normal indirect-subroutine calling convention work,
4145 the trampoline must look like a function descriptor; the first
4146 word being the target address and the second being the target's
4149 We abuse the concept of a global pointer by arranging for it
4150 to point to the data we need to load. The complete trampoline
4151 has the following form:
4153 +-------------------+ \
4154 TRAMP: | __ia64_trampoline | |
4155 +-------------------+ > fake function descriptor
4157 +-------------------+ /
4158 | target descriptor |
4159 +-------------------+
4161 +-------------------+
4165 ia64_trampoline_init (rtx m_tramp, tree fndecl, rtx static_chain)
4167 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
4168 rtx addr, addr_reg, tramp, eight = GEN_INT (8);
4170 /* The Intel assembler requires that the global __ia64_trampoline symbol
4171 be declared explicitly */
4174 static bool declared_ia64_trampoline = false;
4176 if (!declared_ia64_trampoline)
4178 declared_ia64_trampoline = true;
4179 (*targetm.asm_out.globalize_label) (asm_out_file,
4180 "__ia64_trampoline");
4184 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
4185 addr = convert_memory_address (Pmode, XEXP (m_tramp, 0));
4186 fnaddr = convert_memory_address (Pmode, fnaddr);
4187 static_chain = convert_memory_address (Pmode, static_chain);
4189 /* Load up our iterator. */
4190 addr_reg = copy_to_reg (addr);
4191 m_tramp = adjust_automodify_address (m_tramp, Pmode, addr_reg, 0);
4193 /* The first two words are the fake descriptor:
4194 __ia64_trampoline, ADDR+16. */
4195 tramp = gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline");
4196 if (TARGET_ABI_OPEN_VMS)
4198 /* HP decided to break the ELF ABI on VMS (to deal with an ambiguity
4199 in the Macro-32 compiler) and changed the semantics of the LTOFF22
4200 relocation against function symbols to make it identical to the
4201 LTOFF_FPTR22 relocation. Emit the latter directly to stay within
4202 strict ELF and dereference to get the bare code address. */
4203 rtx reg = gen_reg_rtx (Pmode);
4204 SYMBOL_REF_FLAGS (tramp) |= SYMBOL_FLAG_FUNCTION;
4205 emit_move_insn (reg, tramp);
4206 emit_move_insn (reg, gen_rtx_MEM (Pmode, reg));
4209 emit_move_insn (m_tramp, tramp);
4210 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4211 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4213 emit_move_insn (m_tramp, force_reg (Pmode, plus_constant (addr, 16)));
4214 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4215 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4217 /* The third word is the target descriptor. */
4218 emit_move_insn (m_tramp, force_reg (Pmode, fnaddr));
4219 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
4220 m_tramp = adjust_automodify_address (m_tramp, VOIDmode, NULL, 8);
4222 /* The fourth word is the static chain. */
4223 emit_move_insn (m_tramp, static_chain);
4226 /* Do any needed setup for a variadic function. CUM has not been updated
4227 for the last named argument which has type TYPE and mode MODE.
4229 We generate the actual spill instructions during prologue generation. */
4232 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4233 tree type, int * pretend_size,
4234 int second_time ATTRIBUTE_UNUSED)
4236 CUMULATIVE_ARGS next_cum = *cum;
4238 /* Skip the current argument. */
4239 ia64_function_arg_advance (&next_cum, mode, type, 1);
4241 if (next_cum.words < MAX_ARGUMENT_SLOTS)
4243 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
4244 *pretend_size = n * UNITS_PER_WORD;
4245 cfun->machine->n_varargs = n;
4249 /* Check whether TYPE is a homogeneous floating point aggregate. If
4250 it is, return the mode of the floating point type that appears
4251 in all leafs. If it is not, return VOIDmode.
4253 An aggregate is a homogeneous floating point aggregate is if all
4254 fields/elements in it have the same floating point type (e.g,
4255 SFmode). 128-bit quad-precision floats are excluded.
4257 Variable sized aggregates should never arrive here, since we should
4258 have already decided to pass them by reference. Top-level zero-sized
4259 aggregates are excluded because our parallels crash the middle-end. */
4261 static enum machine_mode
4262 hfa_element_mode (const_tree type, bool nested)
4264 enum machine_mode element_mode = VOIDmode;
4265 enum machine_mode mode;
4266 enum tree_code code = TREE_CODE (type);
4267 int know_element_mode = 0;
4270 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
4275 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
4276 case BOOLEAN_TYPE: case POINTER_TYPE:
4277 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
4278 case LANG_TYPE: case FUNCTION_TYPE:
4281 /* Fortran complex types are supposed to be HFAs, so we need to handle
4282 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
4285 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
4286 && TYPE_MODE (type) != TCmode)
4287 return GET_MODE_INNER (TYPE_MODE (type));
4292 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
4293 mode if this is contained within an aggregate. */
4294 if (nested && TYPE_MODE (type) != TFmode)
4295 return TYPE_MODE (type);
4300 return hfa_element_mode (TREE_TYPE (type), 1);
4304 case QUAL_UNION_TYPE:
4305 for (t = TYPE_FIELDS (type); t; t = DECL_CHAIN (t))
4307 if (TREE_CODE (t) != FIELD_DECL)
4310 mode = hfa_element_mode (TREE_TYPE (t), 1);
4311 if (know_element_mode)
4313 if (mode != element_mode)
4316 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
4320 know_element_mode = 1;
4321 element_mode = mode;
4324 return element_mode;
4327 /* If we reach here, we probably have some front-end specific type
4328 that the backend doesn't know about. This can happen via the
4329 aggregate_value_p call in init_function_start. All we can do is
4330 ignore unknown tree types. */
4337 /* Return the number of words required to hold a quantity of TYPE and MODE
4338 when passed as an argument. */
4340 ia64_function_arg_words (const_tree type, enum machine_mode mode)
4344 if (mode == BLKmode)
4345 words = int_size_in_bytes (type);
4347 words = GET_MODE_SIZE (mode);
4349 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
4352 /* Return the number of registers that should be skipped so the current
4353 argument (described by TYPE and WORDS) will be properly aligned.
4355 Integer and float arguments larger than 8 bytes start at the next
4356 even boundary. Aggregates larger than 8 bytes start at the next
4357 even boundary if the aggregate has 16 byte alignment. Note that
4358 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
4359 but are still to be aligned in registers.
4361 ??? The ABI does not specify how to handle aggregates with
4362 alignment from 9 to 15 bytes, or greater than 16. We handle them
4363 all as if they had 16 byte alignment. Such aggregates can occur
4364 only if gcc extensions are used. */
4366 ia64_function_arg_offset (const CUMULATIVE_ARGS *cum,
4367 const_tree type, int words)
4369 /* No registers are skipped on VMS. */
4370 if (TARGET_ABI_OPEN_VMS || (cum->words & 1) == 0)
4374 && TREE_CODE (type) != INTEGER_TYPE
4375 && TREE_CODE (type) != REAL_TYPE)
4376 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4381 /* Return rtx for register where argument is passed, or zero if it is passed
4383 /* ??? 128-bit quad-precision floats are always passed in general
4387 ia64_function_arg_1 (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4388 const_tree type, bool named, bool incoming)
4390 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4391 int words = ia64_function_arg_words (type, mode);
4392 int offset = ia64_function_arg_offset (cum, type, words);
4393 enum machine_mode hfa_mode = VOIDmode;
4395 /* For OPEN VMS, emit the instruction setting up the argument register here,
4396 when we know this will be together with the other arguments setup related
4397 insns. This is not the conceptually best place to do this, but this is
4398 the easiest as we have convenient access to cumulative args info. */
4400 if (TARGET_ABI_OPEN_VMS && mode == VOIDmode && type == void_type_node
4403 unsigned HOST_WIDE_INT regval = cum->words;
4406 for (i = 0; i < 8; i++)
4407 regval |= ((int) cum->atypes[i]) << (i * 3 + 8);
4409 emit_move_insn (gen_rtx_REG (DImode, GR_REG (25)),
4413 /* If all argument slots are used, then it must go on the stack. */
4414 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4417 /* Check for and handle homogeneous FP aggregates. */
4419 hfa_mode = hfa_element_mode (type, 0);
4421 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4422 and unprototyped hfas are passed specially. */
4423 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4427 int fp_regs = cum->fp_regs;
4428 int int_regs = cum->words + offset;
4429 int hfa_size = GET_MODE_SIZE (hfa_mode);
4433 /* If prototyped, pass it in FR regs then GR regs.
4434 If not prototyped, pass it in both FR and GR regs.
4436 If this is an SFmode aggregate, then it is possible to run out of
4437 FR regs while GR regs are still left. In that case, we pass the
4438 remaining part in the GR regs. */
4440 /* Fill the FP regs. We do this always. We stop if we reach the end
4441 of the argument, the last FP register, or the last argument slot. */
4443 byte_size = ((mode == BLKmode)
4444 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4445 args_byte_size = int_regs * UNITS_PER_WORD;
4447 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4448 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4450 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4451 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4455 args_byte_size += hfa_size;
4459 /* If no prototype, then the whole thing must go in GR regs. */
4460 if (! cum->prototype)
4462 /* If this is an SFmode aggregate, then we might have some left over
4463 that needs to go in GR regs. */
4464 else if (byte_size != offset)
4465 int_regs += offset / UNITS_PER_WORD;
4467 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4469 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4471 enum machine_mode gr_mode = DImode;
4472 unsigned int gr_size;
4474 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4475 then this goes in a GR reg left adjusted/little endian, right
4476 adjusted/big endian. */
4477 /* ??? Currently this is handled wrong, because 4-byte hunks are
4478 always right adjusted/little endian. */
4481 /* If we have an even 4 byte hunk because the aggregate is a
4482 multiple of 4 bytes in size, then this goes in a GR reg right
4483 adjusted/little endian. */
4484 else if (byte_size - offset == 4)
4487 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4488 gen_rtx_REG (gr_mode, (basereg
4492 gr_size = GET_MODE_SIZE (gr_mode);
4494 if (gr_size == UNITS_PER_WORD
4495 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4497 else if (gr_size > UNITS_PER_WORD)
4498 int_regs += gr_size / UNITS_PER_WORD;
4500 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4503 /* On OpenVMS variable argument is either in Rn or Fn. */
4504 else if (TARGET_ABI_OPEN_VMS && named == 0)
4506 if (FLOAT_MODE_P (mode))
4507 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->words);
4509 return gen_rtx_REG (mode, basereg + cum->words);
4512 /* Integral and aggregates go in general registers. If we have run out of
4513 FR registers, then FP values must also go in general registers. This can
4514 happen when we have a SFmode HFA. */
4515 else if (mode == TFmode || mode == TCmode
4516 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4518 int byte_size = ((mode == BLKmode)
4519 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4520 if (BYTES_BIG_ENDIAN
4521 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4522 && byte_size < UNITS_PER_WORD
4525 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4526 gen_rtx_REG (DImode,
4527 (basereg + cum->words
4530 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4533 return gen_rtx_REG (mode, basereg + cum->words + offset);
4537 /* If there is a prototype, then FP values go in a FR register when
4538 named, and in a GR register when unnamed. */
4539 else if (cum->prototype)
4542 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4543 /* In big-endian mode, an anonymous SFmode value must be represented
4544 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4545 the value into the high half of the general register. */
4546 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4547 return gen_rtx_PARALLEL (mode,
4549 gen_rtx_EXPR_LIST (VOIDmode,
4550 gen_rtx_REG (DImode, basereg + cum->words + offset),
4553 return gen_rtx_REG (mode, basereg + cum->words + offset);
4555 /* If there is no prototype, then FP values go in both FR and GR
4559 /* See comment above. */
4560 enum machine_mode inner_mode =
4561 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4563 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4564 gen_rtx_REG (mode, (FR_ARG_FIRST
4567 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4568 gen_rtx_REG (inner_mode,
4569 (basereg + cum->words
4573 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4577 /* Implement TARGET_FUNCION_ARG target hook. */
4580 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4581 const_tree type, bool named)
4583 return ia64_function_arg_1 (cum, mode, type, named, false);
4586 /* Implement TARGET_FUNCION_INCOMING_ARG target hook. */
4589 ia64_function_incoming_arg (CUMULATIVE_ARGS *cum,
4590 enum machine_mode mode,
4591 const_tree type, bool named)
4593 return ia64_function_arg_1 (cum, mode, type, named, true);
4596 /* Return number of bytes, at the beginning of the argument, that must be
4597 put in registers. 0 is the argument is entirely in registers or entirely
4601 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4602 tree type, bool named ATTRIBUTE_UNUSED)
4604 int words = ia64_function_arg_words (type, mode);
4605 int offset = ia64_function_arg_offset (cum, type, words);
4607 /* If all argument slots are used, then it must go on the stack. */
4608 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4611 /* It doesn't matter whether the argument goes in FR or GR regs. If
4612 it fits within the 8 argument slots, then it goes entirely in
4613 registers. If it extends past the last argument slot, then the rest
4614 goes on the stack. */
4616 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4619 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4622 /* Return ivms_arg_type based on machine_mode. */
4624 static enum ivms_arg_type
4625 ia64_arg_type (enum machine_mode mode)
4638 /* Update CUM to point after this argument. This is patterned after
4639 ia64_function_arg. */
4642 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4643 const_tree type, bool named)
4645 int words = ia64_function_arg_words (type, mode);
4646 int offset = ia64_function_arg_offset (cum, type, words);
4647 enum machine_mode hfa_mode = VOIDmode;
4649 /* If all arg slots are already full, then there is nothing to do. */
4650 if (cum->words >= MAX_ARGUMENT_SLOTS)
4652 cum->words += words + offset;
4656 cum->atypes[cum->words] = ia64_arg_type (mode);
4657 cum->words += words + offset;
4659 /* Check for and handle homogeneous FP aggregates. */
4661 hfa_mode = hfa_element_mode (type, 0);
4663 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4664 and unprototyped hfas are passed specially. */
4665 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4667 int fp_regs = cum->fp_regs;
4668 /* This is the original value of cum->words + offset. */
4669 int int_regs = cum->words - words;
4670 int hfa_size = GET_MODE_SIZE (hfa_mode);
4674 /* If prototyped, pass it in FR regs then GR regs.
4675 If not prototyped, pass it in both FR and GR regs.
4677 If this is an SFmode aggregate, then it is possible to run out of
4678 FR regs while GR regs are still left. In that case, we pass the
4679 remaining part in the GR regs. */
4681 /* Fill the FP regs. We do this always. We stop if we reach the end
4682 of the argument, the last FP register, or the last argument slot. */
4684 byte_size = ((mode == BLKmode)
4685 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4686 args_byte_size = int_regs * UNITS_PER_WORD;
4688 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4689 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4692 args_byte_size += hfa_size;
4696 cum->fp_regs = fp_regs;
4699 /* On OpenVMS variable argument is either in Rn or Fn. */
4700 else if (TARGET_ABI_OPEN_VMS && named == 0)
4702 cum->int_regs = cum->words;
4703 cum->fp_regs = cum->words;
4706 /* Integral and aggregates go in general registers. So do TFmode FP values.
4707 If we have run out of FR registers, then other FP values must also go in
4708 general registers. This can happen when we have a SFmode HFA. */
4709 else if (mode == TFmode || mode == TCmode
4710 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4711 cum->int_regs = cum->words;
4713 /* If there is a prototype, then FP values go in a FR register when
4714 named, and in a GR register when unnamed. */
4715 else if (cum->prototype)
4718 cum->int_regs = cum->words;
4720 /* ??? Complex types should not reach here. */
4721 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4723 /* If there is no prototype, then FP values go in both FR and GR
4727 /* ??? Complex types should not reach here. */
4728 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4729 cum->int_regs = cum->words;
4733 /* Arguments with alignment larger than 8 bytes start at the next even
4734 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
4735 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4738 ia64_function_arg_boundary (enum machine_mode mode, const_tree type)
4740 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4741 return PARM_BOUNDARY * 2;
4745 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4746 return PARM_BOUNDARY * 2;
4748 return PARM_BOUNDARY;
4751 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4752 return PARM_BOUNDARY * 2;
4754 return PARM_BOUNDARY;
4757 /* True if it is OK to do sibling call optimization for the specified
4758 call expression EXP. DECL will be the called function, or NULL if
4759 this is an indirect call. */
4761 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4763 /* We can't perform a sibcall if the current function has the syscall_linkage
4765 if (lookup_attribute ("syscall_linkage",
4766 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4769 /* We must always return with our current GP. This means we can
4770 only sibcall to functions defined in the current module unless
4771 TARGET_CONST_GP is set to true. */
4772 return (decl && (*targetm.binds_local_p) (decl)) || TARGET_CONST_GP;
4776 /* Implement va_arg. */
4779 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4782 /* Variable sized types are passed by reference. */
4783 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4785 tree ptrtype = build_pointer_type (type);
4786 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4787 return build_va_arg_indirect_ref (addr);
4790 /* Aggregate arguments with alignment larger than 8 bytes start at
4791 the next even boundary. Integer and floating point arguments
4792 do so if they are larger than 8 bytes, whether or not they are
4793 also aligned larger than 8 bytes. */
4794 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4795 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4797 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4798 size_int (2 * UNITS_PER_WORD - 1));
4799 t = fold_convert (sizetype, t);
4800 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4801 size_int (-2 * UNITS_PER_WORD));
4802 t = fold_convert (TREE_TYPE (valist), t);
4803 gimplify_assign (unshare_expr (valist), t, pre_p);
4806 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4809 /* Return 1 if function return value returned in memory. Return 0 if it is
4813 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4815 enum machine_mode mode;
4816 enum machine_mode hfa_mode;
4817 HOST_WIDE_INT byte_size;
4819 mode = TYPE_MODE (valtype);
4820 byte_size = GET_MODE_SIZE (mode);
4821 if (mode == BLKmode)
4823 byte_size = int_size_in_bytes (valtype);
4828 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4830 hfa_mode = hfa_element_mode (valtype, 0);
4831 if (hfa_mode != VOIDmode)
4833 int hfa_size = GET_MODE_SIZE (hfa_mode);
4835 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4840 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4846 /* Return rtx for register that holds the function return value. */
4849 ia64_function_value (const_tree valtype,
4850 const_tree fn_decl_or_type,
4851 bool outgoing ATTRIBUTE_UNUSED)
4853 enum machine_mode mode;
4854 enum machine_mode hfa_mode;
4856 const_tree func = fn_decl_or_type;
4859 && !DECL_P (fn_decl_or_type))
4862 mode = TYPE_MODE (valtype);
4863 hfa_mode = hfa_element_mode (valtype, 0);
4865 if (hfa_mode != VOIDmode)
4873 hfa_size = GET_MODE_SIZE (hfa_mode);
4874 byte_size = ((mode == BLKmode)
4875 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4877 for (i = 0; offset < byte_size; i++)
4879 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4880 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4884 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4886 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4887 return gen_rtx_REG (mode, FR_ARG_FIRST);
4890 bool need_parallel = false;
4892 /* In big-endian mode, we need to manage the layout of aggregates
4893 in the registers so that we get the bits properly aligned in
4894 the highpart of the registers. */
4895 if (BYTES_BIG_ENDIAN
4896 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4897 need_parallel = true;
4899 /* Something like struct S { long double x; char a[0] } is not an
4900 HFA structure, and therefore doesn't go in fp registers. But
4901 the middle-end will give it XFmode anyway, and XFmode values
4902 don't normally fit in integer registers. So we need to smuggle
4903 the value inside a parallel. */
4904 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4905 need_parallel = true;
4915 bytesize = int_size_in_bytes (valtype);
4916 /* An empty PARALLEL is invalid here, but the return value
4917 doesn't matter for empty structs. */
4919 return gen_rtx_REG (mode, GR_RET_FIRST);
4920 for (i = 0; offset < bytesize; i++)
4922 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4923 gen_rtx_REG (DImode,
4926 offset += UNITS_PER_WORD;
4928 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4931 mode = ia64_promote_function_mode (valtype, mode, &unsignedp,
4932 func ? TREE_TYPE (func) : NULL_TREE,
4935 return gen_rtx_REG (mode, GR_RET_FIRST);
4939 /* Worker function for TARGET_LIBCALL_VALUE. */
4942 ia64_libcall_value (enum machine_mode mode,
4943 const_rtx fun ATTRIBUTE_UNUSED)
4945 return gen_rtx_REG (mode,
4946 (((GET_MODE_CLASS (mode) == MODE_FLOAT
4947 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4948 && (mode) != TFmode)
4949 ? FR_RET_FIRST : GR_RET_FIRST));
4952 /* Worker function for FUNCTION_VALUE_REGNO_P. */
4955 ia64_function_value_regno_p (const unsigned int regno)
4957 return ((regno >= GR_RET_FIRST && regno <= GR_RET_LAST)
4958 || (regno >= FR_RET_FIRST && regno <= FR_RET_LAST));
4961 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4962 We need to emit DTP-relative relocations. */
4965 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4967 gcc_assert (size == 4 || size == 8);
4969 fputs ("\tdata4.ua\t@dtprel(", file);
4971 fputs ("\tdata8.ua\t@dtprel(", file);
4972 output_addr_const (file, x);
4976 /* Print a memory address as an operand to reference that memory location. */
4978 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4979 also call this from ia64_print_operand for memory addresses. */
4982 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4983 rtx address ATTRIBUTE_UNUSED)
4987 /* Print an operand to an assembler instruction.
4988 C Swap and print a comparison operator.
4989 D Print an FP comparison operator.
4990 E Print 32 - constant, for SImode shifts as extract.
4991 e Print 64 - constant, for DImode rotates.
4992 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4993 a floating point register emitted normally.
4994 G A floating point constant.
4995 I Invert a predicate register by adding 1.
4996 J Select the proper predicate register for a condition.
4997 j Select the inverse predicate register for a condition.
4998 O Append .acq for volatile load.
4999 P Postincrement of a MEM.
5000 Q Append .rel for volatile store.
5001 R Print .s .d or nothing for a single, double or no truncation.
5002 S Shift amount for shladd instruction.
5003 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
5004 for Intel assembler.
5005 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
5006 for Intel assembler.
5007 X A pair of floating point registers.
5008 r Print register name, or constant 0 as r0. HP compatibility for
5010 v Print vector constant value as an 8-byte integer value. */
5013 ia64_print_operand (FILE * file, rtx x, int code)
5020 /* Handled below. */
5025 enum rtx_code c = swap_condition (GET_CODE (x));
5026 fputs (GET_RTX_NAME (c), file);
5031 switch (GET_CODE (x))
5055 str = GET_RTX_NAME (GET_CODE (x));
5062 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
5066 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
5070 if (x == CONST0_RTX (GET_MODE (x)))
5071 str = reg_names [FR_REG (0)];
5072 else if (x == CONST1_RTX (GET_MODE (x)))
5073 str = reg_names [FR_REG (1)];
5076 gcc_assert (GET_CODE (x) == REG);
5077 str = reg_names [REGNO (x)];
5086 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
5087 real_to_target (val, &rv, GET_MODE (x));
5088 if (GET_MODE (x) == SFmode)
5089 fprintf (file, "0x%08lx", val[0] & 0xffffffff);
5090 else if (GET_MODE (x) == DFmode)
5091 fprintf (file, "0x%08lx%08lx", (WORDS_BIG_ENDIAN ? val[0] : val[1])
5093 (WORDS_BIG_ENDIAN ? val[1] : val[0])
5096 output_operand_lossage ("invalid %%G mode");
5101 fputs (reg_names [REGNO (x) + 1], file);
5107 unsigned int regno = REGNO (XEXP (x, 0));
5108 if (GET_CODE (x) == EQ)
5112 fputs (reg_names [regno], file);
5117 if (MEM_VOLATILE_P (x))
5118 fputs(".acq", file);
5123 HOST_WIDE_INT value;
5125 switch (GET_CODE (XEXP (x, 0)))
5131 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
5132 if (GET_CODE (x) == CONST_INT)
5136 gcc_assert (GET_CODE (x) == REG);
5137 fprintf (file, ", %s", reg_names[REGNO (x)]);
5143 value = GET_MODE_SIZE (GET_MODE (x));
5147 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
5151 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
5156 if (MEM_VOLATILE_P (x))
5157 fputs(".rel", file);
5161 if (x == CONST0_RTX (GET_MODE (x)))
5163 else if (x == CONST1_RTX (GET_MODE (x)))
5165 else if (x == CONST2_RTX (GET_MODE (x)))
5168 output_operand_lossage ("invalid %%R value");
5172 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5176 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5178 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
5184 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
5186 const char *prefix = "0x";
5187 if (INTVAL (x) & 0x80000000)
5189 fprintf (file, "0xffffffff");
5192 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
5199 unsigned int regno = REGNO (x);
5200 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
5205 /* If this operand is the constant zero, write it as register zero.
5206 Any register, zero, or CONST_INT value is OK here. */
5207 if (GET_CODE (x) == REG)
5208 fputs (reg_names[REGNO (x)], file);
5209 else if (x == CONST0_RTX (GET_MODE (x)))
5211 else if (GET_CODE (x) == CONST_INT)
5212 output_addr_const (file, x);
5214 output_operand_lossage ("invalid %%r value");
5218 gcc_assert (GET_CODE (x) == CONST_VECTOR);
5219 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
5226 /* For conditional branches, returns or calls, substitute
5227 sptk, dptk, dpnt, or spnt for %s. */
5228 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
5231 int pred_val = INTVAL (XEXP (x, 0));
5233 /* Guess top and bottom 10% statically predicted. */
5234 if (pred_val < REG_BR_PROB_BASE / 50
5235 && br_prob_note_reliable_p (x))
5237 else if (pred_val < REG_BR_PROB_BASE / 2)
5239 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
5240 || !br_prob_note_reliable_p (x))
5245 else if (GET_CODE (current_output_insn) == CALL_INSN)
5250 fputs (which, file);
5255 x = current_insn_predicate;
5258 unsigned int regno = REGNO (XEXP (x, 0));
5259 if (GET_CODE (x) == EQ)
5261 fprintf (file, "(%s) ", reg_names [regno]);
5266 output_operand_lossage ("ia64_print_operand: unknown code");
5270 switch (GET_CODE (x))
5272 /* This happens for the spill/restore instructions. */
5277 /* ... fall through ... */
5280 fputs (reg_names [REGNO (x)], file);
5285 rtx addr = XEXP (x, 0);
5286 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
5287 addr = XEXP (addr, 0);
5288 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
5293 output_addr_const (file, x);
5300 /* Compute a (partial) cost for rtx X. Return true if the complete
5301 cost has been computed, and false if subexpressions should be
5302 scanned. In either case, *TOTAL contains the cost result. */
5303 /* ??? This is incomplete. */
5306 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
5307 bool speed ATTRIBUTE_UNUSED)
5315 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
5318 if (satisfies_constraint_I (x))
5320 else if (satisfies_constraint_J (x))
5323 *total = COSTS_N_INSNS (1);
5326 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
5329 *total = COSTS_N_INSNS (1);
5334 *total = COSTS_N_INSNS (1);
5340 *total = COSTS_N_INSNS (3);
5344 *total = COSTS_N_INSNS (4);
5348 /* For multiplies wider than HImode, we have to go to the FPU,
5349 which normally involves copies. Plus there's the latency
5350 of the multiply itself, and the latency of the instructions to
5351 transfer integer regs to FP regs. */
5352 if (FLOAT_MODE_P (GET_MODE (x)))
5353 *total = COSTS_N_INSNS (4);
5354 else if (GET_MODE_SIZE (GET_MODE (x)) > 2)
5355 *total = COSTS_N_INSNS (10);
5357 *total = COSTS_N_INSNS (2);
5362 if (FLOAT_MODE_P (GET_MODE (x)))
5364 *total = COSTS_N_INSNS (4);
5372 *total = COSTS_N_INSNS (1);
5379 /* We make divide expensive, so that divide-by-constant will be
5380 optimized to a multiply. */
5381 *total = COSTS_N_INSNS (60);
5389 /* Calculate the cost of moving data from a register in class FROM to
5390 one in class TO, using MODE. */
5393 ia64_register_move_cost (enum machine_mode mode, reg_class_t from_i,
5396 enum reg_class from = (enum reg_class) from_i;
5397 enum reg_class to = (enum reg_class) to_i;
5399 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
5400 if (to == ADDL_REGS)
5402 if (from == ADDL_REGS)
5405 /* All costs are symmetric, so reduce cases by putting the
5406 lower number class as the destination. */
5409 enum reg_class tmp = to;
5410 to = from, from = tmp;
5413 /* Moving from FR<->GR in XFmode must be more expensive than 2,
5414 so that we get secondary memory reloads. Between FR_REGS,
5415 we have to make this at least as expensive as memory_move_cost
5416 to avoid spectacularly poor register class preferencing. */
5417 if (mode == XFmode || mode == RFmode)
5419 if (to != GR_REGS || from != GR_REGS)
5420 return memory_move_cost (mode, to, false);
5428 /* Moving between PR registers takes two insns. */
5429 if (from == PR_REGS)
5431 /* Moving between PR and anything but GR is impossible. */
5432 if (from != GR_REGS)
5433 return memory_move_cost (mode, to, false);
5437 /* Moving between BR and anything but GR is impossible. */
5438 if (from != GR_REGS && from != GR_AND_BR_REGS)
5439 return memory_move_cost (mode, to, false);
5444 /* Moving between AR and anything but GR is impossible. */
5445 if (from != GR_REGS)
5446 return memory_move_cost (mode, to, false);
5452 case GR_AND_FR_REGS:
5453 case GR_AND_BR_REGS:
5464 /* Calculate the cost of moving data of MODE from a register to or from
5468 ia64_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
5470 bool in ATTRIBUTE_UNUSED)
5472 if (rclass == GENERAL_REGS
5473 || rclass == FR_REGS
5474 || rclass == FP_REGS
5475 || rclass == GR_AND_FR_REGS)
5481 /* Implement TARGET_PREFERRED_RELOAD_CLASS. Place additional restrictions
5482 on RCLASS to use when copying X into that class. */
5485 ia64_preferred_reload_class (rtx x, reg_class_t rclass)
5491 /* Don't allow volatile mem reloads into floating point registers.
5492 This is defined to force reload to choose the r/m case instead
5493 of the f/f case when reloading (set (reg fX) (mem/v)). */
5494 if (MEM_P (x) && MEM_VOLATILE_P (x))
5497 /* Force all unrecognized constants into the constant pool. */
5515 /* This function returns the register class required for a secondary
5516 register when copying between one of the registers in RCLASS, and X,
5517 using MODE. A return value of NO_REGS means that no secondary register
5521 ia64_secondary_reload_class (enum reg_class rclass,
5522 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5526 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5527 regno = true_regnum (x);
5534 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5535 interaction. We end up with two pseudos with overlapping lifetimes
5536 both of which are equiv to the same constant, and both which need
5537 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5538 changes depending on the path length, which means the qty_first_reg
5539 check in make_regs_eqv can give different answers at different times.
5540 At some point I'll probably need a reload_indi pattern to handle
5543 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5544 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5545 non-general registers for good measure. */
5546 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5549 /* This is needed if a pseudo used as a call_operand gets spilled to a
5551 if (GET_CODE (x) == MEM)
5557 /* Need to go through general registers to get to other class regs. */
5558 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5561 /* This can happen when a paradoxical subreg is an operand to the
5563 /* ??? This shouldn't be necessary after instruction scheduling is
5564 enabled, because paradoxical subregs are not accepted by
5565 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5566 stop the paradoxical subreg stupidity in the *_operand functions
5568 if (GET_CODE (x) == MEM
5569 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5570 || GET_MODE (x) == QImode))
5573 /* This can happen because of the ior/and/etc patterns that accept FP
5574 registers as operands. If the third operand is a constant, then it
5575 needs to be reloaded into a FP register. */
5576 if (GET_CODE (x) == CONST_INT)
5579 /* This can happen because of register elimination in a muldi3 insn.
5580 E.g. `26107 * (unsigned long)&u'. */
5581 if (GET_CODE (x) == PLUS)
5586 /* ??? This happens if we cse/gcse a BImode value across a call,
5587 and the function has a nonlocal goto. This is because global
5588 does not allocate call crossing pseudos to hard registers when
5589 crtl->has_nonlocal_goto is true. This is relatively
5590 common for C++ programs that use exceptions. To reproduce,
5591 return NO_REGS and compile libstdc++. */
5592 if (GET_CODE (x) == MEM)
5595 /* This can happen when we take a BImode subreg of a DImode value,
5596 and that DImode value winds up in some non-GR register. */
5597 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5609 /* Implement targetm.unspec_may_trap_p hook. */
5611 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5613 if (GET_CODE (x) == UNSPEC)
5615 switch (XINT (x, 1))
5621 case UNSPEC_CHKACLR:
5623 /* These unspecs are just wrappers. */
5624 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5628 return default_unspec_may_trap_p (x, flags);
5632 /* Parse the -mfixed-range= option string. */
5635 fix_range (const char *const_str)
5638 char *str, *dash, *comma;
5640 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
5641 REG2 are either register names or register numbers. The effect
5642 of this option is to mark the registers in the range from REG1 to
5643 REG2 as ``fixed'' so they won't be used by the compiler. This is
5644 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5646 i = strlen (const_str);
5647 str = (char *) alloca (i + 1);
5648 memcpy (str, const_str, i + 1);
5652 dash = strchr (str, '-');
5655 warning (0, "value of -mfixed-range must have form REG1-REG2");
5660 comma = strchr (dash + 1, ',');
5664 first = decode_reg_name (str);
5667 warning (0, "unknown register name: %s", str);
5671 last = decode_reg_name (dash + 1);
5674 warning (0, "unknown register name: %s", dash + 1);
5682 warning (0, "%s-%s is an empty range", str, dash + 1);
5686 for (i = first; i <= last; ++i)
5687 fixed_regs[i] = call_used_regs[i] = 1;
5697 /* Implement TARGET_HANDLE_OPTION. */
5700 ia64_handle_option (size_t code, const char *arg, int value)
5704 case OPT_mfixed_range_:
5708 case OPT_mtls_size_:
5709 if (value != 14 && value != 22 && value != 64)
5710 error ("bad value %<%s%> for -mtls-size= switch", arg);
5717 const char *name; /* processor name or nickname. */
5718 enum processor_type processor;
5720 const processor_alias_table[] =
5722 {"itanium2", PROCESSOR_ITANIUM2},
5723 {"mckinley", PROCESSOR_ITANIUM2},
5725 int const pta_size = ARRAY_SIZE (processor_alias_table);
5728 for (i = 0; i < pta_size; i++)
5729 if (!strcmp (arg, processor_alias_table[i].name))
5731 ia64_tune = processor_alias_table[i].processor;
5735 error ("bad value %<%s%> for -mtune= switch", arg);
5744 /* Implement TARGET_OPTION_OVERRIDE. */
5747 ia64_option_override (void)
5749 if (TARGET_AUTO_PIC)
5750 target_flags |= MASK_CONST_GP;
5752 /* Numerous experiment shows that IRA based loop pressure
5753 calculation works better for RTL loop invariant motion on targets
5754 with enough (>= 32) registers. It is an expensive optimization.
5755 So it is on only for peak performance. */
5757 flag_ira_loop_pressure = 1;
5760 ia64_section_threshold = (global_options_set.x_g_switch_value
5762 : IA64_DEFAULT_GVALUE);
5764 init_machine_status = ia64_init_machine_status;
5766 if (align_functions <= 0)
5767 align_functions = 64;
5768 if (align_loops <= 0)
5770 if (TARGET_ABI_OPEN_VMS)
5773 ia64_override_options_after_change();
5776 /* Implement targetm.override_options_after_change. */
5779 ia64_override_options_after_change (void)
5781 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5782 flag_schedule_insns_after_reload = 0;
5785 && !global_options_set.x_flag_selective_scheduling
5786 && !global_options_set.x_flag_selective_scheduling2)
5788 flag_selective_scheduling2 = 1;
5789 flag_sel_sched_pipelining = 1;
5791 if (mflag_sched_control_spec == 2)
5793 /* Control speculation is on by default for the selective scheduler,
5794 but not for the Haifa scheduler. */
5795 mflag_sched_control_spec = flag_selective_scheduling2 ? 1 : 0;
5797 if (flag_sel_sched_pipelining && flag_auto_inc_dec)
5799 /* FIXME: remove this when we'd implement breaking autoinsns as
5800 a transformation. */
5801 flag_auto_inc_dec = 0;
5805 /* Initialize the record of emitted frame related registers. */
5807 void ia64_init_expanders (void)
5809 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5812 static struct machine_function *
5813 ia64_init_machine_status (void)
5815 return ggc_alloc_cleared_machine_function ();
5818 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5819 static enum attr_type ia64_safe_type (rtx);
5821 static enum attr_itanium_class
5822 ia64_safe_itanium_class (rtx insn)
5824 if (recog_memoized (insn) >= 0)
5825 return get_attr_itanium_class (insn);
5826 else if (DEBUG_INSN_P (insn))
5827 return ITANIUM_CLASS_IGNORE;
5829 return ITANIUM_CLASS_UNKNOWN;
5832 static enum attr_type
5833 ia64_safe_type (rtx insn)
5835 if (recog_memoized (insn) >= 0)
5836 return get_attr_type (insn);
5838 return TYPE_UNKNOWN;
5841 /* The following collection of routines emit instruction group stop bits as
5842 necessary to avoid dependencies. */
5844 /* Need to track some additional registers as far as serialization is
5845 concerned so we can properly handle br.call and br.ret. We could
5846 make these registers visible to gcc, but since these registers are
5847 never explicitly used in gcc generated code, it seems wasteful to
5848 do so (plus it would make the call and return patterns needlessly
5850 #define REG_RP (BR_REG (0))
5851 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5852 /* This is used for volatile asms which may require a stop bit immediately
5853 before and after them. */
5854 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5855 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5856 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5858 /* For each register, we keep track of how it has been written in the
5859 current instruction group.
5861 If a register is written unconditionally (no qualifying predicate),
5862 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5864 If a register is written if its qualifying predicate P is true, we
5865 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5866 may be written again by the complement of P (P^1) and when this happens,
5867 WRITE_COUNT gets set to 2.
5869 The result of this is that whenever an insn attempts to write a register
5870 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5872 If a predicate register is written by a floating-point insn, we set
5873 WRITTEN_BY_FP to true.
5875 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5876 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
5878 #if GCC_VERSION >= 4000
5879 #define RWS_FIELD_TYPE __extension__ unsigned short
5881 #define RWS_FIELD_TYPE unsigned int
5883 struct reg_write_state
5885 RWS_FIELD_TYPE write_count : 2;
5886 RWS_FIELD_TYPE first_pred : 10;
5887 RWS_FIELD_TYPE written_by_fp : 1;
5888 RWS_FIELD_TYPE written_by_and : 1;
5889 RWS_FIELD_TYPE written_by_or : 1;
5892 /* Cumulative info for the current instruction group. */
5893 struct reg_write_state rws_sum[NUM_REGS];
5894 #ifdef ENABLE_CHECKING
5895 /* Bitmap whether a register has been written in the current insn. */
5896 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5897 / HOST_BITS_PER_WIDEST_FAST_INT];
5900 rws_insn_set (int regno)
5902 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5903 SET_HARD_REG_BIT (rws_insn, regno);
5907 rws_insn_test (int regno)
5909 return TEST_HARD_REG_BIT (rws_insn, regno);
5912 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5913 unsigned char rws_insn[2];
5916 rws_insn_set (int regno)
5918 if (regno == REG_AR_CFM)
5920 else if (regno == REG_VOLATILE)
5925 rws_insn_test (int regno)
5927 if (regno == REG_AR_CFM)
5929 if (regno == REG_VOLATILE)
5935 /* Indicates whether this is the first instruction after a stop bit,
5936 in which case we don't need another stop bit. Without this,
5937 ia64_variable_issue will die when scheduling an alloc. */
5938 static int first_instruction;
5940 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5941 RTL for one instruction. */
5944 unsigned int is_write : 1; /* Is register being written? */
5945 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5946 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5947 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5948 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5949 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5952 static void rws_update (int, struct reg_flags, int);
5953 static int rws_access_regno (int, struct reg_flags, int);
5954 static int rws_access_reg (rtx, struct reg_flags, int);
5955 static void update_set_flags (rtx, struct reg_flags *);
5956 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5957 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5958 static void init_insn_group_barriers (void);
5959 static int group_barrier_needed (rtx);
5960 static int safe_group_barrier_needed (rtx);
5961 static int in_safe_group_barrier;
5963 /* Update *RWS for REGNO, which is being written by the current instruction,
5964 with predicate PRED, and associated register flags in FLAGS. */
5967 rws_update (int regno, struct reg_flags flags, int pred)
5970 rws_sum[regno].write_count++;
5972 rws_sum[regno].write_count = 2;
5973 rws_sum[regno].written_by_fp |= flags.is_fp;
5974 /* ??? Not tracking and/or across differing predicates. */
5975 rws_sum[regno].written_by_and = flags.is_and;
5976 rws_sum[regno].written_by_or = flags.is_or;
5977 rws_sum[regno].first_pred = pred;
5980 /* Handle an access to register REGNO of type FLAGS using predicate register
5981 PRED. Update rws_sum array. Return 1 if this access creates
5982 a dependency with an earlier instruction in the same group. */
5985 rws_access_regno (int regno, struct reg_flags flags, int pred)
5987 int need_barrier = 0;
5989 gcc_assert (regno < NUM_REGS);
5991 if (! PR_REGNO_P (regno))
5992 flags.is_and = flags.is_or = 0;
5998 rws_insn_set (regno);
5999 write_count = rws_sum[regno].write_count;
6001 switch (write_count)
6004 /* The register has not been written yet. */
6005 if (!in_safe_group_barrier)
6006 rws_update (regno, flags, pred);
6010 /* The register has been written via a predicate. Treat
6011 it like a unconditional write and do not try to check
6012 for complementary pred reg in earlier write. */
6013 if (flags.is_and && rws_sum[regno].written_by_and)
6015 else if (flags.is_or && rws_sum[regno].written_by_or)
6019 if (!in_safe_group_barrier)
6020 rws_update (regno, flags, pred);
6024 /* The register has been unconditionally written already. We
6026 if (flags.is_and && rws_sum[regno].written_by_and)
6028 else if (flags.is_or && rws_sum[regno].written_by_or)
6032 if (!in_safe_group_barrier)
6034 rws_sum[regno].written_by_and = flags.is_and;
6035 rws_sum[regno].written_by_or = flags.is_or;
6045 if (flags.is_branch)
6047 /* Branches have several RAW exceptions that allow to avoid
6050 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
6051 /* RAW dependencies on branch regs are permissible as long
6052 as the writer is a non-branch instruction. Since we
6053 never generate code that uses a branch register written
6054 by a branch instruction, handling this case is
6058 if (REGNO_REG_CLASS (regno) == PR_REGS
6059 && ! rws_sum[regno].written_by_fp)
6060 /* The predicates of a branch are available within the
6061 same insn group as long as the predicate was written by
6062 something other than a floating-point instruction. */
6066 if (flags.is_and && rws_sum[regno].written_by_and)
6068 if (flags.is_or && rws_sum[regno].written_by_or)
6071 switch (rws_sum[regno].write_count)
6074 /* The register has not been written yet. */
6078 /* The register has been written via a predicate, assume we
6079 need a barrier (don't check for complementary regs). */
6084 /* The register has been unconditionally written already. We
6094 return need_barrier;
6098 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
6100 int regno = REGNO (reg);
6101 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
6104 return rws_access_regno (regno, flags, pred);
6107 int need_barrier = 0;
6109 need_barrier |= rws_access_regno (regno + n, flags, pred);
6110 return need_barrier;
6114 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
6115 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
6118 update_set_flags (rtx x, struct reg_flags *pflags)
6120 rtx src = SET_SRC (x);
6122 switch (GET_CODE (src))
6128 /* There are four cases here:
6129 (1) The destination is (pc), in which case this is a branch,
6130 nothing here applies.
6131 (2) The destination is ar.lc, in which case this is a
6132 doloop_end_internal,
6133 (3) The destination is an fp register, in which case this is
6134 an fselect instruction.
6135 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
6136 this is a check load.
6137 In all cases, nothing we do in this function applies. */
6141 if (COMPARISON_P (src)
6142 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
6143 /* Set pflags->is_fp to 1 so that we know we're dealing
6144 with a floating point comparison when processing the
6145 destination of the SET. */
6148 /* Discover if this is a parallel comparison. We only handle
6149 and.orcm and or.andcm at present, since we must retain a
6150 strict inverse on the predicate pair. */
6151 else if (GET_CODE (src) == AND)
6153 else if (GET_CODE (src) == IOR)
6160 /* Subroutine of rtx_needs_barrier; this function determines whether the
6161 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
6162 are as in rtx_needs_barrier. COND is an rtx that holds the condition
6166 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
6168 int need_barrier = 0;
6170 rtx src = SET_SRC (x);
6172 if (GET_CODE (src) == CALL)
6173 /* We don't need to worry about the result registers that
6174 get written by subroutine call. */
6175 return rtx_needs_barrier (src, flags, pred);
6176 else if (SET_DEST (x) == pc_rtx)
6178 /* X is a conditional branch. */
6179 /* ??? This seems redundant, as the caller sets this bit for
6181 if (!ia64_spec_check_src_p (src))
6182 flags.is_branch = 1;
6183 return rtx_needs_barrier (src, flags, pred);
6186 if (ia64_spec_check_src_p (src))
6187 /* Avoid checking one register twice (in condition
6188 and in 'then' section) for ldc pattern. */
6190 gcc_assert (REG_P (XEXP (src, 2)));
6191 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
6193 /* We process MEM below. */
6194 src = XEXP (src, 1);
6197 need_barrier |= rtx_needs_barrier (src, flags, pred);
6200 if (GET_CODE (dst) == ZERO_EXTRACT)
6202 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
6203 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
6205 return need_barrier;
6208 /* Handle an access to rtx X of type FLAGS using predicate register
6209 PRED. Return 1 if this access creates a dependency with an earlier
6210 instruction in the same group. */
6213 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
6216 int is_complemented = 0;
6217 int need_barrier = 0;
6218 const char *format_ptr;
6219 struct reg_flags new_flags;
6227 switch (GET_CODE (x))
6230 update_set_flags (x, &new_flags);
6231 need_barrier = set_src_needs_barrier (x, new_flags, pred);
6232 if (GET_CODE (SET_SRC (x)) != CALL)
6234 new_flags.is_write = 1;
6235 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
6240 new_flags.is_write = 0;
6241 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6243 /* Avoid multiple register writes, in case this is a pattern with
6244 multiple CALL rtx. This avoids a failure in rws_access_reg. */
6245 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
6247 new_flags.is_write = 1;
6248 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
6249 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
6250 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6255 /* X is a predicated instruction. */
6257 cond = COND_EXEC_TEST (x);
6259 need_barrier = rtx_needs_barrier (cond, flags, 0);
6261 if (GET_CODE (cond) == EQ)
6262 is_complemented = 1;
6263 cond = XEXP (cond, 0);
6264 gcc_assert (GET_CODE (cond) == REG
6265 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
6266 pred = REGNO (cond);
6267 if (is_complemented)
6270 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
6271 return need_barrier;
6275 /* Clobber & use are for earlier compiler-phases only. */
6280 /* We always emit stop bits for traditional asms. We emit stop bits
6281 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
6282 if (GET_CODE (x) != ASM_OPERANDS
6283 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
6285 /* Avoid writing the register multiple times if we have multiple
6286 asm outputs. This avoids a failure in rws_access_reg. */
6287 if (! rws_insn_test (REG_VOLATILE))
6289 new_flags.is_write = 1;
6290 rws_access_regno (REG_VOLATILE, new_flags, pred);
6295 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
6296 We cannot just fall through here since then we would be confused
6297 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
6298 traditional asms unlike their normal usage. */
6300 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
6301 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
6306 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6308 rtx pat = XVECEXP (x, 0, i);
6309 switch (GET_CODE (pat))
6312 update_set_flags (pat, &new_flags);
6313 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
6319 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6330 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
6332 rtx pat = XVECEXP (x, 0, i);
6333 if (GET_CODE (pat) == SET)
6335 if (GET_CODE (SET_SRC (pat)) != CALL)
6337 new_flags.is_write = 1;
6338 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
6342 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
6343 need_barrier |= rtx_needs_barrier (pat, flags, pred);
6348 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
6351 if (REGNO (x) == AR_UNAT_REGNUM)
6353 for (i = 0; i < 64; ++i)
6354 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
6357 need_barrier = rws_access_reg (x, flags, pred);
6361 /* Find the regs used in memory address computation. */
6362 new_flags.is_write = 0;
6363 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6366 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
6367 case SYMBOL_REF: case LABEL_REF: case CONST:
6370 /* Operators with side-effects. */
6371 case POST_INC: case POST_DEC:
6372 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6374 new_flags.is_write = 0;
6375 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6376 new_flags.is_write = 1;
6377 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6381 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
6383 new_flags.is_write = 0;
6384 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
6385 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6386 new_flags.is_write = 1;
6387 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
6390 /* Handle common unary and binary ops for efficiency. */
6391 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
6392 case MOD: case UDIV: case UMOD: case AND: case IOR:
6393 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
6394 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
6395 case NE: case EQ: case GE: case GT: case LE:
6396 case LT: case GEU: case GTU: case LEU: case LTU:
6397 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
6398 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
6401 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
6402 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
6403 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
6404 case SQRT: case FFS: case POPCOUNT:
6405 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6409 /* VEC_SELECT's second argument is a PARALLEL with integers that
6410 describe the elements selected. On ia64, those integers are
6411 always constants. Avoid walking the PARALLEL so that we don't
6412 get confused with "normal" parallels and then die. */
6413 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
6417 switch (XINT (x, 1))
6419 case UNSPEC_LTOFF_DTPMOD:
6420 case UNSPEC_LTOFF_DTPREL:
6422 case UNSPEC_LTOFF_TPREL:
6424 case UNSPEC_PRED_REL_MUTEX:
6425 case UNSPEC_PIC_CALL:
6427 case UNSPEC_FETCHADD_ACQ:
6428 case UNSPEC_BSP_VALUE:
6429 case UNSPEC_FLUSHRS:
6430 case UNSPEC_BUNDLE_SELECTOR:
6433 case UNSPEC_GR_SPILL:
6434 case UNSPEC_GR_RESTORE:
6436 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
6437 HOST_WIDE_INT bit = (offset >> 3) & 63;
6439 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6440 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
6441 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
6446 case UNSPEC_FR_SPILL:
6447 case UNSPEC_FR_RESTORE:
6448 case UNSPEC_GETF_EXP:
6449 case UNSPEC_SETF_EXP:
6451 case UNSPEC_FR_SQRT_RECIP_APPROX:
6452 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
6457 case UNSPEC_CHKACLR:
6459 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6462 case UNSPEC_FR_RECIP_APPROX:
6464 case UNSPEC_COPYSIGN:
6465 case UNSPEC_FR_RECIP_APPROX_RES:
6466 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
6467 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6470 case UNSPEC_CMPXCHG_ACQ:
6471 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
6472 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
6480 case UNSPEC_VOLATILE:
6481 switch (XINT (x, 1))
6484 /* Alloc must always be the first instruction of a group.
6485 We force this by always returning true. */
6486 /* ??? We might get better scheduling if we explicitly check for
6487 input/local/output register dependencies, and modify the
6488 scheduler so that alloc is always reordered to the start of
6489 the current group. We could then eliminate all of the
6490 first_instruction code. */
6491 rws_access_regno (AR_PFS_REGNUM, flags, pred);
6493 new_flags.is_write = 1;
6494 rws_access_regno (REG_AR_CFM, new_flags, pred);
6497 case UNSPECV_SET_BSP:
6501 case UNSPECV_BLOCKAGE:
6502 case UNSPECV_INSN_GROUP_BARRIER:
6504 case UNSPECV_PSAC_ALL:
6505 case UNSPECV_PSAC_NORMAL:
6514 new_flags.is_write = 0;
6515 need_barrier = rws_access_regno (REG_RP, flags, pred);
6516 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
6518 new_flags.is_write = 1;
6519 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
6520 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
6524 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
6525 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6526 switch (format_ptr[i])
6528 case '0': /* unused field */
6529 case 'i': /* integer */
6530 case 'n': /* note */
6531 case 'w': /* wide integer */
6532 case 's': /* pointer to string */
6533 case 'S': /* optional pointer to string */
6537 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
6542 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
6543 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
6552 return need_barrier;
6555 /* Clear out the state for group_barrier_needed at the start of a
6556 sequence of insns. */
6559 init_insn_group_barriers (void)
6561 memset (rws_sum, 0, sizeof (rws_sum));
6562 first_instruction = 1;
6565 /* Given the current state, determine whether a group barrier (a stop bit) is
6566 necessary before INSN. Return nonzero if so. This modifies the state to
6567 include the effects of INSN as a side-effect. */
6570 group_barrier_needed (rtx insn)
6573 int need_barrier = 0;
6574 struct reg_flags flags;
6576 memset (&flags, 0, sizeof (flags));
6577 switch (GET_CODE (insn))
6584 /* A barrier doesn't imply an instruction group boundary. */
6588 memset (rws_insn, 0, sizeof (rws_insn));
6592 flags.is_branch = 1;
6593 flags.is_sibcall = SIBLING_CALL_P (insn);
6594 memset (rws_insn, 0, sizeof (rws_insn));
6596 /* Don't bundle a call following another call. */
6597 if ((pat = prev_active_insn (insn))
6598 && GET_CODE (pat) == CALL_INSN)
6604 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6608 if (!ia64_spec_check_p (insn))
6609 flags.is_branch = 1;
6611 /* Don't bundle a jump following a call. */
6612 if ((pat = prev_active_insn (insn))
6613 && GET_CODE (pat) == CALL_INSN)
6621 if (GET_CODE (PATTERN (insn)) == USE
6622 || GET_CODE (PATTERN (insn)) == CLOBBER)
6623 /* Don't care about USE and CLOBBER "insns"---those are used to
6624 indicate to the optimizer that it shouldn't get rid of
6625 certain operations. */
6628 pat = PATTERN (insn);
6630 /* Ug. Hack hacks hacked elsewhere. */
6631 switch (recog_memoized (insn))
6633 /* We play dependency tricks with the epilogue in order
6634 to get proper schedules. Undo this for dv analysis. */
6635 case CODE_FOR_epilogue_deallocate_stack:
6636 case CODE_FOR_prologue_allocate_stack:
6637 pat = XVECEXP (pat, 0, 0);
6640 /* The pattern we use for br.cloop confuses the code above.
6641 The second element of the vector is representative. */
6642 case CODE_FOR_doloop_end_internal:
6643 pat = XVECEXP (pat, 0, 1);
6646 /* Doesn't generate code. */
6647 case CODE_FOR_pred_rel_mutex:
6648 case CODE_FOR_prologue_use:
6655 memset (rws_insn, 0, sizeof (rws_insn));
6656 need_barrier = rtx_needs_barrier (pat, flags, 0);
6658 /* Check to see if the previous instruction was a volatile
6661 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6669 if (first_instruction && INSN_P (insn)
6670 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6671 && GET_CODE (PATTERN (insn)) != USE
6672 && GET_CODE (PATTERN (insn)) != CLOBBER)
6675 first_instruction = 0;
6678 return need_barrier;
6681 /* Like group_barrier_needed, but do not clobber the current state. */
6684 safe_group_barrier_needed (rtx insn)
6686 int saved_first_instruction;
6689 saved_first_instruction = first_instruction;
6690 in_safe_group_barrier = 1;
6692 t = group_barrier_needed (insn);
6694 first_instruction = saved_first_instruction;
6695 in_safe_group_barrier = 0;
6700 /* Scan the current function and insert stop bits as necessary to
6701 eliminate dependencies. This function assumes that a final
6702 instruction scheduling pass has been run which has already
6703 inserted most of the necessary stop bits. This function only
6704 inserts new ones at basic block boundaries, since these are
6705 invisible to the scheduler. */
6708 emit_insn_group_barriers (FILE *dump)
6712 int insns_since_last_label = 0;
6714 init_insn_group_barriers ();
6716 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6718 if (GET_CODE (insn) == CODE_LABEL)
6720 if (insns_since_last_label)
6722 insns_since_last_label = 0;
6724 else if (GET_CODE (insn) == NOTE
6725 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6727 if (insns_since_last_label)
6729 insns_since_last_label = 0;
6731 else if (GET_CODE (insn) == INSN
6732 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6733 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6735 init_insn_group_barriers ();
6738 else if (NONDEBUG_INSN_P (insn))
6740 insns_since_last_label = 1;
6742 if (group_barrier_needed (insn))
6747 fprintf (dump, "Emitting stop before label %d\n",
6748 INSN_UID (last_label));
6749 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6752 init_insn_group_barriers ();
6760 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6761 This function has to emit all necessary group barriers. */
6764 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6768 init_insn_group_barriers ();
6770 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6772 if (GET_CODE (insn) == BARRIER)
6774 rtx last = prev_active_insn (insn);
6778 if (GET_CODE (last) == JUMP_INSN
6779 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6780 last = prev_active_insn (last);
6781 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6782 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6784 init_insn_group_barriers ();
6786 else if (NONDEBUG_INSN_P (insn))
6788 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6789 init_insn_group_barriers ();
6790 else if (group_barrier_needed (insn))
6792 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6793 init_insn_group_barriers ();
6794 group_barrier_needed (insn);
6802 /* Instruction scheduling support. */
6804 #define NR_BUNDLES 10
6806 /* A list of names of all available bundles. */
6808 static const char *bundle_name [NR_BUNDLES] =
6814 #if NR_BUNDLES == 10
6824 /* Nonzero if we should insert stop bits into the schedule. */
6826 int ia64_final_schedule = 0;
6828 /* Codes of the corresponding queried units: */
6830 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6831 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6833 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6834 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6836 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6838 /* The following variable value is an insn group barrier. */
6840 static rtx dfa_stop_insn;
6842 /* The following variable value is the last issued insn. */
6844 static rtx last_scheduled_insn;
6846 /* The following variable value is pointer to a DFA state used as
6847 temporary variable. */
6849 static state_t temp_dfa_state = NULL;
6851 /* The following variable value is DFA state after issuing the last
6854 static state_t prev_cycle_state = NULL;
6856 /* The following array element values are TRUE if the corresponding
6857 insn requires to add stop bits before it. */
6859 static char *stops_p = NULL;
6861 /* The following variable is used to set up the mentioned above array. */
6863 static int stop_before_p = 0;
6865 /* The following variable value is length of the arrays `clocks' and
6868 static int clocks_length;
6870 /* The following variable value is number of data speculations in progress. */
6871 static int pending_data_specs = 0;
6873 /* Number of memory references on current and three future processor cycles. */
6874 static char mem_ops_in_group[4];
6876 /* Number of current processor cycle (from scheduler's point of view). */
6877 static int current_cycle;
6879 static rtx ia64_single_set (rtx);
6880 static void ia64_emit_insn_before (rtx, rtx);
6882 /* Map a bundle number to its pseudo-op. */
6885 get_bundle_name (int b)
6887 return bundle_name[b];
6891 /* Return the maximum number of instructions a cpu can issue. */
6894 ia64_issue_rate (void)
6899 /* Helper function - like single_set, but look inside COND_EXEC. */
6902 ia64_single_set (rtx insn)
6904 rtx x = PATTERN (insn), ret;
6905 if (GET_CODE (x) == COND_EXEC)
6906 x = COND_EXEC_CODE (x);
6907 if (GET_CODE (x) == SET)
6910 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6911 Although they are not classical single set, the second set is there just
6912 to protect it from moving past FP-relative stack accesses. */
6913 switch (recog_memoized (insn))
6915 case CODE_FOR_prologue_allocate_stack:
6916 case CODE_FOR_epilogue_deallocate_stack:
6917 ret = XVECEXP (x, 0, 0);
6921 ret = single_set_2 (insn, x);
6928 /* Adjust the cost of a scheduling dependency.
6929 Return the new cost of a dependency of type DEP_TYPE or INSN on DEP_INSN.
6930 COST is the current cost, DW is dependency weakness. */
6932 ia64_adjust_cost_2 (rtx insn, int dep_type1, rtx dep_insn, int cost, dw_t dw)
6934 enum reg_note dep_type = (enum reg_note) dep_type1;
6935 enum attr_itanium_class dep_class;
6936 enum attr_itanium_class insn_class;
6938 insn_class = ia64_safe_itanium_class (insn);
6939 dep_class = ia64_safe_itanium_class (dep_insn);
6941 /* Treat true memory dependencies separately. Ignore apparent true
6942 dependence between store and call (call has a MEM inside a SYMBOL_REF). */
6943 if (dep_type == REG_DEP_TRUE
6944 && (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF)
6945 && (insn_class == ITANIUM_CLASS_BR || insn_class == ITANIUM_CLASS_SCALL))
6948 if (dw == MIN_DEP_WEAK)
6949 /* Store and load are likely to alias, use higher cost to avoid stall. */
6950 return PARAM_VALUE (PARAM_SCHED_MEM_TRUE_DEP_COST);
6951 else if (dw > MIN_DEP_WEAK)
6953 /* Store and load are less likely to alias. */
6954 if (mflag_sched_fp_mem_deps_zero_cost && dep_class == ITANIUM_CLASS_STF)
6955 /* Assume there will be no cache conflict for floating-point data.
6956 For integer data, L1 conflict penalty is huge (17 cycles), so we
6957 never assume it will not cause a conflict. */
6963 if (dep_type != REG_DEP_OUTPUT)
6966 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6967 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6973 /* Like emit_insn_before, but skip cycle_display notes.
6974 ??? When cycle display notes are implemented, update this. */
6977 ia64_emit_insn_before (rtx insn, rtx before)
6979 emit_insn_before (insn, before);
6982 /* The following function marks insns who produce addresses for load
6983 and store insns. Such insns will be placed into M slots because it
6984 decrease latency time for Itanium1 (see function
6985 `ia64_produce_address_p' and the DFA descriptions). */
6988 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6990 rtx insn, next, next_tail;
6992 /* Before reload, which_alternative is not set, which means that
6993 ia64_safe_itanium_class will produce wrong results for (at least)
6994 move instructions. */
6995 if (!reload_completed)
6998 next_tail = NEXT_INSN (tail);
6999 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7002 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
7004 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
7006 sd_iterator_def sd_it;
7008 bool has_mem_op_consumer_p = false;
7010 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
7012 enum attr_itanium_class c;
7014 if (DEP_TYPE (dep) != REG_DEP_TRUE)
7017 next = DEP_CON (dep);
7018 c = ia64_safe_itanium_class (next);
7019 if ((c == ITANIUM_CLASS_ST
7020 || c == ITANIUM_CLASS_STF)
7021 && ia64_st_address_bypass_p (insn, next))
7023 has_mem_op_consumer_p = true;
7026 else if ((c == ITANIUM_CLASS_LD
7027 || c == ITANIUM_CLASS_FLD
7028 || c == ITANIUM_CLASS_FLDP)
7029 && ia64_ld_address_bypass_p (insn, next))
7031 has_mem_op_consumer_p = true;
7036 insn->call = has_mem_op_consumer_p;
7040 /* We're beginning a new block. Initialize data structures as necessary. */
7043 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
7044 int sched_verbose ATTRIBUTE_UNUSED,
7045 int max_ready ATTRIBUTE_UNUSED)
7047 #ifdef ENABLE_CHECKING
7050 if (!sel_sched_p () && reload_completed)
7051 for (insn = NEXT_INSN (current_sched_info->prev_head);
7052 insn != current_sched_info->next_tail;
7053 insn = NEXT_INSN (insn))
7054 gcc_assert (!SCHED_GROUP_P (insn));
7056 last_scheduled_insn = NULL_RTX;
7057 init_insn_group_barriers ();
7060 memset (mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7063 /* We're beginning a scheduling pass. Check assertion. */
7066 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
7067 int sched_verbose ATTRIBUTE_UNUSED,
7068 int max_ready ATTRIBUTE_UNUSED)
7070 gcc_assert (pending_data_specs == 0);
7073 /* Scheduling pass is now finished. Free/reset static variable. */
7075 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
7076 int sched_verbose ATTRIBUTE_UNUSED)
7078 gcc_assert (pending_data_specs == 0);
7081 /* Return TRUE if INSN is a load (either normal or speculative, but not a
7082 speculation check), FALSE otherwise. */
7084 is_load_p (rtx insn)
7086 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7089 ((insn_class == ITANIUM_CLASS_LD || insn_class == ITANIUM_CLASS_FLD)
7090 && get_attr_check_load (insn) == CHECK_LOAD_NO);
7093 /* If INSN is a memory reference, memoize it in MEM_OPS_IN_GROUP global array
7094 (taking account for 3-cycle cache reference postponing for stores: Intel
7095 Itanium 2 Reference Manual for Software Development and Optimization,
7098 record_memory_reference (rtx insn)
7100 enum attr_itanium_class insn_class = ia64_safe_itanium_class (insn);
7102 switch (insn_class) {
7103 case ITANIUM_CLASS_FLD:
7104 case ITANIUM_CLASS_LD:
7105 mem_ops_in_group[current_cycle % 4]++;
7107 case ITANIUM_CLASS_STF:
7108 case ITANIUM_CLASS_ST:
7109 mem_ops_in_group[(current_cycle + 3) % 4]++;
7115 /* We are about to being issuing insns for this clock cycle.
7116 Override the default sort algorithm to better slot instructions. */
7119 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
7120 int *pn_ready, int clock_var,
7124 int n_ready = *pn_ready;
7125 rtx *e_ready = ready + n_ready;
7129 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
7131 if (reorder_type == 0)
7133 /* First, move all USEs, CLOBBERs and other crud out of the way. */
7135 for (insnp = ready; insnp < e_ready; insnp++)
7136 if (insnp < e_ready)
7139 enum attr_type t = ia64_safe_type (insn);
7140 if (t == TYPE_UNKNOWN)
7142 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7143 || asm_noperands (PATTERN (insn)) >= 0)
7145 rtx lowest = ready[n_asms];
7146 ready[n_asms] = insn;
7152 rtx highest = ready[n_ready - 1];
7153 ready[n_ready - 1] = insn;
7160 if (n_asms < n_ready)
7162 /* Some normal insns to process. Skip the asms. */
7166 else if (n_ready > 0)
7170 if (ia64_final_schedule)
7173 int nr_need_stop = 0;
7175 for (insnp = ready; insnp < e_ready; insnp++)
7176 if (safe_group_barrier_needed (*insnp))
7179 if (reorder_type == 1 && n_ready == nr_need_stop)
7181 if (reorder_type == 0)
7184 /* Move down everything that needs a stop bit, preserving
7186 while (insnp-- > ready + deleted)
7187 while (insnp >= ready + deleted)
7190 if (! safe_group_barrier_needed (insn))
7192 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7200 current_cycle = clock_var;
7201 if (reload_completed && mem_ops_in_group[clock_var % 4] >= ia64_max_memory_insns)
7206 /* Move down loads/stores, preserving relative order. */
7207 while (insnp-- > ready + moved)
7208 while (insnp >= ready + moved)
7211 if (! is_load_p (insn))
7213 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
7224 /* We are about to being issuing insns for this clock cycle. Override
7225 the default sort algorithm to better slot instructions. */
7228 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
7231 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
7232 pn_ready, clock_var, 0);
7235 /* Like ia64_sched_reorder, but called after issuing each insn.
7236 Override the default sort algorithm to better slot instructions. */
7239 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
7240 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
7241 int *pn_ready, int clock_var)
7243 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
7247 /* We are about to issue INSN. Return the number of insns left on the
7248 ready queue that can be issued this cycle. */
7251 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
7252 int sched_verbose ATTRIBUTE_UNUSED,
7253 rtx insn ATTRIBUTE_UNUSED,
7254 int can_issue_more ATTRIBUTE_UNUSED)
7256 if (sched_deps_info->generate_spec_deps && !sel_sched_p ())
7257 /* Modulo scheduling does not extend h_i_d when emitting
7258 new instructions. Don't use h_i_d, if we don't have to. */
7260 if (DONE_SPEC (insn) & BEGIN_DATA)
7261 pending_data_specs++;
7262 if (CHECK_SPEC (insn) & BEGIN_DATA)
7263 pending_data_specs--;
7266 if (DEBUG_INSN_P (insn))
7269 last_scheduled_insn = insn;
7270 memcpy (prev_cycle_state, curr_state, dfa_state_size);
7271 if (reload_completed)
7273 int needed = group_barrier_needed (insn);
7275 gcc_assert (!needed);
7276 if (GET_CODE (insn) == CALL_INSN)
7277 init_insn_group_barriers ();
7278 stops_p [INSN_UID (insn)] = stop_before_p;
7281 record_memory_reference (insn);
7286 /* We are choosing insn from the ready queue. Return nonzero if INSN
7290 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
7292 gcc_assert (insn && INSN_P (insn));
7293 return ((!reload_completed
7294 || !safe_group_barrier_needed (insn))
7295 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn)
7296 && (!mflag_sched_mem_insns_hard_limit
7297 || !is_load_p (insn)
7298 || mem_ops_in_group[current_cycle % 4] < ia64_max_memory_insns));
7301 /* We are choosing insn from the ready queue. Return nonzero if INSN
7305 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
7307 gcc_assert (insn && INSN_P (insn));
7308 /* Size of ALAT is 32. As far as we perform conservative data speculation,
7309 we keep ALAT half-empty. */
7310 return (pending_data_specs < 16
7311 || !(TODO_SPEC (insn) & BEGIN_DATA));
7314 /* The following variable value is pseudo-insn used by the DFA insn
7315 scheduler to change the DFA state when the simulated clock is
7318 static rtx dfa_pre_cycle_insn;
7320 /* Returns 1 when a meaningful insn was scheduled between the last group
7321 barrier and LAST. */
7323 scheduled_good_insn (rtx last)
7325 if (last && recog_memoized (last) >= 0)
7329 last != NULL && !NOTE_INSN_BASIC_BLOCK_P (last)
7330 && !stops_p[INSN_UID (last)];
7331 last = PREV_INSN (last))
7332 /* We could hit a NOTE_INSN_DELETED here which is actually outside
7333 the ebb we're scheduling. */
7334 if (INSN_P (last) && recog_memoized (last) >= 0)
7340 /* We are about to being issuing INSN. Return nonzero if we cannot
7341 issue it on given cycle CLOCK and return zero if we should not sort
7342 the ready queue on the next clock start. */
7345 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
7346 int clock, int *sort_p)
7348 gcc_assert (insn && INSN_P (insn));
7350 if (DEBUG_INSN_P (insn))
7353 /* When a group barrier is needed for insn, last_scheduled_insn
7355 gcc_assert (!(reload_completed && safe_group_barrier_needed (insn))
7356 || last_scheduled_insn);
7358 if ((reload_completed
7359 && (safe_group_barrier_needed (insn)
7360 || (mflag_sched_stop_bits_after_every_cycle
7361 && last_clock != clock
7362 && last_scheduled_insn
7363 && scheduled_good_insn (last_scheduled_insn))))
7364 || (last_scheduled_insn
7365 && (GET_CODE (last_scheduled_insn) == CALL_INSN
7366 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7367 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
7369 init_insn_group_barriers ();
7371 if (verbose && dump)
7372 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
7373 last_clock == clock ? " + cycle advance" : "");
7376 current_cycle = clock;
7377 mem_ops_in_group[current_cycle % 4] = 0;
7379 if (last_clock == clock)
7381 state_transition (curr_state, dfa_stop_insn);
7382 if (TARGET_EARLY_STOP_BITS)
7383 *sort_p = (last_scheduled_insn == NULL_RTX
7384 || GET_CODE (last_scheduled_insn) != CALL_INSN);
7390 if (last_scheduled_insn)
7392 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
7393 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
7394 state_reset (curr_state);
7397 memcpy (curr_state, prev_cycle_state, dfa_state_size);
7398 state_transition (curr_state, dfa_stop_insn);
7399 state_transition (curr_state, dfa_pre_cycle_insn);
7400 state_transition (curr_state, NULL);
7407 /* Implement targetm.sched.h_i_d_extended hook.
7408 Extend internal data structures. */
7410 ia64_h_i_d_extended (void)
7412 if (stops_p != NULL)
7414 int new_clocks_length = get_max_uid () * 3 / 2;
7415 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
7416 clocks_length = new_clocks_length;
7421 /* This structure describes the data used by the backend to guide scheduling.
7422 When the current scheduling point is switched, this data should be saved
7423 and restored later, if the scheduler returns to this point. */
7424 struct _ia64_sched_context
7426 state_t prev_cycle_state;
7427 rtx last_scheduled_insn;
7428 struct reg_write_state rws_sum[NUM_REGS];
7429 struct reg_write_state rws_insn[NUM_REGS];
7430 int first_instruction;
7431 int pending_data_specs;
7433 char mem_ops_in_group[4];
7435 typedef struct _ia64_sched_context *ia64_sched_context_t;
7437 /* Allocates a scheduling context. */
7439 ia64_alloc_sched_context (void)
7441 return xmalloc (sizeof (struct _ia64_sched_context));
7444 /* Initializes the _SC context with clean data, if CLEAN_P, and from
7445 the global context otherwise. */
7447 ia64_init_sched_context (void *_sc, bool clean_p)
7449 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7451 sc->prev_cycle_state = xmalloc (dfa_state_size);
7454 state_reset (sc->prev_cycle_state);
7455 sc->last_scheduled_insn = NULL_RTX;
7456 memset (sc->rws_sum, 0, sizeof (rws_sum));
7457 memset (sc->rws_insn, 0, sizeof (rws_insn));
7458 sc->first_instruction = 1;
7459 sc->pending_data_specs = 0;
7460 sc->current_cycle = 0;
7461 memset (sc->mem_ops_in_group, 0, sizeof (mem_ops_in_group));
7465 memcpy (sc->prev_cycle_state, prev_cycle_state, dfa_state_size);
7466 sc->last_scheduled_insn = last_scheduled_insn;
7467 memcpy (sc->rws_sum, rws_sum, sizeof (rws_sum));
7468 memcpy (sc->rws_insn, rws_insn, sizeof (rws_insn));
7469 sc->first_instruction = first_instruction;
7470 sc->pending_data_specs = pending_data_specs;
7471 sc->current_cycle = current_cycle;
7472 memcpy (sc->mem_ops_in_group, mem_ops_in_group, sizeof (mem_ops_in_group));
7476 /* Sets the global scheduling context to the one pointed to by _SC. */
7478 ia64_set_sched_context (void *_sc)
7480 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7482 gcc_assert (sc != NULL);
7484 memcpy (prev_cycle_state, sc->prev_cycle_state, dfa_state_size);
7485 last_scheduled_insn = sc->last_scheduled_insn;
7486 memcpy (rws_sum, sc->rws_sum, sizeof (rws_sum));
7487 memcpy (rws_insn, sc->rws_insn, sizeof (rws_insn));
7488 first_instruction = sc->first_instruction;
7489 pending_data_specs = sc->pending_data_specs;
7490 current_cycle = sc->current_cycle;
7491 memcpy (mem_ops_in_group, sc->mem_ops_in_group, sizeof (mem_ops_in_group));
7494 /* Clears the data in the _SC scheduling context. */
7496 ia64_clear_sched_context (void *_sc)
7498 ia64_sched_context_t sc = (ia64_sched_context_t) _sc;
7500 free (sc->prev_cycle_state);
7501 sc->prev_cycle_state = NULL;
7504 /* Frees the _SC scheduling context. */
7506 ia64_free_sched_context (void *_sc)
7508 gcc_assert (_sc != NULL);
7513 typedef rtx (* gen_func_t) (rtx, rtx);
7515 /* Return a function that will generate a load of mode MODE_NO
7516 with speculation types TS. */
7518 get_spec_load_gen_function (ds_t ts, int mode_no)
7520 static gen_func_t gen_ld_[] = {
7530 gen_zero_extendqidi2,
7531 gen_zero_extendhidi2,
7532 gen_zero_extendsidi2,
7535 static gen_func_t gen_ld_a[] = {
7545 gen_zero_extendqidi2_advanced,
7546 gen_zero_extendhidi2_advanced,
7547 gen_zero_extendsidi2_advanced,
7549 static gen_func_t gen_ld_s[] = {
7550 gen_movbi_speculative,
7551 gen_movqi_speculative,
7552 gen_movhi_speculative,
7553 gen_movsi_speculative,
7554 gen_movdi_speculative,
7555 gen_movsf_speculative,
7556 gen_movdf_speculative,
7557 gen_movxf_speculative,
7558 gen_movti_speculative,
7559 gen_zero_extendqidi2_speculative,
7560 gen_zero_extendhidi2_speculative,
7561 gen_zero_extendsidi2_speculative,
7563 static gen_func_t gen_ld_sa[] = {
7564 gen_movbi_speculative_advanced,
7565 gen_movqi_speculative_advanced,
7566 gen_movhi_speculative_advanced,
7567 gen_movsi_speculative_advanced,
7568 gen_movdi_speculative_advanced,
7569 gen_movsf_speculative_advanced,
7570 gen_movdf_speculative_advanced,
7571 gen_movxf_speculative_advanced,
7572 gen_movti_speculative_advanced,
7573 gen_zero_extendqidi2_speculative_advanced,
7574 gen_zero_extendhidi2_speculative_advanced,
7575 gen_zero_extendsidi2_speculative_advanced,
7577 static gen_func_t gen_ld_s_a[] = {
7578 gen_movbi_speculative_a,
7579 gen_movqi_speculative_a,
7580 gen_movhi_speculative_a,
7581 gen_movsi_speculative_a,
7582 gen_movdi_speculative_a,
7583 gen_movsf_speculative_a,
7584 gen_movdf_speculative_a,
7585 gen_movxf_speculative_a,
7586 gen_movti_speculative_a,
7587 gen_zero_extendqidi2_speculative_a,
7588 gen_zero_extendhidi2_speculative_a,
7589 gen_zero_extendsidi2_speculative_a,
7594 if (ts & BEGIN_DATA)
7596 if (ts & BEGIN_CONTROL)
7601 else if (ts & BEGIN_CONTROL)
7603 if ((spec_info->flags & SEL_SCHED_SPEC_DONT_CHECK_CONTROL)
7604 || ia64_needs_block_p (ts))
7607 gen_ld = gen_ld_s_a;
7614 return gen_ld[mode_no];
7617 /* Constants that help mapping 'enum machine_mode' to int. */
7620 SPEC_MODE_INVALID = -1,
7621 SPEC_MODE_FIRST = 0,
7622 SPEC_MODE_FOR_EXTEND_FIRST = 1,
7623 SPEC_MODE_FOR_EXTEND_LAST = 3,
7629 /* Offset to reach ZERO_EXTEND patterns. */
7630 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1
7633 /* Return index of the MODE. */
7635 ia64_mode_to_int (enum machine_mode mode)
7639 case BImode: return 0; /* SPEC_MODE_FIRST */
7640 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
7641 case HImode: return 2;
7642 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
7643 case DImode: return 4;
7644 case SFmode: return 5;
7645 case DFmode: return 6;
7646 case XFmode: return 7;
7648 /* ??? This mode needs testing. Bypasses for ldfp8 instruction are not
7649 mentioned in itanium[12].md. Predicate fp_register_operand also
7650 needs to be defined. Bottom line: better disable for now. */
7651 return SPEC_MODE_INVALID;
7652 default: return SPEC_MODE_INVALID;
7656 /* Provide information about speculation capabilities. */
7658 ia64_set_sched_flags (spec_info_t spec_info)
7660 unsigned int *flags = &(current_sched_info->flags);
7662 if (*flags & SCHED_RGN
7663 || *flags & SCHED_EBB
7664 || *flags & SEL_SCHED)
7668 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
7669 || (mflag_sched_ar_data_spec && reload_completed))
7674 && ((mflag_sched_br_in_data_spec && !reload_completed)
7675 || (mflag_sched_ar_in_data_spec && reload_completed)))
7679 if (mflag_sched_control_spec
7681 || reload_completed))
7683 mask |= BEGIN_CONTROL;
7685 if (!sel_sched_p () && mflag_sched_in_control_spec)
7686 mask |= BE_IN_CONTROL;
7689 spec_info->mask = mask;
7693 *flags |= USE_DEPS_LIST | DO_SPECULATION;
7695 if (mask & BE_IN_SPEC)
7698 spec_info->flags = 0;
7700 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
7701 spec_info->flags |= PREFER_NON_DATA_SPEC;
7703 if (mask & CONTROL_SPEC)
7705 if (mflag_sched_prefer_non_control_spec_insns)
7706 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
7708 if (sel_sched_p () && mflag_sel_sched_dont_check_control_spec)
7709 spec_info->flags |= SEL_SCHED_SPEC_DONT_CHECK_CONTROL;
7712 if (sched_verbose >= 1)
7713 spec_info->dump = sched_dump;
7715 spec_info->dump = 0;
7717 if (mflag_sched_count_spec_in_critical_path)
7718 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
7722 spec_info->mask = 0;
7725 /* If INSN is an appropriate load return its mode.
7726 Return -1 otherwise. */
7728 get_mode_no_for_insn (rtx insn)
7730 rtx reg, mem, mode_rtx;
7734 extract_insn_cached (insn);
7736 /* We use WHICH_ALTERNATIVE only after reload. This will
7737 guarantee that reload won't touch a speculative insn. */
7739 if (recog_data.n_operands != 2)
7742 reg = recog_data.operand[0];
7743 mem = recog_data.operand[1];
7745 /* We should use MEM's mode since REG's mode in presence of
7746 ZERO_EXTEND will always be DImode. */
7747 if (get_attr_speculable1 (insn) == SPECULABLE1_YES)
7748 /* Process non-speculative ld. */
7750 if (!reload_completed)
7752 /* Do not speculate into regs like ar.lc. */
7753 if (!REG_P (reg) || AR_REGNO_P (REGNO (reg)))
7760 rtx mem_reg = XEXP (mem, 0);
7762 if (!REG_P (mem_reg))
7768 else if (get_attr_speculable2 (insn) == SPECULABLE2_YES)
7770 gcc_assert (REG_P (reg) && MEM_P (mem));
7776 else if (get_attr_data_speculative (insn) == DATA_SPECULATIVE_YES
7777 || get_attr_control_speculative (insn) == CONTROL_SPECULATIVE_YES
7778 || get_attr_check_load (insn) == CHECK_LOAD_YES)
7779 /* Process speculative ld or ld.c. */
7781 gcc_assert (REG_P (reg) && MEM_P (mem));
7786 enum attr_itanium_class attr_class = get_attr_itanium_class (insn);
7788 if (attr_class == ITANIUM_CLASS_CHK_A
7789 || attr_class == ITANIUM_CLASS_CHK_S_I
7790 || attr_class == ITANIUM_CLASS_CHK_S_F)
7797 mode_no = ia64_mode_to_int (GET_MODE (mode_rtx));
7799 if (mode_no == SPEC_MODE_INVALID)
7802 extend_p = (GET_MODE (reg) != GET_MODE (mode_rtx));
7806 if (!(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
7807 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST))
7810 mode_no += SPEC_GEN_EXTEND_OFFSET;
7816 /* If X is an unspec part of a speculative load, return its code.
7817 Return -1 otherwise. */
7819 get_spec_unspec_code (const_rtx x)
7821 if (GET_CODE (x) != UNSPEC)
7843 /* Implement skip_rtx_p hook. */
7845 ia64_skip_rtx_p (const_rtx x)
7847 return get_spec_unspec_code (x) != -1;
7850 /* If INSN is a speculative load, return its UNSPEC code.
7851 Return -1 otherwise. */
7853 get_insn_spec_code (const_rtx insn)
7857 pat = PATTERN (insn);
7859 if (GET_CODE (pat) == COND_EXEC)
7860 pat = COND_EXEC_CODE (pat);
7862 if (GET_CODE (pat) != SET)
7865 reg = SET_DEST (pat);
7869 mem = SET_SRC (pat);
7870 if (GET_CODE (mem) == ZERO_EXTEND)
7871 mem = XEXP (mem, 0);
7873 return get_spec_unspec_code (mem);
7876 /* If INSN is a speculative load, return a ds with the speculation types.
7877 Otherwise [if INSN is a normal instruction] return 0. */
7879 ia64_get_insn_spec_ds (rtx insn)
7881 int code = get_insn_spec_code (insn);
7890 return BEGIN_CONTROL;
7893 return BEGIN_DATA | BEGIN_CONTROL;
7900 /* If INSN is a speculative load return a ds with the speculation types that
7902 Otherwise [if INSN is a normal instruction] return 0. */
7904 ia64_get_insn_checked_ds (rtx insn)
7906 int code = get_insn_spec_code (insn);
7911 return BEGIN_DATA | BEGIN_CONTROL;
7914 return BEGIN_CONTROL;
7918 return BEGIN_DATA | BEGIN_CONTROL;
7925 /* If GEN_P is true, calculate the index of needed speculation check and return
7926 speculative pattern for INSN with speculative mode TS, machine mode
7927 MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
7928 If GEN_P is false, just calculate the index of needed speculation check. */
7930 ia64_gen_spec_load (rtx insn, ds_t ts, int mode_no)
7933 gen_func_t gen_load;
7935 gen_load = get_spec_load_gen_function (ts, mode_no);
7937 new_pat = gen_load (copy_rtx (recog_data.operand[0]),
7938 copy_rtx (recog_data.operand[1]));
7940 pat = PATTERN (insn);
7941 if (GET_CODE (pat) == COND_EXEC)
7942 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7949 insn_can_be_in_speculative_p (rtx insn ATTRIBUTE_UNUSED,
7950 ds_t ds ATTRIBUTE_UNUSED)
7955 /* Implement targetm.sched.speculate_insn hook.
7956 Check if the INSN can be TS speculative.
7957 If 'no' - return -1.
7958 If 'yes' - generate speculative pattern in the NEW_PAT and return 1.
7959 If current pattern of the INSN already provides TS speculation,
7962 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
7967 gcc_assert (!(ts & ~SPECULATIVE));
7969 if (ia64_spec_check_p (insn))
7972 if ((ts & BE_IN_SPEC)
7973 && !insn_can_be_in_speculative_p (insn, ts))
7976 mode_no = get_mode_no_for_insn (insn);
7978 if (mode_no != SPEC_MODE_INVALID)
7980 if (ia64_get_insn_spec_ds (insn) == ds_get_speculation_types (ts))
7985 *new_pat = ia64_gen_spec_load (insn, ts, mode_no);
7994 /* Return a function that will generate a check for speculation TS with mode
7996 If simple check is needed, pass true for SIMPLE_CHECK_P.
7997 If clearing check is needed, pass true for CLEARING_CHECK_P. */
7999 get_spec_check_gen_function (ds_t ts, int mode_no,
8000 bool simple_check_p, bool clearing_check_p)
8002 static gen_func_t gen_ld_c_clr[] = {
8012 gen_zero_extendqidi2_clr,
8013 gen_zero_extendhidi2_clr,
8014 gen_zero_extendsidi2_clr,
8016 static gen_func_t gen_ld_c_nc[] = {
8026 gen_zero_extendqidi2_nc,
8027 gen_zero_extendhidi2_nc,
8028 gen_zero_extendsidi2_nc,
8030 static gen_func_t gen_chk_a_clr[] = {
8031 gen_advanced_load_check_clr_bi,
8032 gen_advanced_load_check_clr_qi,
8033 gen_advanced_load_check_clr_hi,
8034 gen_advanced_load_check_clr_si,
8035 gen_advanced_load_check_clr_di,
8036 gen_advanced_load_check_clr_sf,
8037 gen_advanced_load_check_clr_df,
8038 gen_advanced_load_check_clr_xf,
8039 gen_advanced_load_check_clr_ti,
8040 gen_advanced_load_check_clr_di,
8041 gen_advanced_load_check_clr_di,
8042 gen_advanced_load_check_clr_di,
8044 static gen_func_t gen_chk_a_nc[] = {
8045 gen_advanced_load_check_nc_bi,
8046 gen_advanced_load_check_nc_qi,
8047 gen_advanced_load_check_nc_hi,
8048 gen_advanced_load_check_nc_si,
8049 gen_advanced_load_check_nc_di,
8050 gen_advanced_load_check_nc_sf,
8051 gen_advanced_load_check_nc_df,
8052 gen_advanced_load_check_nc_xf,
8053 gen_advanced_load_check_nc_ti,
8054 gen_advanced_load_check_nc_di,
8055 gen_advanced_load_check_nc_di,
8056 gen_advanced_load_check_nc_di,
8058 static gen_func_t gen_chk_s[] = {
8059 gen_speculation_check_bi,
8060 gen_speculation_check_qi,
8061 gen_speculation_check_hi,
8062 gen_speculation_check_si,
8063 gen_speculation_check_di,
8064 gen_speculation_check_sf,
8065 gen_speculation_check_df,
8066 gen_speculation_check_xf,
8067 gen_speculation_check_ti,
8068 gen_speculation_check_di,
8069 gen_speculation_check_di,
8070 gen_speculation_check_di,
8073 gen_func_t *gen_check;
8075 if (ts & BEGIN_DATA)
8077 /* We don't need recovery because even if this is ld.sa
8078 ALAT entry will be allocated only if NAT bit is set to zero.
8079 So it is enough to use ld.c here. */
8083 gcc_assert (mflag_sched_spec_ldc);
8085 if (clearing_check_p)
8086 gen_check = gen_ld_c_clr;
8088 gen_check = gen_ld_c_nc;
8092 if (clearing_check_p)
8093 gen_check = gen_chk_a_clr;
8095 gen_check = gen_chk_a_nc;
8098 else if (ts & BEGIN_CONTROL)
8101 /* We might want to use ld.sa -> ld.c instead of
8104 gcc_assert (!ia64_needs_block_p (ts));
8106 if (clearing_check_p)
8107 gen_check = gen_ld_c_clr;
8109 gen_check = gen_ld_c_nc;
8113 gen_check = gen_chk_s;
8119 gcc_assert (mode_no >= 0);
8120 return gen_check[mode_no];
8123 /* Return nonzero, if INSN needs branchy recovery check. */
8125 ia64_needs_block_p (ds_t ts)
8127 if (ts & BEGIN_DATA)
8128 return !mflag_sched_spec_ldc;
8130 gcc_assert ((ts & BEGIN_CONTROL) != 0);
8132 return !(mflag_sched_spec_control_ldc && mflag_sched_spec_ldc);
8135 /* Generate (or regenerate, if (MUTATE_P)) recovery check for INSN.
8136 If (LABEL != 0 || MUTATE_P), generate branchy recovery check.
8137 Otherwise, generate a simple check. */
8139 ia64_gen_spec_check (rtx insn, rtx label, ds_t ds)
8141 rtx op1, pat, check_pat;
8142 gen_func_t gen_check;
8145 mode_no = get_mode_no_for_insn (insn);
8146 gcc_assert (mode_no >= 0);
8152 gcc_assert (!ia64_needs_block_p (ds));
8153 op1 = copy_rtx (recog_data.operand[1]);
8156 gen_check = get_spec_check_gen_function (ds, mode_no, label == NULL_RTX,
8159 check_pat = gen_check (copy_rtx (recog_data.operand[0]), op1);
8161 pat = PATTERN (insn);
8162 if (GET_CODE (pat) == COND_EXEC)
8163 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
8169 /* Return nonzero, if X is branchy recovery check. */
8171 ia64_spec_check_p (rtx x)
8174 if (GET_CODE (x) == COND_EXEC)
8175 x = COND_EXEC_CODE (x);
8176 if (GET_CODE (x) == SET)
8177 return ia64_spec_check_src_p (SET_SRC (x));
8181 /* Return nonzero, if SRC belongs to recovery check. */
8183 ia64_spec_check_src_p (rtx src)
8185 if (GET_CODE (src) == IF_THEN_ELSE)
8190 if (GET_CODE (t) == NE)
8194 if (GET_CODE (t) == UNSPEC)
8200 if (code == UNSPEC_LDCCLR
8201 || code == UNSPEC_LDCNC
8202 || code == UNSPEC_CHKACLR
8203 || code == UNSPEC_CHKANC
8204 || code == UNSPEC_CHKS)
8206 gcc_assert (code != 0);
8216 /* The following page contains abstract data `bundle states' which are
8217 used for bundling insns (inserting nops and template generation). */
8219 /* The following describes state of insn bundling. */
8223 /* Unique bundle state number to identify them in the debugging
8226 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
8227 /* number nops before and after the insn */
8228 short before_nops_num, after_nops_num;
8229 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
8231 int cost; /* cost of the state in cycles */
8232 int accumulated_insns_num; /* number of all previous insns including
8233 nops. L is considered as 2 insns */
8234 int branch_deviation; /* deviation of previous branches from 3rd slots */
8235 int middle_bundle_stops; /* number of stop bits in the middle of bundles */
8236 struct bundle_state *next; /* next state with the same insn_num */
8237 struct bundle_state *originator; /* originator (previous insn state) */
8238 /* All bundle states are in the following chain. */
8239 struct bundle_state *allocated_states_chain;
8240 /* The DFA State after issuing the insn and the nops. */
8244 /* The following is map insn number to the corresponding bundle state. */
8246 static struct bundle_state **index_to_bundle_states;
8248 /* The unique number of next bundle state. */
8250 static int bundle_states_num;
8252 /* All allocated bundle states are in the following chain. */
8254 static struct bundle_state *allocated_bundle_states_chain;
8256 /* All allocated but not used bundle states are in the following
8259 static struct bundle_state *free_bundle_state_chain;
8262 /* The following function returns a free bundle state. */
8264 static struct bundle_state *
8265 get_free_bundle_state (void)
8267 struct bundle_state *result;
8269 if (free_bundle_state_chain != NULL)
8271 result = free_bundle_state_chain;
8272 free_bundle_state_chain = result->next;
8276 result = XNEW (struct bundle_state);
8277 result->dfa_state = xmalloc (dfa_state_size);
8278 result->allocated_states_chain = allocated_bundle_states_chain;
8279 allocated_bundle_states_chain = result;
8281 result->unique_num = bundle_states_num++;
8286 /* The following function frees given bundle state. */
8289 free_bundle_state (struct bundle_state *state)
8291 state->next = free_bundle_state_chain;
8292 free_bundle_state_chain = state;
8295 /* Start work with abstract data `bundle states'. */
8298 initiate_bundle_states (void)
8300 bundle_states_num = 0;
8301 free_bundle_state_chain = NULL;
8302 allocated_bundle_states_chain = NULL;
8305 /* Finish work with abstract data `bundle states'. */
8308 finish_bundle_states (void)
8310 struct bundle_state *curr_state, *next_state;
8312 for (curr_state = allocated_bundle_states_chain;
8314 curr_state = next_state)
8316 next_state = curr_state->allocated_states_chain;
8317 free (curr_state->dfa_state);
8322 /* Hash table of the bundle states. The key is dfa_state and insn_num
8323 of the bundle states. */
8325 static htab_t bundle_state_table;
8327 /* The function returns hash of BUNDLE_STATE. */
8330 bundle_state_hash (const void *bundle_state)
8332 const struct bundle_state *const state
8333 = (const struct bundle_state *) bundle_state;
8336 for (result = i = 0; i < dfa_state_size; i++)
8337 result += (((unsigned char *) state->dfa_state) [i]
8338 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
8339 return result + state->insn_num;
8342 /* The function returns nonzero if the bundle state keys are equal. */
8345 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
8347 const struct bundle_state *const state1
8348 = (const struct bundle_state *) bundle_state_1;
8349 const struct bundle_state *const state2
8350 = (const struct bundle_state *) bundle_state_2;
8352 return (state1->insn_num == state2->insn_num
8353 && memcmp (state1->dfa_state, state2->dfa_state,
8354 dfa_state_size) == 0);
8357 /* The function inserts the BUNDLE_STATE into the hash table. The
8358 function returns nonzero if the bundle has been inserted into the
8359 table. The table contains the best bundle state with given key. */
8362 insert_bundle_state (struct bundle_state *bundle_state)
8366 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, INSERT);
8367 if (*entry_ptr == NULL)
8369 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
8370 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
8371 *entry_ptr = (void *) bundle_state;
8374 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
8375 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
8376 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
8377 > bundle_state->accumulated_insns_num
8378 || (((struct bundle_state *)
8379 *entry_ptr)->accumulated_insns_num
8380 == bundle_state->accumulated_insns_num
8381 && (((struct bundle_state *)
8382 *entry_ptr)->branch_deviation
8383 > bundle_state->branch_deviation
8384 || (((struct bundle_state *)
8385 *entry_ptr)->branch_deviation
8386 == bundle_state->branch_deviation
8387 && ((struct bundle_state *)
8388 *entry_ptr)->middle_bundle_stops
8389 > bundle_state->middle_bundle_stops))))))
8392 struct bundle_state temp;
8394 temp = *(struct bundle_state *) *entry_ptr;
8395 *(struct bundle_state *) *entry_ptr = *bundle_state;
8396 ((struct bundle_state *) *entry_ptr)->next = temp.next;
8397 *bundle_state = temp;
8402 /* Start work with the hash table. */
8405 initiate_bundle_state_table (void)
8407 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
8411 /* Finish work with the hash table. */
8414 finish_bundle_state_table (void)
8416 htab_delete (bundle_state_table);
8421 /* The following variable is a insn `nop' used to check bundle states
8422 with different number of inserted nops. */
8424 static rtx ia64_nop;
8426 /* The following function tries to issue NOPS_NUM nops for the current
8427 state without advancing processor cycle. If it failed, the
8428 function returns FALSE and frees the current state. */
8431 try_issue_nops (struct bundle_state *curr_state, int nops_num)
8435 for (i = 0; i < nops_num; i++)
8436 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
8438 free_bundle_state (curr_state);
8444 /* The following function tries to issue INSN for the current
8445 state without advancing processor cycle. If it failed, the
8446 function returns FALSE and frees the current state. */
8449 try_issue_insn (struct bundle_state *curr_state, rtx insn)
8451 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
8453 free_bundle_state (curr_state);
8459 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
8460 starting with ORIGINATOR without advancing processor cycle. If
8461 TRY_BUNDLE_END_P is TRUE, the function also/only (if
8462 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill all bundle.
8463 If it was successful, the function creates new bundle state and
8464 insert into the hash table and into `index_to_bundle_states'. */
8467 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
8468 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
8470 struct bundle_state *curr_state;
8472 curr_state = get_free_bundle_state ();
8473 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
8474 curr_state->insn = insn;
8475 curr_state->insn_num = originator->insn_num + 1;
8476 curr_state->cost = originator->cost;
8477 curr_state->originator = originator;
8478 curr_state->before_nops_num = before_nops_num;
8479 curr_state->after_nops_num = 0;
8480 curr_state->accumulated_insns_num
8481 = originator->accumulated_insns_num + before_nops_num;
8482 curr_state->branch_deviation = originator->branch_deviation;
8483 curr_state->middle_bundle_stops = originator->middle_bundle_stops;
8485 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
8487 gcc_assert (GET_MODE (insn) != TImode);
8488 if (!try_issue_nops (curr_state, before_nops_num))
8490 if (!try_issue_insn (curr_state, insn))
8492 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
8493 if (curr_state->accumulated_insns_num % 3 != 0)
8494 curr_state->middle_bundle_stops++;
8495 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
8496 && curr_state->accumulated_insns_num % 3 != 0)
8498 free_bundle_state (curr_state);
8502 else if (GET_MODE (insn) != TImode)
8504 if (!try_issue_nops (curr_state, before_nops_num))
8506 if (!try_issue_insn (curr_state, insn))
8508 curr_state->accumulated_insns_num++;
8509 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
8510 && asm_noperands (PATTERN (insn)) < 0);
8512 if (ia64_safe_type (insn) == TYPE_L)
8513 curr_state->accumulated_insns_num++;
8517 /* If this is an insn that must be first in a group, then don't allow
8518 nops to be emitted before it. Currently, alloc is the only such
8519 supported instruction. */
8520 /* ??? The bundling automatons should handle this for us, but they do
8521 not yet have support for the first_insn attribute. */
8522 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
8524 free_bundle_state (curr_state);
8528 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
8529 state_transition (curr_state->dfa_state, NULL);
8531 if (!try_issue_nops (curr_state, before_nops_num))
8533 if (!try_issue_insn (curr_state, insn))
8535 curr_state->accumulated_insns_num++;
8536 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
8537 || asm_noperands (PATTERN (insn)) >= 0)
8539 /* Finish bundle containing asm insn. */
8540 curr_state->after_nops_num
8541 = 3 - curr_state->accumulated_insns_num % 3;
8542 curr_state->accumulated_insns_num
8543 += 3 - curr_state->accumulated_insns_num % 3;
8545 else if (ia64_safe_type (insn) == TYPE_L)
8546 curr_state->accumulated_insns_num++;
8548 if (ia64_safe_type (insn) == TYPE_B)
8549 curr_state->branch_deviation
8550 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
8551 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
8553 if (!only_bundle_end_p && insert_bundle_state (curr_state))
8556 struct bundle_state *curr_state1;
8557 struct bundle_state *allocated_states_chain;
8559 curr_state1 = get_free_bundle_state ();
8560 dfa_state = curr_state1->dfa_state;
8561 allocated_states_chain = curr_state1->allocated_states_chain;
8562 *curr_state1 = *curr_state;
8563 curr_state1->dfa_state = dfa_state;
8564 curr_state1->allocated_states_chain = allocated_states_chain;
8565 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
8567 curr_state = curr_state1;
8569 if (!try_issue_nops (curr_state,
8570 3 - curr_state->accumulated_insns_num % 3))
8572 curr_state->after_nops_num
8573 = 3 - curr_state->accumulated_insns_num % 3;
8574 curr_state->accumulated_insns_num
8575 += 3 - curr_state->accumulated_insns_num % 3;
8577 if (!insert_bundle_state (curr_state))
8578 free_bundle_state (curr_state);
8582 /* The following function returns position in the two window bundle
8586 get_max_pos (state_t state)
8588 if (cpu_unit_reservation_p (state, pos_6))
8590 else if (cpu_unit_reservation_p (state, pos_5))
8592 else if (cpu_unit_reservation_p (state, pos_4))
8594 else if (cpu_unit_reservation_p (state, pos_3))
8596 else if (cpu_unit_reservation_p (state, pos_2))
8598 else if (cpu_unit_reservation_p (state, pos_1))
8604 /* The function returns code of a possible template for given position
8605 and state. The function should be called only with 2 values of
8606 position equal to 3 or 6. We avoid generating F NOPs by putting
8607 templates containing F insns at the end of the template search
8608 because undocumented anomaly in McKinley derived cores which can
8609 cause stalls if an F-unit insn (including a NOP) is issued within a
8610 six-cycle window after reading certain application registers (such
8611 as ar.bsp). Furthermore, power-considerations also argue against
8612 the use of F-unit instructions unless they're really needed. */
8615 get_template (state_t state, int pos)
8620 if (cpu_unit_reservation_p (state, _0mmi_))
8622 else if (cpu_unit_reservation_p (state, _0mii_))
8624 else if (cpu_unit_reservation_p (state, _0mmb_))
8626 else if (cpu_unit_reservation_p (state, _0mib_))
8628 else if (cpu_unit_reservation_p (state, _0mbb_))
8630 else if (cpu_unit_reservation_p (state, _0bbb_))
8632 else if (cpu_unit_reservation_p (state, _0mmf_))
8634 else if (cpu_unit_reservation_p (state, _0mfi_))
8636 else if (cpu_unit_reservation_p (state, _0mfb_))
8638 else if (cpu_unit_reservation_p (state, _0mlx_))
8643 if (cpu_unit_reservation_p (state, _1mmi_))
8645 else if (cpu_unit_reservation_p (state, _1mii_))
8647 else if (cpu_unit_reservation_p (state, _1mmb_))
8649 else if (cpu_unit_reservation_p (state, _1mib_))
8651 else if (cpu_unit_reservation_p (state, _1mbb_))
8653 else if (cpu_unit_reservation_p (state, _1bbb_))
8655 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
8657 else if (cpu_unit_reservation_p (state, _1mfi_))
8659 else if (cpu_unit_reservation_p (state, _1mfb_))
8661 else if (cpu_unit_reservation_p (state, _1mlx_))
8670 /* True when INSN is important for bundling. */
8672 important_for_bundling_p (rtx insn)
8674 return (INSN_P (insn)
8675 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8676 && GET_CODE (PATTERN (insn)) != USE
8677 && GET_CODE (PATTERN (insn)) != CLOBBER);
8680 /* The following function returns an insn important for insn bundling
8681 followed by INSN and before TAIL. */
8684 get_next_important_insn (rtx insn, rtx tail)
8686 for (; insn && insn != tail; insn = NEXT_INSN (insn))
8687 if (important_for_bundling_p (insn))
8692 /* Add a bundle selector TEMPLATE0 before INSN. */
8695 ia64_add_bundle_selector_before (int template0, rtx insn)
8697 rtx b = gen_bundle_selector (GEN_INT (template0));
8699 ia64_emit_insn_before (b, insn);
8700 #if NR_BUNDLES == 10
8701 if ((template0 == 4 || template0 == 5)
8702 && ia64_except_unwind_info (&global_options) == UI_TARGET)
8705 rtx note = NULL_RTX;
8707 /* In .mbb and .bbb bundles, check if CALL_INSN isn't in the
8708 first or second slot. If it is and has REG_EH_NOTE set, copy it
8709 to following nops, as br.call sets rp to the address of following
8710 bundle and therefore an EH region end must be on a bundle
8712 insn = PREV_INSN (insn);
8713 for (i = 0; i < 3; i++)
8716 insn = next_active_insn (insn);
8717 while (GET_CODE (insn) == INSN
8718 && get_attr_empty (insn) == EMPTY_YES);
8719 if (GET_CODE (insn) == CALL_INSN)
8720 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
8725 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
8726 || code == CODE_FOR_nop_b);
8727 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
8730 add_reg_note (insn, REG_EH_REGION, XEXP (note, 0));
8737 /* The following function does insn bundling. Bundling means
8738 inserting templates and nop insns to fit insn groups into permitted
8739 templates. Instruction scheduling uses NDFA (non-deterministic
8740 finite automata) encoding informations about the templates and the
8741 inserted nops. Nondeterminism of the automata permits follows
8742 all possible insn sequences very fast.
8744 Unfortunately it is not possible to get information about inserting
8745 nop insns and used templates from the automata states. The
8746 automata only says that we can issue an insn possibly inserting
8747 some nops before it and using some template. Therefore insn
8748 bundling in this function is implemented by using DFA
8749 (deterministic finite automata). We follow all possible insn
8750 sequences by inserting 0-2 nops (that is what the NDFA describe for
8751 insn scheduling) before/after each insn being bundled. We know the
8752 start of simulated processor cycle from insn scheduling (insn
8753 starting a new cycle has TImode).
8755 Simple implementation of insn bundling would create enormous
8756 number of possible insn sequences satisfying information about new
8757 cycle ticks taken from the insn scheduling. To make the algorithm
8758 practical we use dynamic programming. Each decision (about
8759 inserting nops and implicitly about previous decisions) is described
8760 by structure bundle_state (see above). If we generate the same
8761 bundle state (key is automaton state after issuing the insns and
8762 nops for it), we reuse already generated one. As consequence we
8763 reject some decisions which cannot improve the solution and
8764 reduce memory for the algorithm.
8766 When we reach the end of EBB (extended basic block), we choose the
8767 best sequence and then, moving back in EBB, insert templates for
8768 the best alternative. The templates are taken from querying
8769 automaton state for each insn in chosen bundle states.
8771 So the algorithm makes two (forward and backward) passes through
8775 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
8777 struct bundle_state *curr_state, *next_state, *best_state;
8778 rtx insn, next_insn;
8780 int i, bundle_end_p, only_bundle_end_p, asm_p;
8781 int pos = 0, max_pos, template0, template1;
8784 enum attr_type type;
8787 /* Count insns in the EBB. */
8788 for (insn = NEXT_INSN (prev_head_insn);
8789 insn && insn != tail;
8790 insn = NEXT_INSN (insn))
8796 dfa_clean_insn_cache ();
8797 initiate_bundle_state_table ();
8798 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
8799 /* First (forward) pass -- generation of bundle states. */
8800 curr_state = get_free_bundle_state ();
8801 curr_state->insn = NULL;
8802 curr_state->before_nops_num = 0;
8803 curr_state->after_nops_num = 0;
8804 curr_state->insn_num = 0;
8805 curr_state->cost = 0;
8806 curr_state->accumulated_insns_num = 0;
8807 curr_state->branch_deviation = 0;
8808 curr_state->middle_bundle_stops = 0;
8809 curr_state->next = NULL;
8810 curr_state->originator = NULL;
8811 state_reset (curr_state->dfa_state);
8812 index_to_bundle_states [0] = curr_state;
8814 /* Shift cycle mark if it is put on insn which could be ignored. */
8815 for (insn = NEXT_INSN (prev_head_insn);
8817 insn = NEXT_INSN (insn))
8819 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
8820 || GET_CODE (PATTERN (insn)) == USE
8821 || GET_CODE (PATTERN (insn)) == CLOBBER)
8822 && GET_MODE (insn) == TImode)
8824 PUT_MODE (insn, VOIDmode);
8825 for (next_insn = NEXT_INSN (insn);
8827 next_insn = NEXT_INSN (next_insn))
8828 if (INSN_P (next_insn)
8829 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
8830 && GET_CODE (PATTERN (next_insn)) != USE
8831 && GET_CODE (PATTERN (next_insn)) != CLOBBER
8832 && INSN_CODE (next_insn) != CODE_FOR_insn_group_barrier)
8834 PUT_MODE (next_insn, TImode);
8838 /* Forward pass: generation of bundle states. */
8839 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8843 gcc_assert (INSN_P (insn)
8844 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8845 && GET_CODE (PATTERN (insn)) != USE
8846 && GET_CODE (PATTERN (insn)) != CLOBBER);
8847 type = ia64_safe_type (insn);
8848 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8850 index_to_bundle_states [insn_num] = NULL;
8851 for (curr_state = index_to_bundle_states [insn_num - 1];
8853 curr_state = next_state)
8855 pos = curr_state->accumulated_insns_num % 3;
8856 next_state = curr_state->next;
8857 /* We must fill up the current bundle in order to start a
8858 subsequent asm insn in a new bundle. Asm insn is always
8859 placed in a separate bundle. */
8861 = (next_insn != NULL_RTX
8862 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
8863 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
8864 /* We may fill up the current bundle if it is the cycle end
8865 without a group barrier. */
8867 = (only_bundle_end_p || next_insn == NULL_RTX
8868 || (GET_MODE (next_insn) == TImode
8869 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
8870 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
8872 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8874 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8876 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8879 gcc_assert (index_to_bundle_states [insn_num]);
8880 for (curr_state = index_to_bundle_states [insn_num];
8882 curr_state = curr_state->next)
8883 if (verbose >= 2 && dump)
8885 /* This structure is taken from generated code of the
8886 pipeline hazard recognizer (see file insn-attrtab.c).
8887 Please don't forget to change the structure if a new
8888 automaton is added to .md file. */
8891 unsigned short one_automaton_state;
8892 unsigned short oneb_automaton_state;
8893 unsigned short two_automaton_state;
8894 unsigned short twob_automaton_state;
8899 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d state %d) for %d\n",
8900 curr_state->unique_num,
8901 (curr_state->originator == NULL
8902 ? -1 : curr_state->originator->unique_num),
8904 curr_state->before_nops_num, curr_state->after_nops_num,
8905 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8906 curr_state->middle_bundle_stops,
8907 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8912 /* We should find a solution because the 2nd insn scheduling has
8914 gcc_assert (index_to_bundle_states [insn_num]);
8915 /* Find a state corresponding to the best insn sequence. */
8917 for (curr_state = index_to_bundle_states [insn_num];
8919 curr_state = curr_state->next)
8920 /* We are just looking at the states with fully filled up last
8921 bundle. The first we prefer insn sequences with minimal cost
8922 then with minimal inserted nops and finally with branch insns
8923 placed in the 3rd slots. */
8924 if (curr_state->accumulated_insns_num % 3 == 0
8925 && (best_state == NULL || best_state->cost > curr_state->cost
8926 || (best_state->cost == curr_state->cost
8927 && (curr_state->accumulated_insns_num
8928 < best_state->accumulated_insns_num
8929 || (curr_state->accumulated_insns_num
8930 == best_state->accumulated_insns_num
8931 && (curr_state->branch_deviation
8932 < best_state->branch_deviation
8933 || (curr_state->branch_deviation
8934 == best_state->branch_deviation
8935 && curr_state->middle_bundle_stops
8936 < best_state->middle_bundle_stops)))))))
8937 best_state = curr_state;
8938 /* Second (backward) pass: adding nops and templates. */
8939 gcc_assert (best_state);
8940 insn_num = best_state->before_nops_num;
8941 template0 = template1 = -1;
8942 for (curr_state = best_state;
8943 curr_state->originator != NULL;
8944 curr_state = curr_state->originator)
8946 insn = curr_state->insn;
8947 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8948 || asm_noperands (PATTERN (insn)) >= 0);
8950 if (verbose >= 2 && dump)
8954 unsigned short one_automaton_state;
8955 unsigned short oneb_automaton_state;
8956 unsigned short two_automaton_state;
8957 unsigned short twob_automaton_state;
8962 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, mid.stops %d, state %d) for %d\n",
8963 curr_state->unique_num,
8964 (curr_state->originator == NULL
8965 ? -1 : curr_state->originator->unique_num),
8967 curr_state->before_nops_num, curr_state->after_nops_num,
8968 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8969 curr_state->middle_bundle_stops,
8970 ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state,
8973 /* Find the position in the current bundle window. The window can
8974 contain at most two bundles. Two bundle window means that
8975 the processor will make two bundle rotation. */
8976 max_pos = get_max_pos (curr_state->dfa_state);
8978 /* The following (negative template number) means that the
8979 processor did one bundle rotation. */
8980 || (max_pos == 3 && template0 < 0))
8982 /* We are at the end of the window -- find template(s) for
8986 template0 = get_template (curr_state->dfa_state, 3);
8989 template1 = get_template (curr_state->dfa_state, 3);
8990 template0 = get_template (curr_state->dfa_state, 6);
8993 if (max_pos > 3 && template1 < 0)
8994 /* It may happen when we have the stop inside a bundle. */
8996 gcc_assert (pos <= 3);
8997 template1 = get_template (curr_state->dfa_state, 3);
9001 /* Emit nops after the current insn. */
9002 for (i = 0; i < curr_state->after_nops_num; i++)
9005 emit_insn_after (nop, insn);
9007 gcc_assert (pos >= 0);
9010 /* We are at the start of a bundle: emit the template
9011 (it should be defined). */
9012 gcc_assert (template0 >= 0);
9013 ia64_add_bundle_selector_before (template0, nop);
9014 /* If we have two bundle window, we make one bundle
9015 rotation. Otherwise template0 will be undefined
9016 (negative value). */
9017 template0 = template1;
9021 /* Move the position backward in the window. Group barrier has
9022 no slot. Asm insn takes all bundle. */
9023 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9024 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9025 && asm_noperands (PATTERN (insn)) < 0)
9027 /* Long insn takes 2 slots. */
9028 if (ia64_safe_type (insn) == TYPE_L)
9030 gcc_assert (pos >= 0);
9032 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
9033 && GET_CODE (PATTERN (insn)) != ASM_INPUT
9034 && asm_noperands (PATTERN (insn)) < 0)
9036 /* The current insn is at the bundle start: emit the
9038 gcc_assert (template0 >= 0);
9039 ia64_add_bundle_selector_before (template0, insn);
9040 b = PREV_INSN (insn);
9042 /* See comment above in analogous place for emitting nops
9044 template0 = template1;
9047 /* Emit nops after the current insn. */
9048 for (i = 0; i < curr_state->before_nops_num; i++)
9051 ia64_emit_insn_before (nop, insn);
9052 nop = PREV_INSN (insn);
9055 gcc_assert (pos >= 0);
9058 /* See comment above in analogous place for emitting nops
9060 gcc_assert (template0 >= 0);
9061 ia64_add_bundle_selector_before (template0, insn);
9062 b = PREV_INSN (insn);
9064 template0 = template1;
9070 #ifdef ENABLE_CHECKING
9072 /* Assert right calculation of middle_bundle_stops. */
9073 int num = best_state->middle_bundle_stops;
9074 bool start_bundle = true, end_bundle = false;
9076 for (insn = NEXT_INSN (prev_head_insn);
9077 insn && insn != tail;
9078 insn = NEXT_INSN (insn))
9082 if (recog_memoized (insn) == CODE_FOR_bundle_selector)
9083 start_bundle = true;
9088 for (next_insn = NEXT_INSN (insn);
9089 next_insn && next_insn != tail;
9090 next_insn = NEXT_INSN (next_insn))
9091 if (INSN_P (next_insn)
9092 && (ia64_safe_itanium_class (next_insn)
9093 != ITANIUM_CLASS_IGNORE
9094 || recog_memoized (next_insn)
9095 == CODE_FOR_bundle_selector)
9096 && GET_CODE (PATTERN (next_insn)) != USE
9097 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
9100 end_bundle = next_insn == NULL_RTX
9101 || next_insn == tail
9102 || (INSN_P (next_insn)
9103 && recog_memoized (next_insn)
9104 == CODE_FOR_bundle_selector);
9105 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier
9106 && !start_bundle && !end_bundle
9108 && GET_CODE (PATTERN (next_insn)) != ASM_INPUT
9109 && asm_noperands (PATTERN (next_insn)) < 0)
9112 start_bundle = false;
9116 gcc_assert (num == 0);
9120 free (index_to_bundle_states);
9121 finish_bundle_state_table ();
9123 dfa_clean_insn_cache ();
9126 /* The following function is called at the end of scheduling BB or
9127 EBB. After reload, it inserts stop bits and does insn bundling. */
9130 ia64_sched_finish (FILE *dump, int sched_verbose)
9133 fprintf (dump, "// Finishing schedule.\n");
9134 if (!reload_completed)
9136 if (reload_completed)
9138 final_emit_insn_group_barriers (dump);
9139 bundling (dump, sched_verbose, current_sched_info->prev_head,
9140 current_sched_info->next_tail);
9141 if (sched_verbose && dump)
9142 fprintf (dump, "// finishing %d-%d\n",
9143 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
9144 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
9150 /* The following function inserts stop bits in scheduled BB or EBB. */
9153 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
9156 int need_barrier_p = 0;
9157 int seen_good_insn = 0;
9159 init_insn_group_barriers ();
9161 for (insn = NEXT_INSN (current_sched_info->prev_head);
9162 insn != current_sched_info->next_tail;
9163 insn = NEXT_INSN (insn))
9165 if (GET_CODE (insn) == BARRIER)
9167 rtx last = prev_active_insn (insn);
9171 if (GET_CODE (last) == JUMP_INSN
9172 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
9173 last = prev_active_insn (last);
9174 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
9175 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
9177 init_insn_group_barriers ();
9181 else if (NONDEBUG_INSN_P (insn))
9183 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
9185 init_insn_group_barriers ();
9189 else if (need_barrier_p || group_barrier_needed (insn)
9190 || (mflag_sched_stop_bits_after_every_cycle
9191 && GET_MODE (insn) == TImode
9194 if (TARGET_EARLY_STOP_BITS)
9199 last != current_sched_info->prev_head;
9200 last = PREV_INSN (last))
9201 if (INSN_P (last) && GET_MODE (last) == TImode
9202 && stops_p [INSN_UID (last)])
9204 if (last == current_sched_info->prev_head)
9206 last = prev_active_insn (last);
9208 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
9209 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
9211 init_insn_group_barriers ();
9212 for (last = NEXT_INSN (last);
9214 last = NEXT_INSN (last))
9217 group_barrier_needed (last);
9218 if (recog_memoized (last) >= 0
9219 && important_for_bundling_p (last))
9225 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
9227 init_insn_group_barriers ();
9230 group_barrier_needed (insn);
9231 if (recog_memoized (insn) >= 0
9232 && important_for_bundling_p (insn))
9235 else if (recog_memoized (insn) >= 0
9236 && important_for_bundling_p (insn))
9238 need_barrier_p = (GET_CODE (insn) == CALL_INSN
9239 || GET_CODE (PATTERN (insn)) == ASM_INPUT
9240 || asm_noperands (PATTERN (insn)) >= 0);
9247 /* If the following function returns TRUE, we will use the DFA
9251 ia64_first_cycle_multipass_dfa_lookahead (void)
9253 return (reload_completed ? 6 : 4);
9256 /* The following function initiates variable `dfa_pre_cycle_insn'. */
9259 ia64_init_dfa_pre_cycle_insn (void)
9261 if (temp_dfa_state == NULL)
9263 dfa_state_size = state_size ();
9264 temp_dfa_state = xmalloc (dfa_state_size);
9265 prev_cycle_state = xmalloc (dfa_state_size);
9267 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
9268 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
9269 recog_memoized (dfa_pre_cycle_insn);
9270 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
9271 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
9272 recog_memoized (dfa_stop_insn);
9275 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
9276 used by the DFA insn scheduler. */
9279 ia64_dfa_pre_cycle_insn (void)
9281 return dfa_pre_cycle_insn;
9284 /* The following function returns TRUE if PRODUCER (of type ilog or
9285 ld) produces address for CONSUMER (of type st or stf). */
9288 ia64_st_address_bypass_p (rtx producer, rtx consumer)
9292 gcc_assert (producer && consumer);
9293 dest = ia64_single_set (producer);
9295 reg = SET_DEST (dest);
9297 if (GET_CODE (reg) == SUBREG)
9298 reg = SUBREG_REG (reg);
9299 gcc_assert (GET_CODE (reg) == REG);
9301 dest = ia64_single_set (consumer);
9303 mem = SET_DEST (dest);
9304 gcc_assert (mem && GET_CODE (mem) == MEM);
9305 return reg_mentioned_p (reg, mem);
9308 /* The following function returns TRUE if PRODUCER (of type ilog or
9309 ld) produces address for CONSUMER (of type ld or fld). */
9312 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
9314 rtx dest, src, reg, mem;
9316 gcc_assert (producer && consumer);
9317 dest = ia64_single_set (producer);
9319 reg = SET_DEST (dest);
9321 if (GET_CODE (reg) == SUBREG)
9322 reg = SUBREG_REG (reg);
9323 gcc_assert (GET_CODE (reg) == REG);
9325 src = ia64_single_set (consumer);
9327 mem = SET_SRC (src);
9330 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
9331 mem = XVECEXP (mem, 0, 0);
9332 else if (GET_CODE (mem) == IF_THEN_ELSE)
9333 /* ??? Is this bypass necessary for ld.c? */
9335 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
9336 mem = XEXP (mem, 1);
9339 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
9340 mem = XEXP (mem, 0);
9342 if (GET_CODE (mem) == UNSPEC)
9344 int c = XINT (mem, 1);
9346 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDS_A
9347 || c == UNSPEC_LDSA);
9348 mem = XVECEXP (mem, 0, 0);
9351 /* Note that LO_SUM is used for GOT loads. */
9352 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
9354 return reg_mentioned_p (reg, mem);
9357 /* The following function returns TRUE if INSN produces address for a
9358 load/store insn. We will place such insns into M slot because it
9359 decreases its latency time. */
9362 ia64_produce_address_p (rtx insn)
9368 /* Emit pseudo-ops for the assembler to describe predicate relations.
9369 At present this assumes that we only consider predicate pairs to
9370 be mutex, and that the assembler can deduce proper values from
9371 straight-line code. */
9374 emit_predicate_relation_info (void)
9378 FOR_EACH_BB_REVERSE (bb)
9381 rtx head = BB_HEAD (bb);
9383 /* We only need such notes at code labels. */
9384 if (GET_CODE (head) != CODE_LABEL)
9386 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
9387 head = NEXT_INSN (head);
9389 /* Skip p0, which may be thought to be live due to (reg:DI p0)
9390 grabbing the entire block of predicate registers. */
9391 for (r = PR_REG (2); r < PR_REG (64); r += 2)
9392 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
9394 rtx p = gen_rtx_REG (BImode, r);
9395 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
9396 if (head == BB_END (bb))
9402 /* Look for conditional calls that do not return, and protect predicate
9403 relations around them. Otherwise the assembler will assume the call
9404 returns, and complain about uses of call-clobbered predicates after
9406 FOR_EACH_BB_REVERSE (bb)
9408 rtx insn = BB_HEAD (bb);
9412 if (GET_CODE (insn) == CALL_INSN
9413 && GET_CODE (PATTERN (insn)) == COND_EXEC
9414 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
9416 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
9417 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
9418 if (BB_HEAD (bb) == insn)
9420 if (BB_END (bb) == insn)
9424 if (insn == BB_END (bb))
9426 insn = NEXT_INSN (insn);
9431 /* Perform machine dependent operations on the rtl chain INSNS. */
9436 /* We are freeing block_for_insn in the toplev to keep compatibility
9437 with old MDEP_REORGS that are not CFG based. Recompute it now. */
9438 compute_bb_for_insn ();
9440 /* If optimizing, we'll have split before scheduling. */
9444 if (optimize && ia64_flag_schedule_insns2
9445 && dbg_cnt (ia64_sched2))
9447 timevar_push (TV_SCHED2);
9448 ia64_final_schedule = 1;
9450 initiate_bundle_states ();
9451 ia64_nop = make_insn_raw (gen_nop ());
9452 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
9453 recog_memoized (ia64_nop);
9454 clocks_length = get_max_uid () + 1;
9455 stops_p = XCNEWVEC (char, clocks_length);
9457 if (ia64_tune == PROCESSOR_ITANIUM2)
9459 pos_1 = get_cpu_unit_code ("2_1");
9460 pos_2 = get_cpu_unit_code ("2_2");
9461 pos_3 = get_cpu_unit_code ("2_3");
9462 pos_4 = get_cpu_unit_code ("2_4");
9463 pos_5 = get_cpu_unit_code ("2_5");
9464 pos_6 = get_cpu_unit_code ("2_6");
9465 _0mii_ = get_cpu_unit_code ("2b_0mii.");
9466 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
9467 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
9468 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
9469 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
9470 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
9471 _0mib_ = get_cpu_unit_code ("2b_0mib.");
9472 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
9473 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
9474 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
9475 _1mii_ = get_cpu_unit_code ("2b_1mii.");
9476 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
9477 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
9478 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
9479 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
9480 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
9481 _1mib_ = get_cpu_unit_code ("2b_1mib.");
9482 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
9483 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
9484 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
9488 pos_1 = get_cpu_unit_code ("1_1");
9489 pos_2 = get_cpu_unit_code ("1_2");
9490 pos_3 = get_cpu_unit_code ("1_3");
9491 pos_4 = get_cpu_unit_code ("1_4");
9492 pos_5 = get_cpu_unit_code ("1_5");
9493 pos_6 = get_cpu_unit_code ("1_6");
9494 _0mii_ = get_cpu_unit_code ("1b_0mii.");
9495 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
9496 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
9497 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
9498 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
9499 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
9500 _0mib_ = get_cpu_unit_code ("1b_0mib.");
9501 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
9502 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
9503 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
9504 _1mii_ = get_cpu_unit_code ("1b_1mii.");
9505 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
9506 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
9507 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
9508 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
9509 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
9510 _1mib_ = get_cpu_unit_code ("1b_1mib.");
9511 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
9512 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
9513 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
9516 if (flag_selective_scheduling2
9517 && !maybe_skip_selective_scheduling ())
9518 run_selective_scheduling ();
9522 /* Redo alignment computation, as it might gone wrong. */
9523 compute_alignments ();
9525 /* We cannot reuse this one because it has been corrupted by the
9527 finish_bundle_states ();
9530 emit_insn_group_barriers (dump_file);
9532 ia64_final_schedule = 0;
9533 timevar_pop (TV_SCHED2);
9536 emit_all_insn_group_barriers (dump_file);
9540 /* A call must not be the last instruction in a function, so that the
9541 return address is still within the function, so that unwinding works
9542 properly. Note that IA-64 differs from dwarf2 on this point. */
9543 if (ia64_except_unwind_info (&global_options) == UI_TARGET)
9548 insn = get_last_insn ();
9549 if (! INSN_P (insn))
9550 insn = prev_active_insn (insn);
9553 /* Skip over insns that expand to nothing. */
9554 while (GET_CODE (insn) == INSN
9555 && get_attr_empty (insn) == EMPTY_YES)
9557 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
9558 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
9560 insn = prev_active_insn (insn);
9562 if (GET_CODE (insn) == CALL_INSN)
9565 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9566 emit_insn (gen_break_f ());
9567 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
9572 emit_predicate_relation_info ();
9574 if (ia64_flag_var_tracking)
9576 timevar_push (TV_VAR_TRACKING);
9577 variable_tracking_main ();
9578 timevar_pop (TV_VAR_TRACKING);
9580 df_finish_pass (false);
9583 /* Return true if REGNO is used by the epilogue. */
9586 ia64_epilogue_uses (int regno)
9591 /* With a call to a function in another module, we will write a new
9592 value to "gp". After returning from such a call, we need to make
9593 sure the function restores the original gp-value, even if the
9594 function itself does not use the gp anymore. */
9595 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
9597 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
9598 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
9599 /* For functions defined with the syscall_linkage attribute, all
9600 input registers are marked as live at all function exits. This
9601 prevents the register allocator from using the input registers,
9602 which in turn makes it possible to restart a system call after
9603 an interrupt without having to save/restore the input registers.
9604 This also prevents kernel data from leaking to application code. */
9605 return lookup_attribute ("syscall_linkage",
9606 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
9609 /* Conditional return patterns can't represent the use of `b0' as
9610 the return address, so we force the value live this way. */
9614 /* Likewise for ar.pfs, which is used by br.ret. */
9622 /* Return true if REGNO is used by the frame unwinder. */
9625 ia64_eh_uses (int regno)
9629 if (! reload_completed)
9635 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
9636 if (regno == current_frame_info.r[r]
9637 || regno == emitted_frame_related_regs[r])
9643 /* Return true if this goes in small data/bss. */
9645 /* ??? We could also support own long data here. Generating movl/add/ld8
9646 instead of addl,ld8/ld8. This makes the code bigger, but should make the
9647 code faster because there is one less load. This also includes incomplete
9648 types which can't go in sdata/sbss. */
9651 ia64_in_small_data_p (const_tree exp)
9653 if (TARGET_NO_SDATA)
9656 /* We want to merge strings, so we never consider them small data. */
9657 if (TREE_CODE (exp) == STRING_CST)
9660 /* Functions are never small data. */
9661 if (TREE_CODE (exp) == FUNCTION_DECL)
9664 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
9666 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
9668 if (strcmp (section, ".sdata") == 0
9669 || strncmp (section, ".sdata.", 7) == 0
9670 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
9671 || strcmp (section, ".sbss") == 0
9672 || strncmp (section, ".sbss.", 6) == 0
9673 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
9678 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
9680 /* If this is an incomplete type with size 0, then we can't put it
9681 in sdata because it might be too big when completed. */
9682 if (size > 0 && size <= ia64_section_threshold)
9689 /* Output assembly directives for prologue regions. */
9691 /* The current basic block number. */
9693 static bool last_block;
9695 /* True if we need a copy_state command at the start of the next block. */
9697 static bool need_copy_state;
9699 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
9700 # define MAX_ARTIFICIAL_LABEL_BYTES 30
9703 /* Emit a debugging label after a call-frame-related insn. We'd
9704 rather output the label right away, but we'd have to output it
9705 after, not before, the instruction, and the instruction has not
9706 been output yet. So we emit the label after the insn, delete it to
9707 avoid introducing basic blocks, and mark it as preserved, such that
9708 it is still output, given that it is referenced in debug info. */
9711 ia64_emit_deleted_label_after_insn (rtx insn)
9713 char label[MAX_ARTIFICIAL_LABEL_BYTES];
9714 rtx lb = gen_label_rtx ();
9715 rtx label_insn = emit_label_after (lb, insn);
9717 LABEL_PRESERVE_P (lb) = 1;
9719 delete_insn (label_insn);
9721 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
9723 return xstrdup (label);
9726 /* Define the CFA after INSN with the steady-state definition. */
9729 ia64_dwarf2out_def_steady_cfa (rtx insn, bool frame)
9731 rtx fp = frame_pointer_needed
9732 ? hard_frame_pointer_rtx
9733 : stack_pointer_rtx;
9734 const char *label = ia64_emit_deleted_label_after_insn (insn);
9741 ia64_initial_elimination_offset
9742 (REGNO (arg_pointer_rtx), REGNO (fp))
9743 + ARG_POINTER_CFA_OFFSET (current_function_decl));
9746 /* All we need to do here is avoid a crash in the generic dwarf2
9747 processing. The real CFA definition is set up above. */
9750 ia64_dwarf_handle_frame_unspec (const char * ARG_UNUSED (label),
9751 rtx ARG_UNUSED (pattern),
9754 gcc_assert (index == UNSPECV_ALLOC);
9757 /* The generic dwarf2 frame debug info generator does not define a
9758 separate region for the very end of the epilogue, so refrain from
9759 doing so in the IA64-specific code as well. */
9761 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
9763 /* The function emits unwind directives for the start of an epilogue. */
9766 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
9768 /* If this isn't the last block of the function, then we need to label the
9769 current state, and copy it back in at the start of the next block. */
9774 fprintf (asm_out_file, "\t.label_state %d\n",
9775 ++cfun->machine->state_num);
9776 need_copy_state = true;
9780 fprintf (asm_out_file, "\t.restore sp\n");
9781 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9782 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
9783 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
9786 /* This function processes a SET pattern for REG_CFA_ADJUST_CFA. */
9789 process_cfa_adjust_cfa (FILE *asm_out_file, rtx pat, rtx insn,
9790 bool unwind, bool frame)
9792 rtx dest = SET_DEST (pat);
9793 rtx src = SET_SRC (pat);
9795 if (dest == stack_pointer_rtx)
9797 if (GET_CODE (src) == PLUS)
9799 rtx op0 = XEXP (src, 0);
9800 rtx op1 = XEXP (src, 1);
9802 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
9804 if (INTVAL (op1) < 0)
9806 gcc_assert (!frame_pointer_needed);
9808 fprintf (asm_out_file,
9809 "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
9811 ia64_dwarf2out_def_steady_cfa (insn, frame);
9814 process_epilogue (asm_out_file, insn, unwind, frame);
9818 gcc_assert (src == hard_frame_pointer_rtx);
9819 process_epilogue (asm_out_file, insn, unwind, frame);
9822 else if (dest == hard_frame_pointer_rtx)
9824 gcc_assert (src == stack_pointer_rtx);
9825 gcc_assert (frame_pointer_needed);
9828 fprintf (asm_out_file, "\t.vframe r%d\n",
9829 ia64_dbx_register_number (REGNO (dest)));
9830 ia64_dwarf2out_def_steady_cfa (insn, frame);
9836 /* This function processes a SET pattern for REG_CFA_REGISTER. */
9839 process_cfa_register (FILE *asm_out_file, rtx pat, bool unwind)
9841 rtx dest = SET_DEST (pat);
9842 rtx src = SET_SRC (pat);
9844 int dest_regno = REGNO (dest);
9845 int src_regno = REGNO (src);
9850 /* Saving return address pointer. */
9851 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9853 fprintf (asm_out_file, "\t.save rp, r%d\n",
9854 ia64_dbx_register_number (dest_regno));
9858 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9860 fprintf (asm_out_file, "\t.save pr, r%d\n",
9861 ia64_dbx_register_number (dest_regno));
9864 case AR_UNAT_REGNUM:
9865 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9867 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9868 ia64_dbx_register_number (dest_regno));
9872 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9874 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9875 ia64_dbx_register_number (dest_regno));
9879 /* Everything else should indicate being stored to memory. */
9884 /* This function processes a SET pattern for REG_CFA_OFFSET. */
9887 process_cfa_offset (FILE *asm_out_file, rtx pat, bool unwind)
9889 rtx dest = SET_DEST (pat);
9890 rtx src = SET_SRC (pat);
9891 int src_regno = REGNO (src);
9896 gcc_assert (MEM_P (dest));
9897 if (GET_CODE (XEXP (dest, 0)) == REG)
9899 base = XEXP (dest, 0);
9904 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9905 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9906 base = XEXP (XEXP (dest, 0), 0);
9907 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9910 if (base == hard_frame_pointer_rtx)
9912 saveop = ".savepsp";
9917 gcc_assert (base == stack_pointer_rtx);
9921 src_regno = REGNO (src);
9925 gcc_assert (!current_frame_info.r[reg_save_b0]);
9927 fprintf (asm_out_file, "\t%s rp, " HOST_WIDE_INT_PRINT_DEC "\n",
9932 gcc_assert (!current_frame_info.r[reg_save_pr]);
9934 fprintf (asm_out_file, "\t%s pr, " HOST_WIDE_INT_PRINT_DEC "\n",
9939 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9941 fprintf (asm_out_file, "\t%s ar.lc, " HOST_WIDE_INT_PRINT_DEC "\n",
9946 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9948 fprintf (asm_out_file, "\t%s ar.pfs, " HOST_WIDE_INT_PRINT_DEC "\n",
9952 case AR_UNAT_REGNUM:
9953 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9955 fprintf (asm_out_file, "\t%s ar.unat, " HOST_WIDE_INT_PRINT_DEC "\n",
9964 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9965 1 << (src_regno - GR_REG (4)));
9974 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9975 1 << (src_regno - BR_REG (1)));
9983 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9984 1 << (src_regno - FR_REG (2)));
9987 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9988 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9989 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9990 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9992 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9993 1 << (src_regno - FR_REG (12)));
9997 /* ??? For some reason we mark other general registers, even those
9998 we can't represent in the unwind info. Ignore them. */
10003 /* This function looks at a single insn and emits any directives
10004 required to unwind this insn. */
10007 ia64_asm_unwind_emit (FILE *asm_out_file, rtx insn)
10009 bool unwind = ia64_except_unwind_info (&global_options) == UI_TARGET;
10010 bool frame = dwarf2out_do_frame ();
10014 if (!unwind && !frame)
10017 if (NOTE_INSN_BASIC_BLOCK_P (insn))
10019 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
10021 /* Restore unwind state from immediately before the epilogue. */
10022 if (need_copy_state)
10026 fprintf (asm_out_file, "\t.body\n");
10027 fprintf (asm_out_file, "\t.copy_state %d\n",
10028 cfun->machine->state_num);
10030 if (IA64_CHANGE_CFA_IN_EPILOGUE)
10031 ia64_dwarf2out_def_steady_cfa (insn, frame);
10032 need_copy_state = false;
10036 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
10039 /* Look for the ALLOC insn. */
10040 if (INSN_CODE (insn) == CODE_FOR_alloc)
10042 rtx dest = SET_DEST (XVECEXP (PATTERN (insn), 0, 0));
10043 int dest_regno = REGNO (dest);
10045 /* If this is the final destination for ar.pfs, then this must
10046 be the alloc in the prologue. */
10047 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
10050 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
10051 ia64_dbx_register_number (dest_regno));
10055 /* This must be an alloc before a sibcall. We must drop the
10056 old frame info. The easiest way to drop the old frame
10057 info is to ensure we had a ".restore sp" directive
10058 followed by a new prologue. If the procedure doesn't
10059 have a memory-stack frame, we'll issue a dummy ".restore
10061 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
10062 /* if haven't done process_epilogue() yet, do it now */
10063 process_epilogue (asm_out_file, insn, unwind, frame);
10065 fprintf (asm_out_file, "\t.prologue\n");
10070 handled_one = false;
10071 for (note = REG_NOTES (insn); note; note = XEXP (note, 1))
10072 switch (REG_NOTE_KIND (note))
10074 case REG_CFA_ADJUST_CFA:
10075 pat = XEXP (note, 0);
10077 pat = PATTERN (insn);
10078 process_cfa_adjust_cfa (asm_out_file, pat, insn, unwind, frame);
10079 handled_one = true;
10082 case REG_CFA_OFFSET:
10083 pat = XEXP (note, 0);
10085 pat = PATTERN (insn);
10086 process_cfa_offset (asm_out_file, pat, unwind);
10087 handled_one = true;
10090 case REG_CFA_REGISTER:
10091 pat = XEXP (note, 0);
10093 pat = PATTERN (insn);
10094 process_cfa_register (asm_out_file, pat, unwind);
10095 handled_one = true;
10098 case REG_FRAME_RELATED_EXPR:
10099 case REG_CFA_DEF_CFA:
10100 case REG_CFA_EXPRESSION:
10101 case REG_CFA_RESTORE:
10102 case REG_CFA_SET_VDRAP:
10103 /* Not used in the ia64 port. */
10104 gcc_unreachable ();
10107 /* Not a frame-related note. */
10111 /* All REG_FRAME_RELATED_P insns, besides ALLOC, are marked with the
10112 explicit action to take. No guessing required. */
10113 gcc_assert (handled_one);
10116 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
10119 ia64_asm_emit_except_personality (rtx personality)
10121 fputs ("\t.personality\t", asm_out_file);
10122 output_addr_const (asm_out_file, personality);
10123 fputc ('\n', asm_out_file);
10126 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
10129 ia64_asm_init_sections (void)
10131 exception_section = get_unnamed_section (0, output_section_asm_op,
10135 /* Implement TARGET_DEBUG_UNWIND_INFO. */
10137 static enum unwind_info_type
10138 ia64_debug_unwind_info (void)
10143 /* Implement TARGET_EXCEPT_UNWIND_INFO. */
10145 static enum unwind_info_type
10146 ia64_except_unwind_info (struct gcc_options *opts)
10148 /* Honor the --enable-sjlj-exceptions configure switch. */
10149 #ifdef CONFIG_UNWIND_EXCEPTIONS
10150 if (CONFIG_UNWIND_EXCEPTIONS)
10154 /* For simplicity elsewhere in this file, indicate that all unwind
10155 info is disabled if we're not emitting unwind tables. */
10156 if (!opts->x_flag_exceptions && !opts->x_flag_unwind_tables)
10165 IA64_BUILTIN_COPYSIGNQ,
10166 IA64_BUILTIN_FABSQ,
10167 IA64_BUILTIN_FLUSHRS,
10169 IA64_BUILTIN_HUGE_VALQ,
10173 static GTY(()) tree ia64_builtins[(int) IA64_BUILTIN_max];
10176 ia64_init_builtins (void)
10182 /* The __fpreg type. */
10183 fpreg_type = make_node (REAL_TYPE);
10184 TYPE_PRECISION (fpreg_type) = 82;
10185 layout_type (fpreg_type);
10186 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
10188 /* The __float80 type. */
10189 float80_type = make_node (REAL_TYPE);
10190 TYPE_PRECISION (float80_type) = 80;
10191 layout_type (float80_type);
10192 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
10194 /* The __float128 type. */
10198 tree float128_type = make_node (REAL_TYPE);
10200 TYPE_PRECISION (float128_type) = 128;
10201 layout_type (float128_type);
10202 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
10204 /* TFmode support builtins. */
10205 ftype = build_function_type (float128_type, void_list_node);
10206 decl = add_builtin_function ("__builtin_infq", ftype,
10207 IA64_BUILTIN_INFQ, BUILT_IN_MD,
10209 ia64_builtins[IA64_BUILTIN_INFQ] = decl;
10211 decl = add_builtin_function ("__builtin_huge_valq", ftype,
10212 IA64_BUILTIN_HUGE_VALQ, BUILT_IN_MD,
10214 ia64_builtins[IA64_BUILTIN_HUGE_VALQ] = decl;
10216 ftype = build_function_type_list (float128_type,
10219 decl = add_builtin_function ("__builtin_fabsq", ftype,
10220 IA64_BUILTIN_FABSQ, BUILT_IN_MD,
10221 "__fabstf2", NULL_TREE);
10222 TREE_READONLY (decl) = 1;
10223 ia64_builtins[IA64_BUILTIN_FABSQ] = decl;
10225 ftype = build_function_type_list (float128_type,
10229 decl = add_builtin_function ("__builtin_copysignq", ftype,
10230 IA64_BUILTIN_COPYSIGNQ, BUILT_IN_MD,
10231 "__copysigntf3", NULL_TREE);
10232 TREE_READONLY (decl) = 1;
10233 ia64_builtins[IA64_BUILTIN_COPYSIGNQ] = decl;
10236 /* Under HPUX, this is a synonym for "long double". */
10237 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
10240 /* Fwrite on VMS is non-standard. */
10241 if (TARGET_ABI_OPEN_VMS)
10243 implicit_built_in_decls[(int) BUILT_IN_FWRITE] = NULL_TREE;
10244 implicit_built_in_decls[(int) BUILT_IN_FWRITE_UNLOCKED] = NULL_TREE;
10247 #define def_builtin(name, type, code) \
10248 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
10251 decl = def_builtin ("__builtin_ia64_bsp",
10252 build_function_type (ptr_type_node, void_list_node),
10254 ia64_builtins[IA64_BUILTIN_BSP] = decl;
10256 decl = def_builtin ("__builtin_ia64_flushrs",
10257 build_function_type (void_type_node, void_list_node),
10258 IA64_BUILTIN_FLUSHRS);
10259 ia64_builtins[IA64_BUILTIN_FLUSHRS] = decl;
10265 if (built_in_decls [BUILT_IN_FINITE])
10266 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
10268 if (built_in_decls [BUILT_IN_FINITEF])
10269 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
10271 if (built_in_decls [BUILT_IN_FINITEL])
10272 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
10278 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10279 enum machine_mode mode ATTRIBUTE_UNUSED,
10280 int ignore ATTRIBUTE_UNUSED)
10282 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10283 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
10287 case IA64_BUILTIN_BSP:
10288 if (! target || ! register_operand (target, DImode))
10289 target = gen_reg_rtx (DImode);
10290 emit_insn (gen_bsp_value (target));
10291 #ifdef POINTERS_EXTEND_UNSIGNED
10292 target = convert_memory_address (ptr_mode, target);
10296 case IA64_BUILTIN_FLUSHRS:
10297 emit_insn (gen_flushrs ());
10300 case IA64_BUILTIN_INFQ:
10301 case IA64_BUILTIN_HUGE_VALQ:
10303 enum machine_mode target_mode = TYPE_MODE (TREE_TYPE (exp));
10304 REAL_VALUE_TYPE inf;
10308 tmp = CONST_DOUBLE_FROM_REAL_VALUE (inf, target_mode);
10310 tmp = validize_mem (force_const_mem (target_mode, tmp));
10313 target = gen_reg_rtx (target_mode);
10315 emit_move_insn (target, tmp);
10319 case IA64_BUILTIN_FABSQ:
10320 case IA64_BUILTIN_COPYSIGNQ:
10321 return expand_call (exp, target, ignore);
10324 gcc_unreachable ();
10330 /* Return the ia64 builtin for CODE. */
10333 ia64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
10335 if (code >= IA64_BUILTIN_max)
10336 return error_mark_node;
10338 return ia64_builtins[code];
10341 /* For the HP-UX IA64 aggregate parameters are passed stored in the
10342 most significant bits of the stack slot. */
10345 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
10347 /* Exception to normal case for structures/unions/etc. */
10349 if (type && AGGREGATE_TYPE_P (type)
10350 && int_size_in_bytes (type) < UNITS_PER_WORD)
10353 /* Fall back to the default. */
10354 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
10357 /* Emit text to declare externally defined variables and functions, because
10358 the Intel assembler does not support undefined externals. */
10361 ia64_asm_output_external (FILE *file, tree decl, const char *name)
10363 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
10364 set in order to avoid putting out names that are never really
10366 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
10368 /* maybe_assemble_visibility will return 1 if the assembler
10369 visibility directive is output. */
10370 int need_visibility = ((*targetm.binds_local_p) (decl)
10371 && maybe_assemble_visibility (decl));
10373 #ifdef DO_CRTL_NAMES
10377 /* GNU as does not need anything here, but the HP linker does
10378 need something for external functions. */
10379 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
10380 && TREE_CODE (decl) == FUNCTION_DECL)
10381 (*targetm.asm_out.globalize_decl_name) (file, decl);
10382 else if (need_visibility && !TARGET_GNU_AS)
10383 (*targetm.asm_out.globalize_label) (file, name);
10387 /* Set SImode div/mod functions, init_integral_libfuncs only initializes
10388 modes of word_mode and larger. Rename the TFmode libfuncs using the
10389 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
10390 backward compatibility. */
10393 ia64_init_libfuncs (void)
10395 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
10396 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
10397 set_optab_libfunc (smod_optab, SImode, "__modsi3");
10398 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
10400 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
10401 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
10402 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
10403 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
10404 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
10406 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
10407 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
10408 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
10409 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
10410 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
10411 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
10413 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
10414 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
10415 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
10416 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
10417 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
10419 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
10420 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
10421 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
10422 /* HP-UX 11.23 libc does not have a function for unsigned
10423 SImode-to-TFmode conversion. */
10424 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
10427 /* Rename all the TFmode libfuncs using the HPUX conventions. */
10430 ia64_hpux_init_libfuncs (void)
10432 ia64_init_libfuncs ();
10434 /* The HP SI millicode division and mod functions expect DI arguments.
10435 By turning them off completely we avoid using both libgcc and the
10436 non-standard millicode routines and use the HP DI millicode routines
10439 set_optab_libfunc (sdiv_optab, SImode, 0);
10440 set_optab_libfunc (udiv_optab, SImode, 0);
10441 set_optab_libfunc (smod_optab, SImode, 0);
10442 set_optab_libfunc (umod_optab, SImode, 0);
10444 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
10445 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
10446 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
10447 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
10449 /* HP-UX libc has TF min/max/abs routines in it. */
10450 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
10451 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
10452 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
10454 /* ia64_expand_compare uses this. */
10455 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
10457 /* These should never be used. */
10458 set_optab_libfunc (eq_optab, TFmode, 0);
10459 set_optab_libfunc (ne_optab, TFmode, 0);
10460 set_optab_libfunc (gt_optab, TFmode, 0);
10461 set_optab_libfunc (ge_optab, TFmode, 0);
10462 set_optab_libfunc (lt_optab, TFmode, 0);
10463 set_optab_libfunc (le_optab, TFmode, 0);
10466 /* Rename the division and modulus functions in VMS. */
10469 ia64_vms_init_libfuncs (void)
10471 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
10472 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
10473 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
10474 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
10475 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
10476 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
10477 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
10478 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
10479 abort_libfunc = init_one_libfunc ("decc$abort");
10480 memcmp_libfunc = init_one_libfunc ("decc$memcmp");
10481 #ifdef MEM_LIBFUNCS_INIT
10486 /* Rename the TFmode libfuncs available from soft-fp in glibc using
10487 the HPUX conventions. */
10490 ia64_sysv4_init_libfuncs (void)
10492 ia64_init_libfuncs ();
10494 /* These functions are not part of the HPUX TFmode interface. We
10495 use them instead of _U_Qfcmp, which doesn't work the way we
10497 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
10498 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
10499 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
10500 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
10501 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
10502 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
10504 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
10505 glibc doesn't have them. */
10511 ia64_soft_fp_init_libfuncs (void)
10516 ia64_vms_valid_pointer_mode (enum machine_mode mode)
10518 return (mode == SImode || mode == DImode);
10521 /* For HPUX, it is illegal to have relocations in shared segments. */
10524 ia64_hpux_reloc_rw_mask (void)
10529 /* For others, relax this so that relocations to local data goes in
10530 read-only segments, but we still cannot allow global relocations
10531 in read-only segments. */
10534 ia64_reloc_rw_mask (void)
10536 return flag_pic ? 3 : 2;
10539 /* Return the section to use for X. The only special thing we do here
10540 is to honor small data. */
10543 ia64_select_rtx_section (enum machine_mode mode, rtx x,
10544 unsigned HOST_WIDE_INT align)
10546 if (GET_MODE_SIZE (mode) > 0
10547 && GET_MODE_SIZE (mode) <= ia64_section_threshold
10548 && !TARGET_NO_SDATA)
10549 return sdata_section;
10551 return default_elf_select_rtx_section (mode, x, align);
10554 static unsigned int
10555 ia64_section_type_flags (tree decl, const char *name, int reloc)
10557 unsigned int flags = 0;
10559 if (strcmp (name, ".sdata") == 0
10560 || strncmp (name, ".sdata.", 7) == 0
10561 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
10562 || strncmp (name, ".sdata2.", 8) == 0
10563 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
10564 || strcmp (name, ".sbss") == 0
10565 || strncmp (name, ".sbss.", 6) == 0
10566 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
10567 flags = SECTION_SMALL;
10569 #if TARGET_ABI_OPEN_VMS
10570 if (decl && DECL_ATTRIBUTES (decl)
10571 && lookup_attribute ("common_object", DECL_ATTRIBUTES (decl)))
10572 flags |= SECTION_VMS_OVERLAY;
10575 flags |= default_section_type_flags (decl, name, reloc);
10579 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
10580 structure type and that the address of that type should be passed
10581 in out0, rather than in r8. */
10584 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
10586 tree ret_type = TREE_TYPE (fntype);
10588 /* The Itanium C++ ABI requires that out0, rather than r8, be used
10589 as the structure return address parameter, if the return value
10590 type has a non-trivial copy constructor or destructor. It is not
10591 clear if this same convention should be used for other
10592 programming languages. Until G++ 3.4, we incorrectly used r8 for
10593 these return values. */
10594 return (abi_version_at_least (2)
10596 && TYPE_MODE (ret_type) == BLKmode
10597 && TREE_ADDRESSABLE (ret_type)
10598 && strcmp (lang_hooks.name, "GNU C++") == 0);
10601 /* Output the assembler code for a thunk function. THUNK_DECL is the
10602 declaration for the thunk function itself, FUNCTION is the decl for
10603 the target function. DELTA is an immediate constant offset to be
10604 added to THIS. If VCALL_OFFSET is nonzero, the word at
10605 *(*this + vcall_offset) should be added to THIS. */
10608 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
10609 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
10612 rtx this_rtx, insn, funexp;
10613 unsigned int this_parmno;
10614 unsigned int this_regno;
10617 reload_completed = 1;
10618 epilogue_completed = 1;
10620 /* Set things up as ia64_expand_prologue might. */
10621 last_scratch_gr_reg = 15;
10623 memset (¤t_frame_info, 0, sizeof (current_frame_info));
10624 current_frame_info.spill_cfa_off = -16;
10625 current_frame_info.n_input_regs = 1;
10626 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
10628 /* Mark the end of the (empty) prologue. */
10629 emit_note (NOTE_INSN_PROLOGUE_END);
10631 /* Figure out whether "this" will be the first parameter (the
10632 typical case) or the second parameter (as happens when the
10633 virtual function returns certain class objects). */
10635 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
10637 this_regno = IN_REG (this_parmno);
10638 if (!TARGET_REG_NAMES)
10639 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
10641 this_rtx = gen_rtx_REG (Pmode, this_regno);
10643 /* Apply the constant offset, if required. */
10644 delta_rtx = GEN_INT (delta);
10647 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
10648 REG_POINTER (tmp) = 1;
10649 if (delta && satisfies_constraint_I (delta_rtx))
10651 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
10655 emit_insn (gen_ptr_extend (this_rtx, tmp));
10659 if (!satisfies_constraint_I (delta_rtx))
10661 rtx tmp = gen_rtx_REG (Pmode, 2);
10662 emit_move_insn (tmp, delta_rtx);
10665 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
10668 /* Apply the offset from the vtable, if required. */
10671 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
10672 rtx tmp = gen_rtx_REG (Pmode, 2);
10676 rtx t = gen_rtx_REG (ptr_mode, 2);
10677 REG_POINTER (t) = 1;
10678 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
10679 if (satisfies_constraint_I (vcall_offset_rtx))
10681 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
10685 emit_insn (gen_ptr_extend (tmp, t));
10688 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
10692 if (!satisfies_constraint_J (vcall_offset_rtx))
10694 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
10695 emit_move_insn (tmp2, vcall_offset_rtx);
10696 vcall_offset_rtx = tmp2;
10698 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
10702 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
10704 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
10706 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
10709 /* Generate a tail call to the target function. */
10710 if (! TREE_USED (function))
10712 assemble_external (function);
10713 TREE_USED (function) = 1;
10715 funexp = XEXP (DECL_RTL (function), 0);
10716 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
10717 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
10718 insn = get_last_insn ();
10719 SIBLING_CALL_P (insn) = 1;
10721 /* Code generation for calls relies on splitting. */
10722 reload_completed = 1;
10723 epilogue_completed = 1;
10724 try_split (PATTERN (insn), insn, 0);
10728 /* Run just enough of rest_of_compilation to get the insns emitted.
10729 There's not really enough bulk here to make other passes such as
10730 instruction scheduling worth while. Note that use_thunk calls
10731 assemble_start_function and assemble_end_function. */
10733 insn_locators_alloc ();
10734 emit_all_insn_group_barriers (NULL);
10735 insn = get_insns ();
10736 shorten_branches (insn);
10737 final_start_function (insn, file, 1);
10738 final (insn, file, 1);
10739 final_end_function ();
10741 reload_completed = 0;
10742 epilogue_completed = 0;
10745 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
10748 ia64_struct_value_rtx (tree fntype,
10749 int incoming ATTRIBUTE_UNUSED)
10751 if (TARGET_ABI_OPEN_VMS ||
10752 (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype)))
10754 return gen_rtx_REG (Pmode, GR_REG (8));
10758 ia64_scalar_mode_supported_p (enum machine_mode mode)
10784 ia64_vector_mode_supported_p (enum machine_mode mode)
10801 /* Implement the FUNCTION_PROFILER macro. */
10804 ia64_output_function_profiler (FILE *file, int labelno)
10806 bool indirect_call;
10808 /* If the function needs a static chain and the static chain
10809 register is r15, we use an indirect call so as to bypass
10810 the PLT stub in case the executable is dynamically linked,
10811 because the stub clobbers r15 as per 5.3.6 of the psABI.
10812 We don't need to do that in non canonical PIC mode. */
10814 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
10816 gcc_assert (STATIC_CHAIN_REGNUM == 15);
10817 indirect_call = true;
10820 indirect_call = false;
10823 fputs ("\t.prologue 4, r40\n", file);
10825 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
10826 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
10828 if (NO_PROFILE_COUNTERS)
10829 fputs ("\tmov out3 = r0\n", file);
10833 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10835 if (TARGET_AUTO_PIC)
10836 fputs ("\tmovl out3 = @gprel(", file);
10838 fputs ("\taddl out3 = @ltoff(", file);
10839 assemble_name (file, buf);
10840 if (TARGET_AUTO_PIC)
10841 fputs (")\n", file);
10843 fputs ("), r1\n", file);
10847 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
10848 fputs ("\t;;\n", file);
10850 fputs ("\t.save rp, r42\n", file);
10851 fputs ("\tmov out2 = b0\n", file);
10853 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
10854 fputs ("\t.body\n", file);
10855 fputs ("\tmov out1 = r1\n", file);
10858 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
10859 fputs ("\tmov b6 = r16\n", file);
10860 fputs ("\tld8 r1 = [r14]\n", file);
10861 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
10864 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
10867 static GTY(()) rtx mcount_func_rtx;
10869 gen_mcount_func_rtx (void)
10871 if (!mcount_func_rtx)
10872 mcount_func_rtx = init_one_libfunc ("_mcount");
10873 return mcount_func_rtx;
10877 ia64_profile_hook (int labelno)
10881 if (NO_PROFILE_COUNTERS)
10882 label = const0_rtx;
10886 const char *label_name;
10887 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
10888 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
10889 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
10890 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
10892 ip = gen_reg_rtx (Pmode);
10893 emit_insn (gen_ip_value (ip));
10894 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
10896 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
10901 /* Return the mangling of TYPE if it is an extended fundamental type. */
10903 static const char *
10904 ia64_mangle_type (const_tree type)
10906 type = TYPE_MAIN_VARIANT (type);
10908 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
10909 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
10912 /* On HP-UX, "long double" is mangled as "e" so __float128 is
10914 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
10916 /* On HP-UX, "e" is not available as a mangling of __float80 so use
10917 an extended mangling. Elsewhere, "e" is available since long
10918 double is 80 bits. */
10919 if (TYPE_MODE (type) == XFmode)
10920 return TARGET_HPUX ? "u9__float80" : "e";
10921 if (TYPE_MODE (type) == RFmode)
10922 return "u7__fpreg";
10926 /* Return the diagnostic message string if conversion from FROMTYPE to
10927 TOTYPE is not allowed, NULL otherwise. */
10928 static const char *
10929 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
10931 /* Reject nontrivial conversion to or from __fpreg. */
10932 if (TYPE_MODE (fromtype) == RFmode
10933 && TYPE_MODE (totype) != RFmode
10934 && TYPE_MODE (totype) != VOIDmode)
10935 return N_("invalid conversion from %<__fpreg%>");
10936 if (TYPE_MODE (totype) == RFmode
10937 && TYPE_MODE (fromtype) != RFmode)
10938 return N_("invalid conversion to %<__fpreg%>");
10942 /* Return the diagnostic message string if the unary operation OP is
10943 not permitted on TYPE, NULL otherwise. */
10944 static const char *
10945 ia64_invalid_unary_op (int op, const_tree type)
10947 /* Reject operations on __fpreg other than unary + or &. */
10948 if (TYPE_MODE (type) == RFmode
10949 && op != CONVERT_EXPR
10950 && op != ADDR_EXPR)
10951 return N_("invalid operation on %<__fpreg%>");
10955 /* Return the diagnostic message string if the binary operation OP is
10956 not permitted on TYPE1 and TYPE2, NULL otherwise. */
10957 static const char *
10958 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
10960 /* Reject operations on __fpreg. */
10961 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
10962 return N_("invalid operation on %<__fpreg%>");
10966 /* Implement TARGET_OPTION_DEFAULT_PARAMS. */
10968 ia64_option_default_params (void)
10970 /* Let the scheduler form additional regions. */
10971 set_default_param_value (PARAM_MAX_SCHED_EXTEND_REGIONS_ITERS, 2);
10973 /* Set the default values for cache-related parameters. */
10974 set_default_param_value (PARAM_SIMULTANEOUS_PREFETCHES, 6);
10975 set_default_param_value (PARAM_L1_CACHE_LINE_SIZE, 32);
10977 set_default_param_value (PARAM_SCHED_MEM_TRUE_DEP_COST, 4);
10980 /* HP-UX version_id attribute.
10981 For object foo, if the version_id is set to 1234 put out an alias
10982 of '.alias foo "foo{1234}" We can't use "foo{1234}" in anything
10983 other than an alias statement because it is an illegal symbol name. */
10986 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
10987 tree name ATTRIBUTE_UNUSED,
10989 int flags ATTRIBUTE_UNUSED,
10990 bool *no_add_attrs)
10992 tree arg = TREE_VALUE (args);
10994 if (TREE_CODE (arg) != STRING_CST)
10996 error("version attribute is not a string");
10997 *no_add_attrs = true;
11003 /* Target hook for c_mode_for_suffix. */
11005 static enum machine_mode
11006 ia64_c_mode_for_suffix (char suffix)
11016 static enum machine_mode
11017 ia64_promote_function_mode (const_tree type,
11018 enum machine_mode mode,
11020 const_tree funtype,
11023 /* Special processing required for OpenVMS ... */
11025 if (!TARGET_ABI_OPEN_VMS)
11026 return default_promote_function_mode(type, mode, punsignedp, funtype,
11029 /* HP OpenVMS Calling Standard dated June, 2004, that describes
11030 HP OpenVMS I64 Version 8.2EFT,
11031 chapter 4 "OpenVMS I64 Conventions"
11032 section 4.7 "Procedure Linkage"
11033 subsection 4.7.5.2, "Normal Register Parameters"
11035 "Unsigned integral (except unsigned 32-bit), set, and VAX floating-point
11036 values passed in registers are zero-filled; signed integral values as
11037 well as unsigned 32-bit integral values are sign-extended to 64 bits.
11038 For all other types passed in the general registers, unused bits are
11041 if (!AGGREGATE_TYPE_P (type)
11042 && GET_MODE_CLASS (mode) == MODE_INT
11043 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
11045 if (mode == SImode)
11050 return promote_mode (type, mode, punsignedp);
11053 static GTY(()) rtx ia64_dconst_0_5_rtx;
11056 ia64_dconst_0_5 (void)
11058 if (! ia64_dconst_0_5_rtx)
11060 REAL_VALUE_TYPE rv;
11061 real_from_string (&rv, "0.5");
11062 ia64_dconst_0_5_rtx = const_double_from_real_value (rv, DFmode);
11064 return ia64_dconst_0_5_rtx;
11067 static GTY(()) rtx ia64_dconst_0_375_rtx;
11070 ia64_dconst_0_375 (void)
11072 if (! ia64_dconst_0_375_rtx)
11074 REAL_VALUE_TYPE rv;
11075 real_from_string (&rv, "0.375");
11076 ia64_dconst_0_375_rtx = const_double_from_real_value (rv, DFmode);
11078 return ia64_dconst_0_375_rtx;
11081 static enum machine_mode
11082 ia64_get_reg_raw_mode (int regno)
11084 if (FR_REGNO_P (regno))
11086 return default_get_reg_raw_mode(regno);
11089 /* Always default to .text section until HP-UX linker is fixed. */
11091 ATTRIBUTE_UNUSED static section *
11092 ia64_hpux_function_section (tree decl ATTRIBUTE_UNUSED,
11093 enum node_frequency freq ATTRIBUTE_UNUSED,
11094 bool startup ATTRIBUTE_UNUSED,
11095 bool exit ATTRIBUTE_UNUSED)
11100 #include "gt-ia64.h"