1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
22 Boston, MA 02110-1301, USA. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
44 #include "basic-block.h"
46 #include "sched-int.h"
49 #include "target-def.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
58 /* This is used for communication between ASM_OUTPUT_LABEL and
59 ASM_OUTPUT_LABELREF. */
60 int ia64_asm_output_label = 0;
62 /* Define the information needed to generate branch and scc insns. This is
63 stored from the compare operation. */
64 struct rtx_def * ia64_compare_op0;
65 struct rtx_def * ia64_compare_op1;
67 /* Register names for ia64_expand_prologue. */
68 static const char * const ia64_reg_numbers[96] =
69 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
70 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
71 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
72 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
73 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
74 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
75 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
76 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
77 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
78 "r104","r105","r106","r107","r108","r109","r110","r111",
79 "r112","r113","r114","r115","r116","r117","r118","r119",
80 "r120","r121","r122","r123","r124","r125","r126","r127"};
82 /* ??? These strings could be shared with REGISTER_NAMES. */
83 static const char * const ia64_input_reg_names[8] =
84 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
86 /* ??? These strings could be shared with REGISTER_NAMES. */
87 static const char * const ia64_local_reg_names[80] =
88 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
89 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
90 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
91 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
92 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
93 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
94 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
95 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
96 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
97 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
99 /* ??? These strings could be shared with REGISTER_NAMES. */
100 static const char * const ia64_output_reg_names[8] =
101 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
103 /* Which cpu are we scheduling for. */
104 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
106 /* Determines whether we run our final scheduling pass or not. We always
107 avoid the normal second scheduling pass. */
108 static int ia64_flag_schedule_insns2;
110 /* Determines whether we run variable tracking in machine dependent
112 static int ia64_flag_var_tracking;
114 /* Variables which are this size or smaller are put in the sdata/sbss
117 unsigned int ia64_section_threshold;
119 /* The following variable is used by the DFA insn scheduler. The value is
120 TRUE if we do insn bundling instead of insn scheduling. */
123 /* Structure to be filled in by ia64_compute_frame_size with register
124 save masks and offsets for the current function. */
126 struct ia64_frame_info
128 HOST_WIDE_INT total_size; /* size of the stack frame, not including
129 the caller's scratch area. */
130 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
131 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
132 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
133 HARD_REG_SET mask; /* mask of saved registers. */
134 unsigned int gr_used_mask; /* mask of registers in use as gr spill
135 registers or long-term scratches. */
136 int n_spilled; /* number of spilled registers. */
137 int reg_fp; /* register for fp. */
138 int reg_save_b0; /* save register for b0. */
139 int reg_save_pr; /* save register for prs. */
140 int reg_save_ar_pfs; /* save register for ar.pfs. */
141 int reg_save_ar_unat; /* save register for ar.unat. */
142 int reg_save_ar_lc; /* save register for ar.lc. */
143 int reg_save_gp; /* save register for gp. */
144 int n_input_regs; /* number of input registers used. */
145 int n_local_regs; /* number of local registers used. */
146 int n_output_regs; /* number of output registers used. */
147 int n_rotate_regs; /* number of rotating registers used. */
149 char need_regstk; /* true if a .regstk directive needed. */
150 char initialized; /* true if the data is finalized. */
153 /* Current frame information calculated by ia64_compute_frame_size. */
154 static struct ia64_frame_info current_frame_info;
156 static int ia64_first_cycle_multipass_dfa_lookahead (void);
157 static void ia64_dependencies_evaluation_hook (rtx, rtx);
158 static void ia64_init_dfa_pre_cycle_insn (void);
159 static rtx ia64_dfa_pre_cycle_insn (void);
160 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
161 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
162 static rtx gen_tls_get_addr (void);
163 static rtx gen_thread_pointer (void);
164 static int find_gr_spill (int);
165 static int next_scratch_gr_reg (void);
166 static void mark_reg_gr_used_mask (rtx, void *);
167 static void ia64_compute_frame_size (HOST_WIDE_INT);
168 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
169 static void finish_spill_pointers (void);
170 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
171 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
172 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
173 static rtx gen_movdi_x (rtx, rtx, rtx);
174 static rtx gen_fr_spill_x (rtx, rtx, rtx);
175 static rtx gen_fr_restore_x (rtx, rtx, rtx);
177 static enum machine_mode hfa_element_mode (tree, bool);
178 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
180 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
182 static bool ia64_function_ok_for_sibcall (tree, tree);
183 static bool ia64_return_in_memory (tree, tree);
184 static bool ia64_rtx_costs (rtx, int, int, int *);
185 static void fix_range (const char *);
186 static bool ia64_handle_option (size_t, const char *, int);
187 static struct machine_function * ia64_init_machine_status (void);
188 static void emit_insn_group_barriers (FILE *);
189 static void emit_all_insn_group_barriers (FILE *);
190 static void final_emit_insn_group_barriers (FILE *);
191 static void emit_predicate_relation_info (void);
192 static void ia64_reorg (void);
193 static bool ia64_in_small_data_p (tree);
194 static void process_epilogue (FILE *, rtx, bool, bool);
195 static int process_set (FILE *, rtx, rtx, bool, bool);
197 static bool ia64_assemble_integer (rtx, unsigned int, int);
198 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
199 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
200 static void ia64_output_function_end_prologue (FILE *);
202 static int ia64_issue_rate (void);
203 static int ia64_adjust_cost (rtx, rtx, rtx, int);
204 static void ia64_sched_init (FILE *, int, int);
205 static void ia64_sched_finish (FILE *, int);
206 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
207 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
208 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
209 static int ia64_variable_issue (FILE *, int, rtx, int);
211 static struct bundle_state *get_free_bundle_state (void);
212 static void free_bundle_state (struct bundle_state *);
213 static void initiate_bundle_states (void);
214 static void finish_bundle_states (void);
215 static unsigned bundle_state_hash (const void *);
216 static int bundle_state_eq_p (const void *, const void *);
217 static int insert_bundle_state (struct bundle_state *);
218 static void initiate_bundle_state_table (void);
219 static void finish_bundle_state_table (void);
220 static int try_issue_nops (struct bundle_state *, int);
221 static int try_issue_insn (struct bundle_state *, rtx);
222 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
223 static int get_max_pos (state_t);
224 static int get_template (state_t, int);
226 static rtx get_next_important_insn (rtx, rtx);
227 static void bundling (FILE *, int, rtx, rtx);
229 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
230 HOST_WIDE_INT, tree);
231 static void ia64_file_start (void);
233 static section *ia64_select_rtx_section (enum machine_mode, rtx,
234 unsigned HOST_WIDE_INT);
235 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
237 static section *ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
239 static void ia64_rwreloc_unique_section (tree, int)
241 static section *ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
242 unsigned HOST_WIDE_INT)
244 static unsigned int ia64_section_type_flags (tree, const char *, int);
245 static void ia64_hpux_add_extern_decl (tree decl)
247 static void ia64_hpux_file_end (void)
249 static void ia64_init_libfuncs (void)
251 static void ia64_hpux_init_libfuncs (void)
253 static void ia64_sysv4_init_libfuncs (void)
255 static void ia64_vms_init_libfuncs (void)
258 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
259 static void ia64_encode_section_info (tree, rtx, int);
260 static rtx ia64_struct_value_rtx (tree, int);
261 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
262 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
263 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
264 static bool ia64_cannot_force_const_mem (rtx);
265 static const char *ia64_mangle_fundamental_type (tree);
266 static const char *ia64_invalid_conversion (tree, tree);
267 static const char *ia64_invalid_unary_op (int, tree);
268 static const char *ia64_invalid_binary_op (int, tree, tree);
270 /* Table of valid machine attributes. */
271 static const struct attribute_spec ia64_attribute_table[] =
273 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
274 { "syscall_linkage", 0, 0, false, true, true, NULL },
275 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
276 { NULL, 0, 0, false, false, false, NULL }
279 /* Initialize the GCC target structure. */
280 #undef TARGET_ATTRIBUTE_TABLE
281 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
283 #undef TARGET_INIT_BUILTINS
284 #define TARGET_INIT_BUILTINS ia64_init_builtins
286 #undef TARGET_EXPAND_BUILTIN
287 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
289 #undef TARGET_ASM_BYTE_OP
290 #define TARGET_ASM_BYTE_OP "\tdata1\t"
291 #undef TARGET_ASM_ALIGNED_HI_OP
292 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
293 #undef TARGET_ASM_ALIGNED_SI_OP
294 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
295 #undef TARGET_ASM_ALIGNED_DI_OP
296 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
297 #undef TARGET_ASM_UNALIGNED_HI_OP
298 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
299 #undef TARGET_ASM_UNALIGNED_SI_OP
300 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
301 #undef TARGET_ASM_UNALIGNED_DI_OP
302 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
303 #undef TARGET_ASM_INTEGER
304 #define TARGET_ASM_INTEGER ia64_assemble_integer
306 #undef TARGET_ASM_FUNCTION_PROLOGUE
307 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
308 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
309 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
310 #undef TARGET_ASM_FUNCTION_EPILOGUE
311 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
313 #undef TARGET_IN_SMALL_DATA_P
314 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
316 #undef TARGET_SCHED_ADJUST_COST
317 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
318 #undef TARGET_SCHED_ISSUE_RATE
319 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
320 #undef TARGET_SCHED_VARIABLE_ISSUE
321 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
322 #undef TARGET_SCHED_INIT
323 #define TARGET_SCHED_INIT ia64_sched_init
324 #undef TARGET_SCHED_FINISH
325 #define TARGET_SCHED_FINISH ia64_sched_finish
326 #undef TARGET_SCHED_REORDER
327 #define TARGET_SCHED_REORDER ia64_sched_reorder
328 #undef TARGET_SCHED_REORDER2
329 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
331 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
332 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
334 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
335 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
337 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
338 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
339 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
340 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
342 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
343 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
344 ia64_first_cycle_multipass_dfa_lookahead_guard
346 #undef TARGET_SCHED_DFA_NEW_CYCLE
347 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
349 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
350 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
351 #undef TARGET_ARG_PARTIAL_BYTES
352 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
354 #undef TARGET_ASM_OUTPUT_MI_THUNK
355 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
356 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
357 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
359 #undef TARGET_ASM_FILE_START
360 #define TARGET_ASM_FILE_START ia64_file_start
362 #undef TARGET_RTX_COSTS
363 #define TARGET_RTX_COSTS ia64_rtx_costs
364 #undef TARGET_ADDRESS_COST
365 #define TARGET_ADDRESS_COST hook_int_rtx_0
367 #undef TARGET_MACHINE_DEPENDENT_REORG
368 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
370 #undef TARGET_ENCODE_SECTION_INFO
371 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
373 #undef TARGET_SECTION_TYPE_FLAGS
374 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
377 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
378 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
381 /* ??? ABI doesn't allow us to define this. */
383 #undef TARGET_PROMOTE_FUNCTION_ARGS
384 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
387 /* ??? ABI doesn't allow us to define this. */
389 #undef TARGET_PROMOTE_FUNCTION_RETURN
390 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
393 /* ??? Investigate. */
395 #undef TARGET_PROMOTE_PROTOTYPES
396 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
399 #undef TARGET_STRUCT_VALUE_RTX
400 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
401 #undef TARGET_RETURN_IN_MEMORY
402 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
403 #undef TARGET_SETUP_INCOMING_VARARGS
404 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
405 #undef TARGET_STRICT_ARGUMENT_NAMING
406 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
407 #undef TARGET_MUST_PASS_IN_STACK
408 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
410 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
411 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
413 #undef TARGET_UNWIND_EMIT
414 #define TARGET_UNWIND_EMIT process_for_unwind_directive
416 #undef TARGET_SCALAR_MODE_SUPPORTED_P
417 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
418 #undef TARGET_VECTOR_MODE_SUPPORTED_P
419 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
421 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
422 in an order different from the specified program order. */
423 #undef TARGET_RELAXED_ORDERING
424 #define TARGET_RELAXED_ORDERING true
426 #undef TARGET_DEFAULT_TARGET_FLAGS
427 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
428 #undef TARGET_HANDLE_OPTION
429 #define TARGET_HANDLE_OPTION ia64_handle_option
431 #undef TARGET_CANNOT_FORCE_CONST_MEM
432 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
434 #undef TARGET_MANGLE_FUNDAMENTAL_TYPE
435 #define TARGET_MANGLE_FUNDAMENTAL_TYPE ia64_mangle_fundamental_type
437 #undef TARGET_INVALID_CONVERSION
438 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
439 #undef TARGET_INVALID_UNARY_OP
440 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
441 #undef TARGET_INVALID_BINARY_OP
442 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
444 struct gcc_target targetm = TARGET_INITIALIZER;
448 ADDR_AREA_NORMAL, /* normal address area */
449 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
453 static GTY(()) tree small_ident1;
454 static GTY(()) tree small_ident2;
459 if (small_ident1 == 0)
461 small_ident1 = get_identifier ("small");
462 small_ident2 = get_identifier ("__small__");
466 /* Retrieve the address area that has been chosen for the given decl. */
468 static ia64_addr_area
469 ia64_get_addr_area (tree decl)
473 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
479 id = TREE_VALUE (TREE_VALUE (model_attr));
480 if (id == small_ident1 || id == small_ident2)
481 return ADDR_AREA_SMALL;
483 return ADDR_AREA_NORMAL;
487 ia64_handle_model_attribute (tree *node, tree name, tree args,
488 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
490 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
492 tree arg, decl = *node;
495 arg = TREE_VALUE (args);
496 if (arg == small_ident1 || arg == small_ident2)
498 addr_area = ADDR_AREA_SMALL;
502 warning (OPT_Wattributes, "invalid argument of %qs attribute",
503 IDENTIFIER_POINTER (name));
504 *no_add_attrs = true;
507 switch (TREE_CODE (decl))
510 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
512 && !TREE_STATIC (decl))
514 error ("%Jan address area attribute cannot be specified for "
515 "local variables", decl);
516 *no_add_attrs = true;
518 area = ia64_get_addr_area (decl);
519 if (area != ADDR_AREA_NORMAL && addr_area != area)
521 error ("address area of %q+D conflicts with previous "
522 "declaration", decl);
523 *no_add_attrs = true;
528 error ("%Jaddress area attribute cannot be specified for functions",
530 *no_add_attrs = true;
534 warning (OPT_Wattributes, "%qs attribute ignored",
535 IDENTIFIER_POINTER (name));
536 *no_add_attrs = true;
544 ia64_encode_addr_area (tree decl, rtx symbol)
548 flags = SYMBOL_REF_FLAGS (symbol);
549 switch (ia64_get_addr_area (decl))
551 case ADDR_AREA_NORMAL: break;
552 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
553 default: gcc_unreachable ();
555 SYMBOL_REF_FLAGS (symbol) = flags;
559 ia64_encode_section_info (tree decl, rtx rtl, int first)
561 default_encode_section_info (decl, rtl, first);
563 /* Careful not to prod global register variables. */
564 if (TREE_CODE (decl) == VAR_DECL
565 && GET_CODE (DECL_RTL (decl)) == MEM
566 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
567 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
568 ia64_encode_addr_area (decl, XEXP (rtl, 0));
571 /* Implement CONST_OK_FOR_LETTER_P. */
574 ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
579 return CONST_OK_FOR_I (value);
581 return CONST_OK_FOR_J (value);
583 return CONST_OK_FOR_K (value);
585 return CONST_OK_FOR_L (value);
587 return CONST_OK_FOR_M (value);
589 return CONST_OK_FOR_N (value);
591 return CONST_OK_FOR_O (value);
593 return CONST_OK_FOR_P (value);
599 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
602 ia64_const_double_ok_for_letter_p (rtx value, char c)
607 return CONST_DOUBLE_OK_FOR_G (value);
613 /* Implement EXTRA_CONSTRAINT. */
616 ia64_extra_constraint (rtx value, char c)
621 /* Non-volatile memory for FP_REG loads/stores. */
622 return memory_operand(value, VOIDmode) && !MEM_VOLATILE_P (value);
625 /* 1..4 for shladd arguments. */
626 return (GET_CODE (value) == CONST_INT
627 && INTVAL (value) >= 1 && INTVAL (value) <= 4);
630 /* Non-post-inc memory for asms and other unsavory creatures. */
631 return (GET_CODE (value) == MEM
632 && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
633 && (reload_in_progress || memory_operand (value, VOIDmode)));
636 /* Symbol ref to small-address-area. */
637 return small_addr_symbolic_operand (value, VOIDmode);
641 return value == CONST0_RTX (GET_MODE (value));
644 /* An integer vector, such that conversion to an integer yields a
645 value appropriate for an integer 'J' constraint. */
646 if (GET_CODE (value) == CONST_VECTOR
647 && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
649 value = simplify_subreg (DImode, value, GET_MODE (value), 0);
650 return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
655 /* A V2SF vector containing elements that satisfy 'G'. */
657 (GET_CODE (value) == CONST_VECTOR
658 && GET_MODE (value) == V2SFmode
659 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
660 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
667 /* Return 1 if the operands of a move are ok. */
670 ia64_move_ok (rtx dst, rtx src)
672 /* If we're under init_recog_no_volatile, we'll not be able to use
673 memory_operand. So check the code directly and don't worry about
674 the validity of the underlying address, which should have been
675 checked elsewhere anyway. */
676 if (GET_CODE (dst) != MEM)
678 if (GET_CODE (src) == MEM)
680 if (register_operand (src, VOIDmode))
683 /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */
684 if (INTEGRAL_MODE_P (GET_MODE (dst)))
685 return src == const0_rtx;
687 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
690 /* Return 1 if the operands are ok for a floating point load pair. */
693 ia64_load_pair_ok (rtx dst, rtx src)
695 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
697 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
699 switch (GET_CODE (XEXP (src, 0)))
708 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
710 if (GET_CODE (adjust) != CONST_INT
711 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
722 addp4_optimize_ok (rtx op1, rtx op2)
724 return (basereg_operand (op1, GET_MODE(op1)) !=
725 basereg_operand (op2, GET_MODE(op2)));
728 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
729 Return the length of the field, or <= 0 on failure. */
732 ia64_depz_field_mask (rtx rop, rtx rshift)
734 unsigned HOST_WIDE_INT op = INTVAL (rop);
735 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
737 /* Get rid of the zero bits we're shifting in. */
740 /* We must now have a solid block of 1's at bit 0. */
741 return exact_log2 (op + 1);
744 /* Return the TLS model to use for ADDR. */
746 static enum tls_model
747 tls_symbolic_operand_type (rtx addr)
749 enum tls_model tls_kind = 0;
751 if (GET_CODE (addr) == CONST)
753 if (GET_CODE (XEXP (addr, 0)) == PLUS
754 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
755 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
757 else if (GET_CODE (addr) == SYMBOL_REF)
758 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
763 /* Return true if X is a constant that is valid for some immediate
764 field in an instruction. */
767 ia64_legitimate_constant_p (rtx x)
769 switch (GET_CODE (x))
776 if (GET_MODE (x) == VOIDmode)
778 return CONST_DOUBLE_OK_FOR_G (x);
782 return tls_symbolic_operand_type (x) == 0;
786 enum machine_mode mode = GET_MODE (x);
788 if (mode == V2SFmode)
789 return ia64_extra_constraint (x, 'Y');
791 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
792 && GET_MODE_SIZE (mode) <= 8);
800 /* Don't allow TLS addresses to get spilled to memory. */
803 ia64_cannot_force_const_mem (rtx x)
805 return tls_symbolic_operand_type (x) != 0;
808 /* Expand a symbolic constant load. */
811 ia64_expand_load_address (rtx dest, rtx src)
813 gcc_assert (GET_CODE (dest) == REG);
815 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
816 having to pointer-extend the value afterward. Other forms of address
817 computation below are also more natural to compute as 64-bit quantities.
818 If we've been given an SImode destination register, change it. */
819 if (GET_MODE (dest) != Pmode)
820 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest), 0);
824 if (small_addr_symbolic_operand (src, VOIDmode))
828 emit_insn (gen_load_gprel64 (dest, src));
829 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
830 emit_insn (gen_load_fptr (dest, src));
831 else if (sdata_symbolic_operand (src, VOIDmode))
832 emit_insn (gen_load_gprel (dest, src));
835 HOST_WIDE_INT addend = 0;
838 /* We did split constant offsets in ia64_expand_move, and we did try
839 to keep them split in move_operand, but we also allowed reload to
840 rematerialize arbitrary constants rather than spill the value to
841 the stack and reload it. So we have to be prepared here to split
843 if (GET_CODE (src) == CONST)
845 HOST_WIDE_INT hi, lo;
847 hi = INTVAL (XEXP (XEXP (src, 0), 1));
848 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
854 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
858 tmp = gen_rtx_HIGH (Pmode, src);
859 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
860 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
862 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
863 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
867 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
868 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
875 static GTY(()) rtx gen_tls_tga;
877 gen_tls_get_addr (void)
880 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
884 static GTY(()) rtx thread_pointer_rtx;
886 gen_thread_pointer (void)
888 if (!thread_pointer_rtx)
889 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
890 return thread_pointer_rtx;
894 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
895 rtx orig_op1, HOST_WIDE_INT addend)
897 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
899 HOST_WIDE_INT addend_lo, addend_hi;
903 case TLS_MODEL_GLOBAL_DYNAMIC:
906 tga_op1 = gen_reg_rtx (Pmode);
907 emit_insn (gen_load_dtpmod (tga_op1, op1));
909 tga_op2 = gen_reg_rtx (Pmode);
910 emit_insn (gen_load_dtprel (tga_op2, op1));
912 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
913 LCT_CONST, Pmode, 2, tga_op1,
914 Pmode, tga_op2, Pmode);
916 insns = get_insns ();
919 if (GET_MODE (op0) != Pmode)
921 emit_libcall_block (insns, op0, tga_ret, op1);
924 case TLS_MODEL_LOCAL_DYNAMIC:
925 /* ??? This isn't the completely proper way to do local-dynamic
926 If the call to __tls_get_addr is used only by a single symbol,
927 then we should (somehow) move the dtprel to the second arg
928 to avoid the extra add. */
931 tga_op1 = gen_reg_rtx (Pmode);
932 emit_insn (gen_load_dtpmod (tga_op1, op1));
934 tga_op2 = const0_rtx;
936 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
937 LCT_CONST, Pmode, 2, tga_op1,
938 Pmode, tga_op2, Pmode);
940 insns = get_insns ();
943 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
945 tmp = gen_reg_rtx (Pmode);
946 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
948 if (!register_operand (op0, Pmode))
949 op0 = gen_reg_rtx (Pmode);
952 emit_insn (gen_load_dtprel (op0, op1));
953 emit_insn (gen_adddi3 (op0, tmp, op0));
956 emit_insn (gen_add_dtprel (op0, op1, tmp));
959 case TLS_MODEL_INITIAL_EXEC:
960 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
961 addend_hi = addend - addend_lo;
963 op1 = plus_constant (op1, addend_hi);
966 tmp = gen_reg_rtx (Pmode);
967 emit_insn (gen_load_tprel (tmp, op1));
969 if (!register_operand (op0, Pmode))
970 op0 = gen_reg_rtx (Pmode);
971 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
974 case TLS_MODEL_LOCAL_EXEC:
975 if (!register_operand (op0, Pmode))
976 op0 = gen_reg_rtx (Pmode);
982 emit_insn (gen_load_tprel (op0, op1));
983 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
986 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
994 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
995 orig_op0, 1, OPTAB_DIRECT);
998 if (GET_MODE (orig_op0) == Pmode)
1000 return gen_lowpart (GET_MODE (orig_op0), op0);
1004 ia64_expand_move (rtx op0, rtx op1)
1006 enum machine_mode mode = GET_MODE (op0);
1008 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
1009 op1 = force_reg (mode, op1);
1011 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
1013 HOST_WIDE_INT addend = 0;
1014 enum tls_model tls_kind;
1017 if (GET_CODE (op1) == CONST
1018 && GET_CODE (XEXP (op1, 0)) == PLUS
1019 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1021 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1022 sym = XEXP (XEXP (op1, 0), 0);
1025 tls_kind = tls_symbolic_operand_type (sym);
1027 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1029 if (any_offset_symbol_operand (sym, mode))
1031 else if (aligned_offset_symbol_operand (sym, mode))
1033 HOST_WIDE_INT addend_lo, addend_hi;
1035 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1036 addend_hi = addend - addend_lo;
1040 op1 = plus_constant (sym, addend_hi);
1049 if (reload_completed)
1051 /* We really should have taken care of this offset earlier. */
1052 gcc_assert (addend == 0);
1053 if (ia64_expand_load_address (op0, op1))
1059 rtx subtarget = no_new_pseudos ? op0 : gen_reg_rtx (mode);
1061 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1063 op1 = expand_simple_binop (mode, PLUS, subtarget,
1064 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1073 /* Split a move from OP1 to OP0 conditional on COND. */
1076 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1078 rtx insn, first = get_last_insn ();
1080 emit_move_insn (op0, op1);
1082 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1084 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1088 /* Split a post-reload TImode or TFmode reference into two DImode
1089 components. This is made extra difficult by the fact that we do
1090 not get any scratch registers to work with, because reload cannot
1091 be prevented from giving us a scratch that overlaps the register
1092 pair involved. So instead, when addressing memory, we tweak the
1093 pointer register up and back down with POST_INCs. Or up and not
1094 back down when we can get away with it.
1096 REVERSED is true when the loads must be done in reversed order
1097 (high word first) for correctness. DEAD is true when the pointer
1098 dies with the second insn we generate and therefore the second
1099 address must not carry a postmodify.
1101 May return an insn which is to be emitted after the moves. */
1104 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1108 switch (GET_CODE (in))
1111 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1112 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1117 /* Cannot occur reversed. */
1118 gcc_assert (!reversed);
1120 if (GET_MODE (in) != TFmode)
1121 split_double (in, &out[0], &out[1]);
1123 /* split_double does not understand how to split a TFmode
1124 quantity into a pair of DImode constants. */
1127 unsigned HOST_WIDE_INT p[2];
1128 long l[4]; /* TFmode is 128 bits */
1130 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1131 real_to_target (l, &r, TFmode);
1133 if (FLOAT_WORDS_BIG_ENDIAN)
1135 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1136 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1140 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1141 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1143 out[0] = GEN_INT (p[0]);
1144 out[1] = GEN_INT (p[1]);
1150 rtx base = XEXP (in, 0);
1153 switch (GET_CODE (base))
1158 out[0] = adjust_automodify_address
1159 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1160 out[1] = adjust_automodify_address
1161 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1165 /* Reversal requires a pre-increment, which can only
1166 be done as a separate insn. */
1167 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1168 out[0] = adjust_automodify_address
1169 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1170 out[1] = adjust_address (in, DImode, 0);
1175 gcc_assert (!reversed && !dead);
1177 /* Just do the increment in two steps. */
1178 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1179 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1183 gcc_assert (!reversed && !dead);
1185 /* Add 8, subtract 24. */
1186 base = XEXP (base, 0);
1187 out[0] = adjust_automodify_address
1188 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1189 out[1] = adjust_automodify_address
1191 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1196 gcc_assert (!reversed && !dead);
1198 /* Extract and adjust the modification. This case is
1199 trickier than the others, because we might have an
1200 index register, or we might have a combined offset that
1201 doesn't fit a signed 9-bit displacement field. We can
1202 assume the incoming expression is already legitimate. */
1203 offset = XEXP (base, 1);
1204 base = XEXP (base, 0);
1206 out[0] = adjust_automodify_address
1207 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1209 if (GET_CODE (XEXP (offset, 1)) == REG)
1211 /* Can't adjust the postmodify to match. Emit the
1212 original, then a separate addition insn. */
1213 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1214 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1218 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1219 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1221 /* Again the postmodify cannot be made to match,
1222 but in this case it's more efficient to get rid
1223 of the postmodify entirely and fix up with an
1225 out[1] = adjust_automodify_address (in, DImode, base, 8);
1227 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1231 /* Combined offset still fits in the displacement field.
1232 (We cannot overflow it at the high end.) */
1233 out[1] = adjust_automodify_address
1234 (in, DImode, gen_rtx_POST_MODIFY
1235 (Pmode, base, gen_rtx_PLUS
1237 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1256 /* Split a TImode or TFmode move instruction after reload.
1257 This is used by *movtf_internal and *movti_internal. */
1259 ia64_split_tmode_move (rtx operands[])
1261 rtx in[2], out[2], insn;
1264 bool reversed = false;
1266 /* It is possible for reload to decide to overwrite a pointer with
1267 the value it points to. In that case we have to do the loads in
1268 the appropriate order so that the pointer is not destroyed too
1269 early. Also we must not generate a postmodify for that second
1270 load, or rws_access_regno will die. */
1271 if (GET_CODE (operands[1]) == MEM
1272 && reg_overlap_mentioned_p (operands[0], operands[1]))
1274 rtx base = XEXP (operands[1], 0);
1275 while (GET_CODE (base) != REG)
1276 base = XEXP (base, 0);
1278 if (REGNO (base) == REGNO (operands[0]))
1282 /* Another reason to do the moves in reversed order is if the first
1283 element of the target register pair is also the second element of
1284 the source register pair. */
1285 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1286 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1289 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1290 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1292 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1293 if (GET_CODE (EXP) == MEM \
1294 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1295 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1296 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1297 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1298 XEXP (XEXP (EXP, 0), 0), \
1301 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1302 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1303 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1305 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1306 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1307 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1310 emit_insn (fixup[0]);
1312 emit_insn (fixup[1]);
1314 #undef MAYBE_ADD_REG_INC_NOTE
1317 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1318 through memory plus an extra GR scratch register. Except that you can
1319 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1320 SECONDARY_RELOAD_CLASS, but not both.
1322 We got into problems in the first place by allowing a construct like
1323 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1324 This solution attempts to prevent this situation from occurring. When
1325 we see something like the above, we spill the inner register to memory. */
1328 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1330 if (GET_CODE (in) == SUBREG
1331 && GET_MODE (SUBREG_REG (in)) == TImode
1332 && GET_CODE (SUBREG_REG (in)) == REG)
1334 rtx memt = assign_stack_temp (TImode, 16, 0);
1335 emit_move_insn (memt, SUBREG_REG (in));
1336 return adjust_address (memt, mode, 0);
1338 else if (force && GET_CODE (in) == REG)
1340 rtx memx = assign_stack_temp (mode, 16, 0);
1341 emit_move_insn (memx, in);
1348 /* Expand the movxf or movrf pattern (MODE says which) with the given
1349 OPERANDS, returning true if the pattern should then invoke
1353 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1355 rtx op0 = operands[0];
1357 if (GET_CODE (op0) == SUBREG)
1358 op0 = SUBREG_REG (op0);
1360 /* We must support XFmode loads into general registers for stdarg/vararg,
1361 unprototyped calls, and a rare case where a long double is passed as
1362 an argument after a float HFA fills the FP registers. We split them into
1363 DImode loads for convenience. We also need to support XFmode stores
1364 for the last case. This case does not happen for stdarg/vararg routines,
1365 because we do a block store to memory of unnamed arguments. */
1367 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1371 /* We're hoping to transform everything that deals with XFmode
1372 quantities and GR registers early in the compiler. */
1373 gcc_assert (!no_new_pseudos);
1375 /* Struct to register can just use TImode instead. */
1376 if ((GET_CODE (operands[1]) == SUBREG
1377 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1378 || (GET_CODE (operands[1]) == REG
1379 && GR_REGNO_P (REGNO (operands[1]))))
1381 rtx op1 = operands[1];
1383 if (GET_CODE (op1) == SUBREG)
1384 op1 = SUBREG_REG (op1);
1386 op1 = gen_rtx_REG (TImode, REGNO (op1));
1388 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1392 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1394 /* Don't word-swap when reading in the constant. */
1395 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1396 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1398 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1399 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1404 /* If the quantity is in a register not known to be GR, spill it. */
1405 if (register_operand (operands[1], mode))
1406 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1408 gcc_assert (GET_CODE (operands[1]) == MEM);
1410 /* Don't word-swap when reading in the value. */
1411 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1412 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1414 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1415 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1419 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1421 /* We're hoping to transform everything that deals with XFmode
1422 quantities and GR registers early in the compiler. */
1423 gcc_assert (!no_new_pseudos);
1425 /* Op0 can't be a GR_REG here, as that case is handled above.
1426 If op0 is a register, then we spill op1, so that we now have a
1427 MEM operand. This requires creating an XFmode subreg of a TImode reg
1428 to force the spill. */
1429 if (register_operand (operands[0], mode))
1431 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1432 op1 = gen_rtx_SUBREG (mode, op1, 0);
1433 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1440 gcc_assert (GET_CODE (operands[0]) == MEM);
1442 /* Don't word-swap when writing out the value. */
1443 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1444 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1446 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1447 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1452 if (!reload_in_progress && !reload_completed)
1454 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1456 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1458 rtx memt, memx, in = operands[1];
1459 if (CONSTANT_P (in))
1460 in = validize_mem (force_const_mem (mode, in));
1461 if (GET_CODE (in) == MEM)
1462 memt = adjust_address (in, TImode, 0);
1465 memt = assign_stack_temp (TImode, 16, 0);
1466 memx = adjust_address (memt, mode, 0);
1467 emit_move_insn (memx, in);
1469 emit_move_insn (op0, memt);
1473 if (!ia64_move_ok (operands[0], operands[1]))
1474 operands[1] = force_reg (mode, operands[1]);
1480 /* Emit comparison instruction if necessary, returning the expression
1481 that holds the compare result in the proper mode. */
1483 static GTY(()) rtx cmptf_libfunc;
1486 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1488 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1491 /* If we have a BImode input, then we already have a compare result, and
1492 do not need to emit another comparison. */
1493 if (GET_MODE (op0) == BImode)
1495 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1498 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1499 magic number as its third argument, that indicates what to do.
1500 The return value is an integer to be compared against zero. */
1501 else if (GET_MODE (op0) == TFmode)
1504 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1510 enum rtx_code ncode;
1513 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1516 /* 1 = equal, 0 = not equal. Equality operators do
1517 not raise FP_INVALID when given an SNaN operand. */
1518 case EQ: magic = QCMP_EQ; ncode = NE; break;
1519 case NE: magic = QCMP_EQ; ncode = EQ; break;
1520 /* isunordered() from C99. */
1521 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1522 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1523 /* Relational operators raise FP_INVALID when given
1525 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1526 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1527 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1528 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1529 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1530 Expanders for buneq etc. weuld have to be added to ia64.md
1531 for this to be useful. */
1532 default: gcc_unreachable ();
1537 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1538 op0, TFmode, op1, TFmode,
1539 GEN_INT (magic), DImode);
1540 cmp = gen_reg_rtx (BImode);
1541 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1542 gen_rtx_fmt_ee (ncode, BImode,
1545 insns = get_insns ();
1548 emit_libcall_block (insns, cmp, cmp,
1549 gen_rtx_fmt_ee (code, BImode, op0, op1));
1554 cmp = gen_reg_rtx (BImode);
1555 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1556 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1560 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1563 /* Generate an integral vector comparison. Return true if the condition has
1564 been reversed, and so the sense of the comparison should be inverted. */
1567 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1568 rtx dest, rtx op0, rtx op1)
1570 bool negate = false;
1573 /* Canonicalize the comparison to EQ, GT, GTU. */
1584 code = reverse_condition (code);
1590 code = reverse_condition (code);
1596 code = swap_condition (code);
1597 x = op0, op0 = op1, op1 = x;
1604 /* Unsigned parallel compare is not supported by the hardware. Play some
1605 tricks to turn this into a signed comparison against 0. */
1614 /* Perform a parallel modulo subtraction. */
1615 t1 = gen_reg_rtx (V2SImode);
1616 emit_insn (gen_subv2si3 (t1, op0, op1));
1618 /* Extract the original sign bit of op0. */
1619 mask = GEN_INT (-0x80000000);
1620 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1621 mask = force_reg (V2SImode, mask);
1622 t2 = gen_reg_rtx (V2SImode);
1623 emit_insn (gen_andv2si3 (t2, op0, mask));
1625 /* XOR it back into the result of the subtraction. This results
1626 in the sign bit set iff we saw unsigned underflow. */
1627 x = gen_reg_rtx (V2SImode);
1628 emit_insn (gen_xorv2si3 (x, t1, t2));
1632 op1 = CONST0_RTX (mode);
1638 /* Perform a parallel unsigned saturating subtraction. */
1639 x = gen_reg_rtx (mode);
1640 emit_insn (gen_rtx_SET (VOIDmode, x,
1641 gen_rtx_US_MINUS (mode, op0, op1)));
1645 op1 = CONST0_RTX (mode);
1654 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1655 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1660 /* Emit an integral vector conditional move. */
1663 ia64_expand_vecint_cmov (rtx operands[])
1665 enum machine_mode mode = GET_MODE (operands[0]);
1666 enum rtx_code code = GET_CODE (operands[3]);
1670 cmp = gen_reg_rtx (mode);
1671 negate = ia64_expand_vecint_compare (code, mode, cmp,
1672 operands[4], operands[5]);
1674 ot = operands[1+negate];
1675 of = operands[2-negate];
1677 if (ot == CONST0_RTX (mode))
1679 if (of == CONST0_RTX (mode))
1681 emit_move_insn (operands[0], ot);
1685 x = gen_rtx_NOT (mode, cmp);
1686 x = gen_rtx_AND (mode, x, of);
1687 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1689 else if (of == CONST0_RTX (mode))
1691 x = gen_rtx_AND (mode, cmp, ot);
1692 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1698 t = gen_reg_rtx (mode);
1699 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1700 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1702 f = gen_reg_rtx (mode);
1703 x = gen_rtx_NOT (mode, cmp);
1704 x = gen_rtx_AND (mode, x, operands[2-negate]);
1705 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1707 x = gen_rtx_IOR (mode, t, f);
1708 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1712 /* Emit an integral vector min or max operation. Return true if all done. */
1715 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1720 /* These four combinations are supported directly. */
1721 if (mode == V8QImode && (code == UMIN || code == UMAX))
1723 if (mode == V4HImode && (code == SMIN || code == SMAX))
1726 /* This combination can be implemented with only saturating subtraction. */
1727 if (mode == V4HImode && code == UMAX)
1729 rtx x, tmp = gen_reg_rtx (mode);
1731 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1732 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1734 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1738 /* Everything else implemented via vector comparisons. */
1739 xops[0] = operands[0];
1740 xops[4] = xops[1] = operands[1];
1741 xops[5] = xops[2] = operands[2];
1760 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1762 ia64_expand_vecint_cmov (xops);
1766 /* Emit an integral vector widening sum operations. */
1769 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1772 enum machine_mode wmode, mode;
1773 rtx (*unpack_l) (rtx, rtx, rtx);
1774 rtx (*unpack_h) (rtx, rtx, rtx);
1775 rtx (*plus) (rtx, rtx, rtx);
1777 wmode = GET_MODE (operands[0]);
1778 mode = GET_MODE (operands[1]);
1783 unpack_l = gen_unpack1_l;
1784 unpack_h = gen_unpack1_h;
1785 plus = gen_addv4hi3;
1788 unpack_l = gen_unpack2_l;
1789 unpack_h = gen_unpack2_h;
1790 plus = gen_addv2si3;
1796 /* Fill in x with the sign extension of each element in op1. */
1798 x = CONST0_RTX (mode);
1803 x = gen_reg_rtx (mode);
1805 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1810 l = gen_reg_rtx (wmode);
1811 h = gen_reg_rtx (wmode);
1812 s = gen_reg_rtx (wmode);
1814 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1815 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1816 emit_insn (plus (s, l, operands[2]));
1817 emit_insn (plus (operands[0], h, s));
1820 /* Emit a signed or unsigned V8QI dot product operation. */
1823 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1825 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1827 /* Fill in x1 and x2 with the sign extension of each element. */
1829 x1 = x2 = CONST0_RTX (V8QImode);
1834 x1 = gen_reg_rtx (V8QImode);
1835 x2 = gen_reg_rtx (V8QImode);
1837 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1838 CONST0_RTX (V8QImode));
1840 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1841 CONST0_RTX (V8QImode));
1845 l1 = gen_reg_rtx (V4HImode);
1846 l2 = gen_reg_rtx (V4HImode);
1847 h1 = gen_reg_rtx (V4HImode);
1848 h2 = gen_reg_rtx (V4HImode);
1850 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1851 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1852 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1853 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1855 p1 = gen_reg_rtx (V2SImode);
1856 p2 = gen_reg_rtx (V2SImode);
1857 p3 = gen_reg_rtx (V2SImode);
1858 p4 = gen_reg_rtx (V2SImode);
1859 emit_insn (gen_pmpy2_r (p1, l1, l2));
1860 emit_insn (gen_pmpy2_l (p2, l1, l2));
1861 emit_insn (gen_pmpy2_r (p3, h1, h2));
1862 emit_insn (gen_pmpy2_l (p4, h1, h2));
1864 s1 = gen_reg_rtx (V2SImode);
1865 s2 = gen_reg_rtx (V2SImode);
1866 s3 = gen_reg_rtx (V2SImode);
1867 emit_insn (gen_addv2si3 (s1, p1, p2));
1868 emit_insn (gen_addv2si3 (s2, p3, p4));
1869 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1870 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1873 /* Emit the appropriate sequence for a call. */
1876 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1881 addr = XEXP (addr, 0);
1882 addr = convert_memory_address (DImode, addr);
1883 b0 = gen_rtx_REG (DImode, R_BR (0));
1885 /* ??? Should do this for functions known to bind local too. */
1886 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1889 insn = gen_sibcall_nogp (addr);
1891 insn = gen_call_nogp (addr, b0);
1893 insn = gen_call_value_nogp (retval, addr, b0);
1894 insn = emit_call_insn (insn);
1899 insn = gen_sibcall_gp (addr);
1901 insn = gen_call_gp (addr, b0);
1903 insn = gen_call_value_gp (retval, addr, b0);
1904 insn = emit_call_insn (insn);
1906 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1910 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1914 ia64_reload_gp (void)
1918 if (current_frame_info.reg_save_gp)
1919 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1922 HOST_WIDE_INT offset;
1924 offset = (current_frame_info.spill_cfa_off
1925 + current_frame_info.spill_size);
1926 if (frame_pointer_needed)
1928 tmp = hard_frame_pointer_rtx;
1933 tmp = stack_pointer_rtx;
1934 offset = current_frame_info.total_size - offset;
1937 if (CONST_OK_FOR_I (offset))
1938 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1939 tmp, GEN_INT (offset)));
1942 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1943 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1944 pic_offset_table_rtx, tmp));
1947 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1950 emit_move_insn (pic_offset_table_rtx, tmp);
1954 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1955 rtx scratch_b, int noreturn_p, int sibcall_p)
1958 bool is_desc = false;
1960 /* If we find we're calling through a register, then we're actually
1961 calling through a descriptor, so load up the values. */
1962 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1967 /* ??? We are currently constrained to *not* use peep2, because
1968 we can legitimately change the global lifetime of the GP
1969 (in the form of killing where previously live). This is
1970 because a call through a descriptor doesn't use the previous
1971 value of the GP, while a direct call does, and we do not
1972 commit to either form until the split here.
1974 That said, this means that we lack precise life info for
1975 whether ADDR is dead after this call. This is not terribly
1976 important, since we can fix things up essentially for free
1977 with the POST_DEC below, but it's nice to not use it when we
1978 can immediately tell it's not necessary. */
1979 addr_dead_p = ((noreturn_p || sibcall_p
1980 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1982 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1984 /* Load the code address into scratch_b. */
1985 tmp = gen_rtx_POST_INC (Pmode, addr);
1986 tmp = gen_rtx_MEM (Pmode, tmp);
1987 emit_move_insn (scratch_r, tmp);
1988 emit_move_insn (scratch_b, scratch_r);
1990 /* Load the GP address. If ADDR is not dead here, then we must
1991 revert the change made above via the POST_INCREMENT. */
1993 tmp = gen_rtx_POST_DEC (Pmode, addr);
1996 tmp = gen_rtx_MEM (Pmode, tmp);
1997 emit_move_insn (pic_offset_table_rtx, tmp);
2004 insn = gen_sibcall_nogp (addr);
2006 insn = gen_call_value_nogp (retval, addr, retaddr);
2008 insn = gen_call_nogp (addr, retaddr);
2009 emit_call_insn (insn);
2011 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2015 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2017 This differs from the generic code in that we know about the zero-extending
2018 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2019 also know that ld.acq+cmpxchg.rel equals a full barrier.
2021 The loop we want to generate looks like
2026 new_reg = cmp_reg op val;
2027 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2028 if (cmp_reg != old_reg)
2031 Note that we only do the plain load from memory once. Subsequent
2032 iterations use the value loaded by the compare-and-swap pattern. */
2035 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2036 rtx old_dst, rtx new_dst)
2038 enum machine_mode mode = GET_MODE (mem);
2039 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2040 enum insn_code icode;
2042 /* Special case for using fetchadd. */
2043 if ((mode == SImode || mode == DImode)
2044 && (code == PLUS || code == MINUS)
2045 && fetchadd_operand (val, mode))
2048 val = GEN_INT (-INTVAL (val));
2051 old_dst = gen_reg_rtx (mode);
2053 emit_insn (gen_memory_barrier ());
2056 icode = CODE_FOR_fetchadd_acq_si;
2058 icode = CODE_FOR_fetchadd_acq_di;
2059 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2063 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2065 if (new_reg != new_dst)
2066 emit_move_insn (new_dst, new_reg);
2071 /* Because of the volatile mem read, we get an ld.acq, which is the
2072 front half of the full barrier. The end half is the cmpxchg.rel. */
2073 gcc_assert (MEM_VOLATILE_P (mem));
2075 old_reg = gen_reg_rtx (DImode);
2076 cmp_reg = gen_reg_rtx (DImode);
2077 label = gen_label_rtx ();
2081 val = simplify_gen_subreg (DImode, val, mode, 0);
2082 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2085 emit_move_insn (cmp_reg, mem);
2089 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2090 emit_move_insn (old_reg, cmp_reg);
2091 emit_move_insn (ar_ccv, cmp_reg);
2094 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2099 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2102 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2103 true, OPTAB_DIRECT);
2106 new_reg = gen_lowpart (mode, new_reg);
2108 emit_move_insn (new_dst, new_reg);
2112 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2113 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2114 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2115 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2120 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2122 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2125 /* Begin the assembly file. */
2128 ia64_file_start (void)
2130 /* Variable tracking should be run after all optimizations which change order
2131 of insns. It also needs a valid CFG. This can't be done in
2132 ia64_override_options, because flag_var_tracking is finalized after
2134 ia64_flag_var_tracking = flag_var_tracking;
2135 flag_var_tracking = 0;
2137 default_file_start ();
2138 emit_safe_across_calls ();
2142 emit_safe_across_calls (void)
2144 unsigned int rs, re;
2151 while (rs < 64 && call_used_regs[PR_REG (rs)])
2155 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2159 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2163 fputc (',', asm_out_file);
2165 fprintf (asm_out_file, "p%u", rs);
2167 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2171 fputc ('\n', asm_out_file);
2174 /* Helper function for ia64_compute_frame_size: find an appropriate general
2175 register to spill some special register to. SPECIAL_SPILL_MASK contains
2176 bits in GR0 to GR31 that have already been allocated by this routine.
2177 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2180 find_gr_spill (int try_locals)
2184 /* If this is a leaf function, first try an otherwise unused
2185 call-clobbered register. */
2186 if (current_function_is_leaf)
2188 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2189 if (! regs_ever_live[regno]
2190 && call_used_regs[regno]
2191 && ! fixed_regs[regno]
2192 && ! global_regs[regno]
2193 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2195 current_frame_info.gr_used_mask |= 1 << regno;
2202 regno = current_frame_info.n_local_regs;
2203 /* If there is a frame pointer, then we can't use loc79, because
2204 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2205 reg_name switching code in ia64_expand_prologue. */
2206 if (regno < (80 - frame_pointer_needed))
2208 current_frame_info.n_local_regs = regno + 1;
2209 return LOC_REG (0) + regno;
2213 /* Failed to find a general register to spill to. Must use stack. */
2217 /* In order to make for nice schedules, we try to allocate every temporary
2218 to a different register. We must of course stay away from call-saved,
2219 fixed, and global registers. We must also stay away from registers
2220 allocated in current_frame_info.gr_used_mask, since those include regs
2221 used all through the prologue.
2223 Any register allocated here must be used immediately. The idea is to
2224 aid scheduling, not to solve data flow problems. */
2226 static int last_scratch_gr_reg;
2229 next_scratch_gr_reg (void)
2233 for (i = 0; i < 32; ++i)
2235 regno = (last_scratch_gr_reg + i + 1) & 31;
2236 if (call_used_regs[regno]
2237 && ! fixed_regs[regno]
2238 && ! global_regs[regno]
2239 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2241 last_scratch_gr_reg = regno;
2246 /* There must be _something_ available. */
2250 /* Helper function for ia64_compute_frame_size, called through
2251 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2254 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2256 unsigned int regno = REGNO (reg);
2259 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2260 for (i = 0; i < n; ++i)
2261 current_frame_info.gr_used_mask |= 1 << (regno + i);
2265 /* Returns the number of bytes offset between the frame pointer and the stack
2266 pointer for the current function. SIZE is the number of bytes of space
2267 needed for local variables. */
2270 ia64_compute_frame_size (HOST_WIDE_INT size)
2272 HOST_WIDE_INT total_size;
2273 HOST_WIDE_INT spill_size = 0;
2274 HOST_WIDE_INT extra_spill_size = 0;
2275 HOST_WIDE_INT pretend_args_size;
2278 int spilled_gr_p = 0;
2279 int spilled_fr_p = 0;
2283 if (current_frame_info.initialized)
2286 memset (¤t_frame_info, 0, sizeof current_frame_info);
2287 CLEAR_HARD_REG_SET (mask);
2289 /* Don't allocate scratches to the return register. */
2290 diddle_return_value (mark_reg_gr_used_mask, NULL);
2292 /* Don't allocate scratches to the EH scratch registers. */
2293 if (cfun->machine->ia64_eh_epilogue_sp)
2294 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2295 if (cfun->machine->ia64_eh_epilogue_bsp)
2296 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2298 /* Find the size of the register stack frame. We have only 80 local
2299 registers, because we reserve 8 for the inputs and 8 for the
2302 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2303 since we'll be adjusting that down later. */
2304 regno = LOC_REG (78) + ! frame_pointer_needed;
2305 for (; regno >= LOC_REG (0); regno--)
2306 if (regs_ever_live[regno])
2308 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2310 /* For functions marked with the syscall_linkage attribute, we must mark
2311 all eight input registers as in use, so that locals aren't visible to
2314 if (cfun->machine->n_varargs > 0
2315 || lookup_attribute ("syscall_linkage",
2316 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2317 current_frame_info.n_input_regs = 8;
2320 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2321 if (regs_ever_live[regno])
2323 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2326 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2327 if (regs_ever_live[regno])
2329 i = regno - OUT_REG (0) + 1;
2331 #ifndef PROFILE_HOOK
2332 /* When -p profiling, we need one output register for the mcount argument.
2333 Likewise for -a profiling for the bb_init_func argument. For -ax
2334 profiling, we need two output registers for the two bb_init_trace_func
2336 if (current_function_profile)
2339 current_frame_info.n_output_regs = i;
2341 /* ??? No rotating register support yet. */
2342 current_frame_info.n_rotate_regs = 0;
2344 /* Discover which registers need spilling, and how much room that
2345 will take. Begin with floating point and general registers,
2346 which will always wind up on the stack. */
2348 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2349 if (regs_ever_live[regno] && ! call_used_regs[regno])
2351 SET_HARD_REG_BIT (mask, regno);
2357 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2358 if (regs_ever_live[regno] && ! call_used_regs[regno])
2360 SET_HARD_REG_BIT (mask, regno);
2366 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2367 if (regs_ever_live[regno] && ! call_used_regs[regno])
2369 SET_HARD_REG_BIT (mask, regno);
2374 /* Now come all special registers that might get saved in other
2375 general registers. */
2377 if (frame_pointer_needed)
2379 current_frame_info.reg_fp = find_gr_spill (1);
2380 /* If we did not get a register, then we take LOC79. This is guaranteed
2381 to be free, even if regs_ever_live is already set, because this is
2382 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2383 as we don't count loc79 above. */
2384 if (current_frame_info.reg_fp == 0)
2386 current_frame_info.reg_fp = LOC_REG (79);
2387 current_frame_info.n_local_regs++;
2391 if (! current_function_is_leaf)
2393 /* Emit a save of BR0 if we call other functions. Do this even
2394 if this function doesn't return, as EH depends on this to be
2395 able to unwind the stack. */
2396 SET_HARD_REG_BIT (mask, BR_REG (0));
2398 current_frame_info.reg_save_b0 = find_gr_spill (1);
2399 if (current_frame_info.reg_save_b0 == 0)
2405 /* Similarly for ar.pfs. */
2406 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2407 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2408 if (current_frame_info.reg_save_ar_pfs == 0)
2410 extra_spill_size += 8;
2414 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2415 registers are clobbered, so we fall back to the stack. */
2416 current_frame_info.reg_save_gp
2417 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2418 if (current_frame_info.reg_save_gp == 0)
2420 SET_HARD_REG_BIT (mask, GR_REG (1));
2427 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2429 SET_HARD_REG_BIT (mask, BR_REG (0));
2434 if (regs_ever_live[AR_PFS_REGNUM])
2436 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2437 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2438 if (current_frame_info.reg_save_ar_pfs == 0)
2440 extra_spill_size += 8;
2446 /* Unwind descriptor hackery: things are most efficient if we allocate
2447 consecutive GR save registers for RP, PFS, FP in that order. However,
2448 it is absolutely critical that FP get the only hard register that's
2449 guaranteed to be free, so we allocated it first. If all three did
2450 happen to be allocated hard regs, and are consecutive, rearrange them
2451 into the preferred order now. */
2452 if (current_frame_info.reg_fp != 0
2453 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2454 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2456 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2457 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2458 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2461 /* See if we need to store the predicate register block. */
2462 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2463 if (regs_ever_live[regno] && ! call_used_regs[regno])
2465 if (regno <= PR_REG (63))
2467 SET_HARD_REG_BIT (mask, PR_REG (0));
2468 current_frame_info.reg_save_pr = find_gr_spill (1);
2469 if (current_frame_info.reg_save_pr == 0)
2471 extra_spill_size += 8;
2475 /* ??? Mark them all as used so that register renaming and such
2476 are free to use them. */
2477 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2478 regs_ever_live[regno] = 1;
2481 /* If we're forced to use st8.spill, we're forced to save and restore
2482 ar.unat as well. The check for existing liveness allows inline asm
2483 to touch ar.unat. */
2484 if (spilled_gr_p || cfun->machine->n_varargs
2485 || regs_ever_live[AR_UNAT_REGNUM])
2487 regs_ever_live[AR_UNAT_REGNUM] = 1;
2488 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2489 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2490 if (current_frame_info.reg_save_ar_unat == 0)
2492 extra_spill_size += 8;
2497 if (regs_ever_live[AR_LC_REGNUM])
2499 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2500 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2501 if (current_frame_info.reg_save_ar_lc == 0)
2503 extra_spill_size += 8;
2508 /* If we have an odd number of words of pretend arguments written to
2509 the stack, then the FR save area will be unaligned. We round the
2510 size of this area up to keep things 16 byte aligned. */
2512 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2514 pretend_args_size = current_function_pretend_args_size;
2516 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2517 + current_function_outgoing_args_size);
2518 total_size = IA64_STACK_ALIGN (total_size);
2520 /* We always use the 16-byte scratch area provided by the caller, but
2521 if we are a leaf function, there's no one to which we need to provide
2523 if (current_function_is_leaf)
2524 total_size = MAX (0, total_size - 16);
2526 current_frame_info.total_size = total_size;
2527 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2528 current_frame_info.spill_size = spill_size;
2529 current_frame_info.extra_spill_size = extra_spill_size;
2530 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2531 current_frame_info.n_spilled = n_spilled;
2532 current_frame_info.initialized = reload_completed;
2535 /* Compute the initial difference between the specified pair of registers. */
2538 ia64_initial_elimination_offset (int from, int to)
2540 HOST_WIDE_INT offset;
2542 ia64_compute_frame_size (get_frame_size ());
2545 case FRAME_POINTER_REGNUM:
2548 case HARD_FRAME_POINTER_REGNUM:
2549 if (current_function_is_leaf)
2550 offset = -current_frame_info.total_size;
2552 offset = -(current_frame_info.total_size
2553 - current_function_outgoing_args_size - 16);
2556 case STACK_POINTER_REGNUM:
2557 if (current_function_is_leaf)
2560 offset = 16 + current_function_outgoing_args_size;
2568 case ARG_POINTER_REGNUM:
2569 /* Arguments start above the 16 byte save area, unless stdarg
2570 in which case we store through the 16 byte save area. */
2573 case HARD_FRAME_POINTER_REGNUM:
2574 offset = 16 - current_function_pretend_args_size;
2577 case STACK_POINTER_REGNUM:
2578 offset = (current_frame_info.total_size
2579 + 16 - current_function_pretend_args_size);
2594 /* If there are more than a trivial number of register spills, we use
2595 two interleaved iterators so that we can get two memory references
2598 In order to simplify things in the prologue and epilogue expanders,
2599 we use helper functions to fix up the memory references after the
2600 fact with the appropriate offsets to a POST_MODIFY memory mode.
2601 The following data structure tracks the state of the two iterators
2602 while insns are being emitted. */
2604 struct spill_fill_data
2606 rtx init_after; /* point at which to emit initializations */
2607 rtx init_reg[2]; /* initial base register */
2608 rtx iter_reg[2]; /* the iterator registers */
2609 rtx *prev_addr[2]; /* address of last memory use */
2610 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2611 HOST_WIDE_INT prev_off[2]; /* last offset */
2612 int n_iter; /* number of iterators in use */
2613 int next_iter; /* next iterator to use */
2614 unsigned int save_gr_used_mask;
2617 static struct spill_fill_data spill_fill_data;
2620 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2624 spill_fill_data.init_after = get_last_insn ();
2625 spill_fill_data.init_reg[0] = init_reg;
2626 spill_fill_data.init_reg[1] = init_reg;
2627 spill_fill_data.prev_addr[0] = NULL;
2628 spill_fill_data.prev_addr[1] = NULL;
2629 spill_fill_data.prev_insn[0] = NULL;
2630 spill_fill_data.prev_insn[1] = NULL;
2631 spill_fill_data.prev_off[0] = cfa_off;
2632 spill_fill_data.prev_off[1] = cfa_off;
2633 spill_fill_data.next_iter = 0;
2634 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2636 spill_fill_data.n_iter = 1 + (n_spills > 2);
2637 for (i = 0; i < spill_fill_data.n_iter; ++i)
2639 int regno = next_scratch_gr_reg ();
2640 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2641 current_frame_info.gr_used_mask |= 1 << regno;
2646 finish_spill_pointers (void)
2648 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2652 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2654 int iter = spill_fill_data.next_iter;
2655 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2656 rtx disp_rtx = GEN_INT (disp);
2659 if (spill_fill_data.prev_addr[iter])
2661 if (CONST_OK_FOR_N (disp))
2663 *spill_fill_data.prev_addr[iter]
2664 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2665 gen_rtx_PLUS (DImode,
2666 spill_fill_data.iter_reg[iter],
2668 REG_NOTES (spill_fill_data.prev_insn[iter])
2669 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2670 REG_NOTES (spill_fill_data.prev_insn[iter]));
2674 /* ??? Could use register post_modify for loads. */
2675 if (! CONST_OK_FOR_I (disp))
2677 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2678 emit_move_insn (tmp, disp_rtx);
2681 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2682 spill_fill_data.iter_reg[iter], disp_rtx));
2685 /* Micro-optimization: if we've created a frame pointer, it's at
2686 CFA 0, which may allow the real iterator to be initialized lower,
2687 slightly increasing parallelism. Also, if there are few saves
2688 it may eliminate the iterator entirely. */
2690 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2691 && frame_pointer_needed)
2693 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2694 set_mem_alias_set (mem, get_varargs_alias_set ());
2702 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2703 spill_fill_data.init_reg[iter]);
2708 if (! CONST_OK_FOR_I (disp))
2710 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2711 emit_move_insn (tmp, disp_rtx);
2715 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2716 spill_fill_data.init_reg[iter],
2723 /* Careful for being the first insn in a sequence. */
2724 if (spill_fill_data.init_after)
2725 insn = emit_insn_after (seq, spill_fill_data.init_after);
2728 rtx first = get_insns ();
2730 insn = emit_insn_before (seq, first);
2732 insn = emit_insn (seq);
2734 spill_fill_data.init_after = insn;
2736 /* If DISP is 0, we may or may not have a further adjustment
2737 afterward. If we do, then the load/store insn may be modified
2738 to be a post-modify. If we don't, then this copy may be
2739 eliminated by copyprop_hardreg_forward, which makes this
2740 insn garbage, which runs afoul of the sanity check in
2741 propagate_one_insn. So mark this insn as legal to delete. */
2743 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2747 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2749 /* ??? Not all of the spills are for varargs, but some of them are.
2750 The rest of the spills belong in an alias set of their own. But
2751 it doesn't actually hurt to include them here. */
2752 set_mem_alias_set (mem, get_varargs_alias_set ());
2754 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2755 spill_fill_data.prev_off[iter] = cfa_off;
2757 if (++iter >= spill_fill_data.n_iter)
2759 spill_fill_data.next_iter = iter;
2765 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2768 int iter = spill_fill_data.next_iter;
2771 mem = spill_restore_mem (reg, cfa_off);
2772 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2773 spill_fill_data.prev_insn[iter] = insn;
2780 RTX_FRAME_RELATED_P (insn) = 1;
2782 /* Don't even pretend that the unwind code can intuit its way
2783 through a pair of interleaved post_modify iterators. Just
2784 provide the correct answer. */
2786 if (frame_pointer_needed)
2788 base = hard_frame_pointer_rtx;
2793 base = stack_pointer_rtx;
2794 off = current_frame_info.total_size - cfa_off;
2798 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2799 gen_rtx_SET (VOIDmode,
2800 gen_rtx_MEM (GET_MODE (reg),
2801 plus_constant (base, off)),
2808 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2810 int iter = spill_fill_data.next_iter;
2813 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2814 GEN_INT (cfa_off)));
2815 spill_fill_data.prev_insn[iter] = insn;
2818 /* Wrapper functions that discards the CONST_INT spill offset. These
2819 exist so that we can give gr_spill/gr_fill the offset they need and
2820 use a consistent function interface. */
2823 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2825 return gen_movdi (dest, src);
2829 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2831 return gen_fr_spill (dest, src);
2835 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2837 return gen_fr_restore (dest, src);
2840 /* Called after register allocation to add any instructions needed for the
2841 prologue. Using a prologue insn is favored compared to putting all of the
2842 instructions in output_function_prologue(), since it allows the scheduler
2843 to intermix instructions with the saves of the caller saved registers. In
2844 some cases, it might be necessary to emit a barrier instruction as the last
2845 insn to prevent such scheduling.
2847 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2848 so that the debug info generation code can handle them properly.
2850 The register save area is layed out like so:
2852 [ varargs spill area ]
2853 [ fr register spill area ]
2854 [ br register spill area ]
2855 [ ar register spill area ]
2856 [ pr register spill area ]
2857 [ gr register spill area ] */
2859 /* ??? Get inefficient code when the frame size is larger than can fit in an
2860 adds instruction. */
2863 ia64_expand_prologue (void)
2865 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2866 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2869 ia64_compute_frame_size (get_frame_size ());
2870 last_scratch_gr_reg = 15;
2872 /* If there is no epilogue, then we don't need some prologue insns.
2873 We need to avoid emitting the dead prologue insns, because flow
2874 will complain about them. */
2880 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2881 if ((e->flags & EDGE_FAKE) == 0
2882 && (e->flags & EDGE_FALLTHRU) != 0)
2884 epilogue_p = (e != NULL);
2889 /* Set the local, input, and output register names. We need to do this
2890 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2891 half. If we use in/loc/out register names, then we get assembler errors
2892 in crtn.S because there is no alloc insn or regstk directive in there. */
2893 if (! TARGET_REG_NAMES)
2895 int inputs = current_frame_info.n_input_regs;
2896 int locals = current_frame_info.n_local_regs;
2897 int outputs = current_frame_info.n_output_regs;
2899 for (i = 0; i < inputs; i++)
2900 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2901 for (i = 0; i < locals; i++)
2902 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2903 for (i = 0; i < outputs; i++)
2904 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2907 /* Set the frame pointer register name. The regnum is logically loc79,
2908 but of course we'll not have allocated that many locals. Rather than
2909 worrying about renumbering the existing rtxs, we adjust the name. */
2910 /* ??? This code means that we can never use one local register when
2911 there is a frame pointer. loc79 gets wasted in this case, as it is
2912 renamed to a register that will never be used. See also the try_locals
2913 code in find_gr_spill. */
2914 if (current_frame_info.reg_fp)
2916 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2917 reg_names[HARD_FRAME_POINTER_REGNUM]
2918 = reg_names[current_frame_info.reg_fp];
2919 reg_names[current_frame_info.reg_fp] = tmp;
2922 /* We don't need an alloc instruction if we've used no outputs or locals. */
2923 if (current_frame_info.n_local_regs == 0
2924 && current_frame_info.n_output_regs == 0
2925 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2926 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2928 /* If there is no alloc, but there are input registers used, then we
2929 need a .regstk directive. */
2930 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2931 ar_pfs_save_reg = NULL_RTX;
2935 current_frame_info.need_regstk = 0;
2937 if (current_frame_info.reg_save_ar_pfs)
2938 regno = current_frame_info.reg_save_ar_pfs;
2940 regno = next_scratch_gr_reg ();
2941 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2943 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2944 GEN_INT (current_frame_info.n_input_regs),
2945 GEN_INT (current_frame_info.n_local_regs),
2946 GEN_INT (current_frame_info.n_output_regs),
2947 GEN_INT (current_frame_info.n_rotate_regs)));
2948 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2951 /* Set up frame pointer, stack pointer, and spill iterators. */
2953 n_varargs = cfun->machine->n_varargs;
2954 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2955 stack_pointer_rtx, 0);
2957 if (frame_pointer_needed)
2959 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2960 RTX_FRAME_RELATED_P (insn) = 1;
2963 if (current_frame_info.total_size != 0)
2965 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2968 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2969 offset = frame_size_rtx;
2972 regno = next_scratch_gr_reg ();
2973 offset = gen_rtx_REG (DImode, regno);
2974 emit_move_insn (offset, frame_size_rtx);
2977 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2978 stack_pointer_rtx, offset));
2980 if (! frame_pointer_needed)
2982 RTX_FRAME_RELATED_P (insn) = 1;
2983 if (GET_CODE (offset) != CONST_INT)
2986 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2987 gen_rtx_SET (VOIDmode,
2989 gen_rtx_PLUS (DImode,
2996 /* ??? At this point we must generate a magic insn that appears to
2997 modify the stack pointer, the frame pointer, and all spill
2998 iterators. This would allow the most scheduling freedom. For
2999 now, just hard stop. */
3000 emit_insn (gen_blockage ());
3003 /* Must copy out ar.unat before doing any integer spills. */
3004 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3006 if (current_frame_info.reg_save_ar_unat)
3008 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3011 alt_regno = next_scratch_gr_reg ();
3012 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3013 current_frame_info.gr_used_mask |= 1 << alt_regno;
3016 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3017 insn = emit_move_insn (ar_unat_save_reg, reg);
3018 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
3020 /* Even if we're not going to generate an epilogue, we still
3021 need to save the register so that EH works. */
3022 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
3023 emit_insn (gen_prologue_use (ar_unat_save_reg));
3026 ar_unat_save_reg = NULL_RTX;
3028 /* Spill all varargs registers. Do this before spilling any GR registers,
3029 since we want the UNAT bits for the GR registers to override the UNAT
3030 bits from varargs, which we don't care about. */
3033 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3035 reg = gen_rtx_REG (DImode, regno);
3036 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3039 /* Locate the bottom of the register save area. */
3040 cfa_off = (current_frame_info.spill_cfa_off
3041 + current_frame_info.spill_size
3042 + current_frame_info.extra_spill_size);
3044 /* Save the predicate register block either in a register or in memory. */
3045 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3047 reg = gen_rtx_REG (DImode, PR_REG (0));
3048 if (current_frame_info.reg_save_pr != 0)
3050 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3051 insn = emit_move_insn (alt_reg, reg);
3053 /* ??? Denote pr spill/fill by a DImode move that modifies all
3054 64 hard registers. */
3055 RTX_FRAME_RELATED_P (insn) = 1;
3057 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3058 gen_rtx_SET (VOIDmode, alt_reg, reg),
3061 /* Even if we're not going to generate an epilogue, we still
3062 need to save the register so that EH works. */
3064 emit_insn (gen_prologue_use (alt_reg));
3068 alt_regno = next_scratch_gr_reg ();
3069 alt_reg = gen_rtx_REG (DImode, alt_regno);
3070 insn = emit_move_insn (alt_reg, reg);
3071 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3076 /* Handle AR regs in numerical order. All of them get special handling. */
3077 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3078 && current_frame_info.reg_save_ar_unat == 0)
3080 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3081 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3085 /* The alloc insn already copied ar.pfs into a general register. The
3086 only thing we have to do now is copy that register to a stack slot
3087 if we'd not allocated a local register for the job. */
3088 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3089 && current_frame_info.reg_save_ar_pfs == 0)
3091 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3092 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3096 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3098 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3099 if (current_frame_info.reg_save_ar_lc != 0)
3101 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3102 insn = emit_move_insn (alt_reg, reg);
3103 RTX_FRAME_RELATED_P (insn) = 1;
3105 /* Even if we're not going to generate an epilogue, we still
3106 need to save the register so that EH works. */
3108 emit_insn (gen_prologue_use (alt_reg));
3112 alt_regno = next_scratch_gr_reg ();
3113 alt_reg = gen_rtx_REG (DImode, alt_regno);
3114 emit_move_insn (alt_reg, reg);
3115 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3120 if (current_frame_info.reg_save_gp)
3122 insn = emit_move_insn (gen_rtx_REG (DImode,
3123 current_frame_info.reg_save_gp),
3124 pic_offset_table_rtx);
3125 /* We don't know for sure yet if this is actually needed, since
3126 we've not split the PIC call patterns. If all of the calls
3127 are indirect, and not followed by any uses of the gp, then
3128 this save is dead. Allow it to go away. */
3130 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
3133 /* We should now be at the base of the gr/br/fr spill area. */
3134 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3135 + current_frame_info.spill_size));
3137 /* Spill all general registers. */
3138 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3139 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3141 reg = gen_rtx_REG (DImode, regno);
3142 do_spill (gen_gr_spill, reg, cfa_off, reg);
3146 /* Handle BR0 specially -- it may be getting stored permanently in
3147 some GR register. */
3148 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3150 reg = gen_rtx_REG (DImode, BR_REG (0));
3151 if (current_frame_info.reg_save_b0 != 0)
3153 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3154 insn = emit_move_insn (alt_reg, reg);
3155 RTX_FRAME_RELATED_P (insn) = 1;
3157 /* Even if we're not going to generate an epilogue, we still
3158 need to save the register so that EH works. */
3160 emit_insn (gen_prologue_use (alt_reg));
3164 alt_regno = next_scratch_gr_reg ();
3165 alt_reg = gen_rtx_REG (DImode, alt_regno);
3166 emit_move_insn (alt_reg, reg);
3167 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3172 /* Spill the rest of the BR registers. */
3173 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3174 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3176 alt_regno = next_scratch_gr_reg ();
3177 alt_reg = gen_rtx_REG (DImode, alt_regno);
3178 reg = gen_rtx_REG (DImode, regno);
3179 emit_move_insn (alt_reg, reg);
3180 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3184 /* Align the frame and spill all FR registers. */
3185 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3186 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3188 gcc_assert (!(cfa_off & 15));
3189 reg = gen_rtx_REG (XFmode, regno);
3190 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3194 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3196 finish_spill_pointers ();
3199 /* Called after register allocation to add any instructions needed for the
3200 epilogue. Using an epilogue insn is favored compared to putting all of the
3201 instructions in output_function_prologue(), since it allows the scheduler
3202 to intermix instructions with the saves of the caller saved registers. In
3203 some cases, it might be necessary to emit a barrier instruction as the last
3204 insn to prevent such scheduling. */
3207 ia64_expand_epilogue (int sibcall_p)
3209 rtx insn, reg, alt_reg, ar_unat_save_reg;
3210 int regno, alt_regno, cfa_off;
3212 ia64_compute_frame_size (get_frame_size ());
3214 /* If there is a frame pointer, then we use it instead of the stack
3215 pointer, so that the stack pointer does not need to be valid when
3216 the epilogue starts. See EXIT_IGNORE_STACK. */
3217 if (frame_pointer_needed)
3218 setup_spill_pointers (current_frame_info.n_spilled,
3219 hard_frame_pointer_rtx, 0);
3221 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3222 current_frame_info.total_size);
3224 if (current_frame_info.total_size != 0)
3226 /* ??? At this point we must generate a magic insn that appears to
3227 modify the spill iterators and the frame pointer. This would
3228 allow the most scheduling freedom. For now, just hard stop. */
3229 emit_insn (gen_blockage ());
3232 /* Locate the bottom of the register save area. */
3233 cfa_off = (current_frame_info.spill_cfa_off
3234 + current_frame_info.spill_size
3235 + current_frame_info.extra_spill_size);
3237 /* Restore the predicate registers. */
3238 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3240 if (current_frame_info.reg_save_pr != 0)
3241 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3244 alt_regno = next_scratch_gr_reg ();
3245 alt_reg = gen_rtx_REG (DImode, alt_regno);
3246 do_restore (gen_movdi_x, alt_reg, cfa_off);
3249 reg = gen_rtx_REG (DImode, PR_REG (0));
3250 emit_move_insn (reg, alt_reg);
3253 /* Restore the application registers. */
3255 /* Load the saved unat from the stack, but do not restore it until
3256 after the GRs have been restored. */
3257 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3259 if (current_frame_info.reg_save_ar_unat != 0)
3261 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3264 alt_regno = next_scratch_gr_reg ();
3265 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3266 current_frame_info.gr_used_mask |= 1 << alt_regno;
3267 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3272 ar_unat_save_reg = NULL_RTX;
3274 if (current_frame_info.reg_save_ar_pfs != 0)
3276 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3277 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3278 emit_move_insn (reg, alt_reg);
3280 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3282 alt_regno = next_scratch_gr_reg ();
3283 alt_reg = gen_rtx_REG (DImode, alt_regno);
3284 do_restore (gen_movdi_x, alt_reg, cfa_off);
3286 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3287 emit_move_insn (reg, alt_reg);
3290 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3292 if (current_frame_info.reg_save_ar_lc != 0)
3293 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3296 alt_regno = next_scratch_gr_reg ();
3297 alt_reg = gen_rtx_REG (DImode, alt_regno);
3298 do_restore (gen_movdi_x, alt_reg, cfa_off);
3301 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3302 emit_move_insn (reg, alt_reg);
3305 /* We should now be at the base of the gr/br/fr spill area. */
3306 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3307 + current_frame_info.spill_size));
3309 /* The GP may be stored on the stack in the prologue, but it's
3310 never restored in the epilogue. Skip the stack slot. */
3311 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3314 /* Restore all general registers. */
3315 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3316 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3318 reg = gen_rtx_REG (DImode, regno);
3319 do_restore (gen_gr_restore, reg, cfa_off);
3323 /* Restore the branch registers. Handle B0 specially, as it may
3324 have gotten stored in some GR register. */
3325 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3327 if (current_frame_info.reg_save_b0 != 0)
3328 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3331 alt_regno = next_scratch_gr_reg ();
3332 alt_reg = gen_rtx_REG (DImode, alt_regno);
3333 do_restore (gen_movdi_x, alt_reg, cfa_off);
3336 reg = gen_rtx_REG (DImode, BR_REG (0));
3337 emit_move_insn (reg, alt_reg);
3340 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3341 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3343 alt_regno = next_scratch_gr_reg ();
3344 alt_reg = gen_rtx_REG (DImode, alt_regno);
3345 do_restore (gen_movdi_x, alt_reg, cfa_off);
3347 reg = gen_rtx_REG (DImode, regno);
3348 emit_move_insn (reg, alt_reg);
3351 /* Restore floating point registers. */
3352 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3353 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3355 gcc_assert (!(cfa_off & 15));
3356 reg = gen_rtx_REG (XFmode, regno);
3357 do_restore (gen_fr_restore_x, reg, cfa_off);
3361 /* Restore ar.unat for real. */
3362 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3364 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3365 emit_move_insn (reg, ar_unat_save_reg);
3368 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3370 finish_spill_pointers ();
3372 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3374 /* ??? At this point we must generate a magic insn that appears to
3375 modify the spill iterators, the stack pointer, and the frame
3376 pointer. This would allow the most scheduling freedom. For now,
3378 emit_insn (gen_blockage ());
3381 if (cfun->machine->ia64_eh_epilogue_sp)
3382 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3383 else if (frame_pointer_needed)
3385 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3386 RTX_FRAME_RELATED_P (insn) = 1;
3388 else if (current_frame_info.total_size)
3390 rtx offset, frame_size_rtx;
3392 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3393 if (CONST_OK_FOR_I (current_frame_info.total_size))
3394 offset = frame_size_rtx;
3397 regno = next_scratch_gr_reg ();
3398 offset = gen_rtx_REG (DImode, regno);
3399 emit_move_insn (offset, frame_size_rtx);
3402 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3405 RTX_FRAME_RELATED_P (insn) = 1;
3406 if (GET_CODE (offset) != CONST_INT)
3409 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3410 gen_rtx_SET (VOIDmode,
3412 gen_rtx_PLUS (DImode,
3419 if (cfun->machine->ia64_eh_epilogue_bsp)
3420 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3423 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3426 int fp = GR_REG (2);
3427 /* We need a throw away register here, r0 and r1 are reserved, so r2 is the
3428 first available call clobbered register. If there was a frame_pointer
3429 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3430 so we have to make sure we're using the string "r2" when emitting
3431 the register name for the assembler. */
3432 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3433 fp = HARD_FRAME_POINTER_REGNUM;
3435 /* We must emit an alloc to force the input registers to become output
3436 registers. Otherwise, if the callee tries to pass its parameters
3437 through to another call without an intervening alloc, then these
3439 /* ??? We don't need to preserve all input registers. We only need to
3440 preserve those input registers used as arguments to the sibling call.
3441 It is unclear how to compute that number here. */
3442 if (current_frame_info.n_input_regs != 0)
3444 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3445 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3446 const0_rtx, const0_rtx,
3447 n_inputs, const0_rtx));
3448 RTX_FRAME_RELATED_P (insn) = 1;
3453 /* Return 1 if br.ret can do all the work required to return from a
3457 ia64_direct_return (void)
3459 if (reload_completed && ! frame_pointer_needed)
3461 ia64_compute_frame_size (get_frame_size ());
3463 return (current_frame_info.total_size == 0
3464 && current_frame_info.n_spilled == 0
3465 && current_frame_info.reg_save_b0 == 0
3466 && current_frame_info.reg_save_pr == 0
3467 && current_frame_info.reg_save_ar_pfs == 0
3468 && current_frame_info.reg_save_ar_unat == 0
3469 && current_frame_info.reg_save_ar_lc == 0);
3474 /* Return the magic cookie that we use to hold the return address
3475 during early compilation. */
3478 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3482 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3485 /* Split this value after reload, now that we know where the return
3486 address is saved. */
3489 ia64_split_return_addr_rtx (rtx dest)
3493 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3495 if (current_frame_info.reg_save_b0 != 0)
3496 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3502 /* Compute offset from CFA for BR0. */
3503 /* ??? Must be kept in sync with ia64_expand_prologue. */
3504 off = (current_frame_info.spill_cfa_off
3505 + current_frame_info.spill_size);
3506 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3507 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3510 /* Convert CFA offset to a register based offset. */
3511 if (frame_pointer_needed)
3512 src = hard_frame_pointer_rtx;
3515 src = stack_pointer_rtx;
3516 off += current_frame_info.total_size;
3519 /* Load address into scratch register. */
3520 if (CONST_OK_FOR_I (off))
3521 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3524 emit_move_insn (dest, GEN_INT (off));
3525 emit_insn (gen_adddi3 (dest, src, dest));
3528 src = gen_rtx_MEM (Pmode, dest);
3532 src = gen_rtx_REG (DImode, BR_REG (0));
3534 emit_move_insn (dest, src);
3538 ia64_hard_regno_rename_ok (int from, int to)
3540 /* Don't clobber any of the registers we reserved for the prologue. */
3541 if (to == current_frame_info.reg_fp
3542 || to == current_frame_info.reg_save_b0
3543 || to == current_frame_info.reg_save_pr
3544 || to == current_frame_info.reg_save_ar_pfs
3545 || to == current_frame_info.reg_save_ar_unat
3546 || to == current_frame_info.reg_save_ar_lc)
3549 if (from == current_frame_info.reg_fp
3550 || from == current_frame_info.reg_save_b0
3551 || from == current_frame_info.reg_save_pr
3552 || from == current_frame_info.reg_save_ar_pfs
3553 || from == current_frame_info.reg_save_ar_unat
3554 || from == current_frame_info.reg_save_ar_lc)
3557 /* Don't use output registers outside the register frame. */
3558 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3561 /* Retain even/oddness on predicate register pairs. */
3562 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3563 return (from & 1) == (to & 1);
3568 /* Target hook for assembling integer objects. Handle word-sized
3569 aligned objects and detect the cases when @fptr is needed. */
3572 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3574 if (size == POINTER_SIZE / BITS_PER_UNIT
3575 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3576 && GET_CODE (x) == SYMBOL_REF
3577 && SYMBOL_REF_FUNCTION_P (x))
3579 static const char * const directive[2][2] = {
3580 /* 64-bit pointer */ /* 32-bit pointer */
3581 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3582 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3584 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3585 output_addr_const (asm_out_file, x);
3586 fputs (")\n", asm_out_file);
3589 return default_assemble_integer (x, size, aligned_p);
3592 /* Emit the function prologue. */
3595 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3597 int mask, grsave, grsave_prev;
3599 if (current_frame_info.need_regstk)
3600 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3601 current_frame_info.n_input_regs,
3602 current_frame_info.n_local_regs,
3603 current_frame_info.n_output_regs,
3604 current_frame_info.n_rotate_regs);
3606 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3609 /* Emit the .prologue directive. */
3612 grsave = grsave_prev = 0;
3613 if (current_frame_info.reg_save_b0 != 0)
3616 grsave = grsave_prev = current_frame_info.reg_save_b0;
3618 if (current_frame_info.reg_save_ar_pfs != 0
3619 && (grsave_prev == 0
3620 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3623 if (grsave_prev == 0)
3624 grsave = current_frame_info.reg_save_ar_pfs;
3625 grsave_prev = current_frame_info.reg_save_ar_pfs;
3627 if (current_frame_info.reg_fp != 0
3628 && (grsave_prev == 0
3629 || current_frame_info.reg_fp == grsave_prev + 1))
3632 if (grsave_prev == 0)
3633 grsave = HARD_FRAME_POINTER_REGNUM;
3634 grsave_prev = current_frame_info.reg_fp;
3636 if (current_frame_info.reg_save_pr != 0
3637 && (grsave_prev == 0
3638 || current_frame_info.reg_save_pr == grsave_prev + 1))
3641 if (grsave_prev == 0)
3642 grsave = current_frame_info.reg_save_pr;
3645 if (mask && TARGET_GNU_AS)
3646 fprintf (file, "\t.prologue %d, %d\n", mask,
3647 ia64_dbx_register_number (grsave));
3649 fputs ("\t.prologue\n", file);
3651 /* Emit a .spill directive, if necessary, to relocate the base of
3652 the register spill area. */
3653 if (current_frame_info.spill_cfa_off != -16)
3654 fprintf (file, "\t.spill %ld\n",
3655 (long) (current_frame_info.spill_cfa_off
3656 + current_frame_info.spill_size));
3659 /* Emit the .body directive at the scheduled end of the prologue. */
3662 ia64_output_function_end_prologue (FILE *file)
3664 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3667 fputs ("\t.body\n", file);
3670 /* Emit the function epilogue. */
3673 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3674 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3678 if (current_frame_info.reg_fp)
3680 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3681 reg_names[HARD_FRAME_POINTER_REGNUM]
3682 = reg_names[current_frame_info.reg_fp];
3683 reg_names[current_frame_info.reg_fp] = tmp;
3685 if (! TARGET_REG_NAMES)
3687 for (i = 0; i < current_frame_info.n_input_regs; i++)
3688 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3689 for (i = 0; i < current_frame_info.n_local_regs; i++)
3690 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3691 for (i = 0; i < current_frame_info.n_output_regs; i++)
3692 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3695 current_frame_info.initialized = 0;
3699 ia64_dbx_register_number (int regno)
3701 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3702 from its home at loc79 to something inside the register frame. We
3703 must perform the same renumbering here for the debug info. */
3704 if (current_frame_info.reg_fp)
3706 if (regno == HARD_FRAME_POINTER_REGNUM)
3707 regno = current_frame_info.reg_fp;
3708 else if (regno == current_frame_info.reg_fp)
3709 regno = HARD_FRAME_POINTER_REGNUM;
3712 if (IN_REGNO_P (regno))
3713 return 32 + regno - IN_REG (0);
3714 else if (LOC_REGNO_P (regno))
3715 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3716 else if (OUT_REGNO_P (regno))
3717 return (32 + current_frame_info.n_input_regs
3718 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3724 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3726 rtx addr_reg, eight = GEN_INT (8);
3728 /* The Intel assembler requires that the global __ia64_trampoline symbol
3729 be declared explicitly */
3732 static bool declared_ia64_trampoline = false;
3734 if (!declared_ia64_trampoline)
3736 declared_ia64_trampoline = true;
3737 (*targetm.asm_out.globalize_label) (asm_out_file,
3738 "__ia64_trampoline");
3742 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3743 addr = convert_memory_address (Pmode, addr);
3744 fnaddr = convert_memory_address (Pmode, fnaddr);
3745 static_chain = convert_memory_address (Pmode, static_chain);
3747 /* Load up our iterator. */
3748 addr_reg = gen_reg_rtx (Pmode);
3749 emit_move_insn (addr_reg, addr);
3751 /* The first two words are the fake descriptor:
3752 __ia64_trampoline, ADDR+16. */
3753 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3754 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3755 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3757 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3758 copy_to_reg (plus_constant (addr, 16)));
3759 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3761 /* The third word is the target descriptor. */
3762 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3763 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3765 /* The fourth word is the static chain. */
3766 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3769 /* Do any needed setup for a variadic function. CUM has not been updated
3770 for the last named argument which has type TYPE and mode MODE.
3772 We generate the actual spill instructions during prologue generation. */
3775 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3776 tree type, int * pretend_size,
3777 int second_time ATTRIBUTE_UNUSED)
3779 CUMULATIVE_ARGS next_cum = *cum;
3781 /* Skip the current argument. */
3782 ia64_function_arg_advance (&next_cum, mode, type, 1);
3784 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3786 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3787 *pretend_size = n * UNITS_PER_WORD;
3788 cfun->machine->n_varargs = n;
3792 /* Check whether TYPE is a homogeneous floating point aggregate. If
3793 it is, return the mode of the floating point type that appears
3794 in all leafs. If it is not, return VOIDmode.
3796 An aggregate is a homogeneous floating point aggregate is if all
3797 fields/elements in it have the same floating point type (e.g,
3798 SFmode). 128-bit quad-precision floats are excluded.
3800 Variable sized aggregates should never arrive here, since we should
3801 have already decided to pass them by reference. Top-level zero-sized
3802 aggregates are excluded because our parallels crash the middle-end. */
3804 static enum machine_mode
3805 hfa_element_mode (tree type, bool nested)
3807 enum machine_mode element_mode = VOIDmode;
3808 enum machine_mode mode;
3809 enum tree_code code = TREE_CODE (type);
3810 int know_element_mode = 0;
3813 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3818 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3819 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3820 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3821 case LANG_TYPE: case FUNCTION_TYPE:
3824 /* Fortran complex types are supposed to be HFAs, so we need to handle
3825 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3828 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3829 && TYPE_MODE (type) != TCmode)
3830 return GET_MODE_INNER (TYPE_MODE (type));
3835 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3836 mode if this is contained within an aggregate. */
3837 if (nested && TYPE_MODE (type) != TFmode)
3838 return TYPE_MODE (type);
3843 return hfa_element_mode (TREE_TYPE (type), 1);
3847 case QUAL_UNION_TYPE:
3848 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3850 if (TREE_CODE (t) != FIELD_DECL)
3853 mode = hfa_element_mode (TREE_TYPE (t), 1);
3854 if (know_element_mode)
3856 if (mode != element_mode)
3859 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3863 know_element_mode = 1;
3864 element_mode = mode;
3867 return element_mode;
3870 /* If we reach here, we probably have some front-end specific type
3871 that the backend doesn't know about. This can happen via the
3872 aggregate_value_p call in init_function_start. All we can do is
3873 ignore unknown tree types. */
3880 /* Return the number of words required to hold a quantity of TYPE and MODE
3881 when passed as an argument. */
3883 ia64_function_arg_words (tree type, enum machine_mode mode)
3887 if (mode == BLKmode)
3888 words = int_size_in_bytes (type);
3890 words = GET_MODE_SIZE (mode);
3892 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3895 /* Return the number of registers that should be skipped so the current
3896 argument (described by TYPE and WORDS) will be properly aligned.
3898 Integer and float arguments larger than 8 bytes start at the next
3899 even boundary. Aggregates larger than 8 bytes start at the next
3900 even boundary if the aggregate has 16 byte alignment. Note that
3901 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3902 but are still to be aligned in registers.
3904 ??? The ABI does not specify how to handle aggregates with
3905 alignment from 9 to 15 bytes, or greater than 16. We handle them
3906 all as if they had 16 byte alignment. Such aggregates can occur
3907 only if gcc extensions are used. */
3909 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3911 if ((cum->words & 1) == 0)
3915 && TREE_CODE (type) != INTEGER_TYPE
3916 && TREE_CODE (type) != REAL_TYPE)
3917 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3922 /* Return rtx for register where argument is passed, or zero if it is passed
3924 /* ??? 128-bit quad-precision floats are always passed in general
3928 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3929 int named, int incoming)
3931 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3932 int words = ia64_function_arg_words (type, mode);
3933 int offset = ia64_function_arg_offset (cum, type, words);
3934 enum machine_mode hfa_mode = VOIDmode;
3936 /* If all argument slots are used, then it must go on the stack. */
3937 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3940 /* Check for and handle homogeneous FP aggregates. */
3942 hfa_mode = hfa_element_mode (type, 0);
3944 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3945 and unprototyped hfas are passed specially. */
3946 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3950 int fp_regs = cum->fp_regs;
3951 int int_regs = cum->words + offset;
3952 int hfa_size = GET_MODE_SIZE (hfa_mode);
3956 /* If prototyped, pass it in FR regs then GR regs.
3957 If not prototyped, pass it in both FR and GR regs.
3959 If this is an SFmode aggregate, then it is possible to run out of
3960 FR regs while GR regs are still left. In that case, we pass the
3961 remaining part in the GR regs. */
3963 /* Fill the FP regs. We do this always. We stop if we reach the end
3964 of the argument, the last FP register, or the last argument slot. */
3966 byte_size = ((mode == BLKmode)
3967 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3968 args_byte_size = int_regs * UNITS_PER_WORD;
3970 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3971 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3973 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3974 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3978 args_byte_size += hfa_size;
3982 /* If no prototype, then the whole thing must go in GR regs. */
3983 if (! cum->prototype)
3985 /* If this is an SFmode aggregate, then we might have some left over
3986 that needs to go in GR regs. */
3987 else if (byte_size != offset)
3988 int_regs += offset / UNITS_PER_WORD;
3990 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3992 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3994 enum machine_mode gr_mode = DImode;
3995 unsigned int gr_size;
3997 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3998 then this goes in a GR reg left adjusted/little endian, right
3999 adjusted/big endian. */
4000 /* ??? Currently this is handled wrong, because 4-byte hunks are
4001 always right adjusted/little endian. */
4004 /* If we have an even 4 byte hunk because the aggregate is a
4005 multiple of 4 bytes in size, then this goes in a GR reg right
4006 adjusted/little endian. */
4007 else if (byte_size - offset == 4)
4010 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4011 gen_rtx_REG (gr_mode, (basereg
4015 gr_size = GET_MODE_SIZE (gr_mode);
4017 if (gr_size == UNITS_PER_WORD
4018 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4020 else if (gr_size > UNITS_PER_WORD)
4021 int_regs += gr_size / UNITS_PER_WORD;
4023 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4026 /* Integral and aggregates go in general registers. If we have run out of
4027 FR registers, then FP values must also go in general registers. This can
4028 happen when we have a SFmode HFA. */
4029 else if (mode == TFmode || mode == TCmode
4030 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4032 int byte_size = ((mode == BLKmode)
4033 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4034 if (BYTES_BIG_ENDIAN
4035 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4036 && byte_size < UNITS_PER_WORD
4039 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4040 gen_rtx_REG (DImode,
4041 (basereg + cum->words
4044 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4047 return gen_rtx_REG (mode, basereg + cum->words + offset);
4051 /* If there is a prototype, then FP values go in a FR register when
4052 named, and in a GR register when unnamed. */
4053 else if (cum->prototype)
4056 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4057 /* In big-endian mode, an anonymous SFmode value must be represented
4058 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4059 the value into the high half of the general register. */
4060 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4061 return gen_rtx_PARALLEL (mode,
4063 gen_rtx_EXPR_LIST (VOIDmode,
4064 gen_rtx_REG (DImode, basereg + cum->words + offset),
4067 return gen_rtx_REG (mode, basereg + cum->words + offset);
4069 /* If there is no prototype, then FP values go in both FR and GR
4073 /* See comment above. */
4074 enum machine_mode inner_mode =
4075 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4077 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4078 gen_rtx_REG (mode, (FR_ARG_FIRST
4081 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4082 gen_rtx_REG (inner_mode,
4083 (basereg + cum->words
4087 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4091 /* Return number of bytes, at the beginning of the argument, that must be
4092 put in registers. 0 is the argument is entirely in registers or entirely
4096 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4097 tree type, bool named ATTRIBUTE_UNUSED)
4099 int words = ia64_function_arg_words (type, mode);
4100 int offset = ia64_function_arg_offset (cum, type, words);
4102 /* If all argument slots are used, then it must go on the stack. */
4103 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4106 /* It doesn't matter whether the argument goes in FR or GR regs. If
4107 it fits within the 8 argument slots, then it goes entirely in
4108 registers. If it extends past the last argument slot, then the rest
4109 goes on the stack. */
4111 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4114 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
4117 /* Update CUM to point after this argument. This is patterned after
4118 ia64_function_arg. */
4121 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4122 tree type, int named)
4124 int words = ia64_function_arg_words (type, mode);
4125 int offset = ia64_function_arg_offset (cum, type, words);
4126 enum machine_mode hfa_mode = VOIDmode;
4128 /* If all arg slots are already full, then there is nothing to do. */
4129 if (cum->words >= MAX_ARGUMENT_SLOTS)
4132 cum->words += words + offset;
4134 /* Check for and handle homogeneous FP aggregates. */
4136 hfa_mode = hfa_element_mode (type, 0);
4138 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4139 and unprototyped hfas are passed specially. */
4140 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4142 int fp_regs = cum->fp_regs;
4143 /* This is the original value of cum->words + offset. */
4144 int int_regs = cum->words - words;
4145 int hfa_size = GET_MODE_SIZE (hfa_mode);
4149 /* If prototyped, pass it in FR regs then GR regs.
4150 If not prototyped, pass it in both FR and GR regs.
4152 If this is an SFmode aggregate, then it is possible to run out of
4153 FR regs while GR regs are still left. In that case, we pass the
4154 remaining part in the GR regs. */
4156 /* Fill the FP regs. We do this always. We stop if we reach the end
4157 of the argument, the last FP register, or the last argument slot. */
4159 byte_size = ((mode == BLKmode)
4160 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4161 args_byte_size = int_regs * UNITS_PER_WORD;
4163 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4164 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4167 args_byte_size += hfa_size;
4171 cum->fp_regs = fp_regs;
4174 /* Integral and aggregates go in general registers. So do TFmode FP values.
4175 If we have run out of FR registers, then other FP values must also go in
4176 general registers. This can happen when we have a SFmode HFA. */
4177 else if (mode == TFmode || mode == TCmode
4178 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4179 cum->int_regs = cum->words;
4181 /* If there is a prototype, then FP values go in a FR register when
4182 named, and in a GR register when unnamed. */
4183 else if (cum->prototype)
4186 cum->int_regs = cum->words;
4188 /* ??? Complex types should not reach here. */
4189 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4191 /* If there is no prototype, then FP values go in both FR and GR
4195 /* ??? Complex types should not reach here. */
4196 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4197 cum->int_regs = cum->words;
4201 /* Arguments with alignment larger than 8 bytes start at the next even
4202 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
4203 even though their normal alignment is 8 bytes. See ia64_function_arg. */
4206 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4209 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4210 return PARM_BOUNDARY * 2;
4214 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4215 return PARM_BOUNDARY * 2;
4217 return PARM_BOUNDARY;
4220 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4221 return PARM_BOUNDARY * 2;
4223 return PARM_BOUNDARY;
4226 /* True if it is OK to do sibling call optimization for the specified
4227 call expression EXP. DECL will be the called function, or NULL if
4228 this is an indirect call. */
4230 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4232 /* We can't perform a sibcall if the current function has the syscall_linkage
4234 if (lookup_attribute ("syscall_linkage",
4235 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4238 /* We must always return with our current GP. This means we can
4239 only sibcall to functions defined in the current module. */
4240 return decl && (*targetm.binds_local_p) (decl);
4244 /* Implement va_arg. */
4247 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4249 /* Variable sized types are passed by reference. */
4250 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4252 tree ptrtype = build_pointer_type (type);
4253 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4254 return build_va_arg_indirect_ref (addr);
4257 /* Aggregate arguments with alignment larger than 8 bytes start at
4258 the next even boundary. Integer and floating point arguments
4259 do so if they are larger than 8 bytes, whether or not they are
4260 also aligned larger than 8 bytes. */
4261 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4262 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4264 tree t = build2 (PLUS_EXPR, TREE_TYPE (valist), valist,
4265 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
4266 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4267 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
4268 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
4269 gimplify_and_add (t, pre_p);
4272 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4275 /* Return 1 if function return value returned in memory. Return 0 if it is
4279 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
4281 enum machine_mode mode;
4282 enum machine_mode hfa_mode;
4283 HOST_WIDE_INT byte_size;
4285 mode = TYPE_MODE (valtype);
4286 byte_size = GET_MODE_SIZE (mode);
4287 if (mode == BLKmode)
4289 byte_size = int_size_in_bytes (valtype);
4294 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4296 hfa_mode = hfa_element_mode (valtype, 0);
4297 if (hfa_mode != VOIDmode)
4299 int hfa_size = GET_MODE_SIZE (hfa_mode);
4301 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4306 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4312 /* Return rtx for register that holds the function return value. */
4315 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4317 enum machine_mode mode;
4318 enum machine_mode hfa_mode;
4320 mode = TYPE_MODE (valtype);
4321 hfa_mode = hfa_element_mode (valtype, 0);
4323 if (hfa_mode != VOIDmode)
4331 hfa_size = GET_MODE_SIZE (hfa_mode);
4332 byte_size = ((mode == BLKmode)
4333 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4335 for (i = 0; offset < byte_size; i++)
4337 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4338 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4342 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4344 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4345 return gen_rtx_REG (mode, FR_ARG_FIRST);
4348 bool need_parallel = false;
4350 /* In big-endian mode, we need to manage the layout of aggregates
4351 in the registers so that we get the bits properly aligned in
4352 the highpart of the registers. */
4353 if (BYTES_BIG_ENDIAN
4354 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4355 need_parallel = true;
4357 /* Something like struct S { long double x; char a[0] } is not an
4358 HFA structure, and therefore doesn't go in fp registers. But
4359 the middle-end will give it XFmode anyway, and XFmode values
4360 don't normally fit in integer registers. So we need to smuggle
4361 the value inside a parallel. */
4362 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4363 need_parallel = true;
4373 bytesize = int_size_in_bytes (valtype);
4374 /* An empty PARALLEL is invalid here, but the return value
4375 doesn't matter for empty structs. */
4377 return gen_rtx_REG (mode, GR_RET_FIRST);
4378 for (i = 0; offset < bytesize; i++)
4380 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4381 gen_rtx_REG (DImode,
4384 offset += UNITS_PER_WORD;
4386 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4389 return gen_rtx_REG (mode, GR_RET_FIRST);
4393 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4394 We need to emit DTP-relative relocations. */
4397 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4399 gcc_assert (size == 4 || size == 8);
4401 fputs ("\tdata4.ua\t@dtprel(", file);
4403 fputs ("\tdata8.ua\t@dtprel(", file);
4404 output_addr_const (file, x);
4408 /* Print a memory address as an operand to reference that memory location. */
4410 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4411 also call this from ia64_print_operand for memory addresses. */
4414 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4415 rtx address ATTRIBUTE_UNUSED)
4419 /* Print an operand to an assembler instruction.
4420 C Swap and print a comparison operator.
4421 D Print an FP comparison operator.
4422 E Print 32 - constant, for SImode shifts as extract.
4423 e Print 64 - constant, for DImode rotates.
4424 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4425 a floating point register emitted normally.
4426 I Invert a predicate register by adding 1.
4427 J Select the proper predicate register for a condition.
4428 j Select the inverse predicate register for a condition.
4429 O Append .acq for volatile load.
4430 P Postincrement of a MEM.
4431 Q Append .rel for volatile store.
4432 S Shift amount for shladd instruction.
4433 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4434 for Intel assembler.
4435 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4436 for Intel assembler.
4437 X A pair of floating point registers.
4438 r Print register name, or constant 0 as r0. HP compatibility for
4440 v Print vector constant value as an 8-byte integer value. */
4443 ia64_print_operand (FILE * file, rtx x, int code)
4450 /* Handled below. */
4455 enum rtx_code c = swap_condition (GET_CODE (x));
4456 fputs (GET_RTX_NAME (c), file);
4461 switch (GET_CODE (x))
4473 str = GET_RTX_NAME (GET_CODE (x));
4480 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4484 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4488 if (x == CONST0_RTX (GET_MODE (x)))
4489 str = reg_names [FR_REG (0)];
4490 else if (x == CONST1_RTX (GET_MODE (x)))
4491 str = reg_names [FR_REG (1)];
4494 gcc_assert (GET_CODE (x) == REG);
4495 str = reg_names [REGNO (x)];
4501 fputs (reg_names [REGNO (x) + 1], file);
4507 unsigned int regno = REGNO (XEXP (x, 0));
4508 if (GET_CODE (x) == EQ)
4512 fputs (reg_names [regno], file);
4517 if (MEM_VOLATILE_P (x))
4518 fputs(".acq", file);
4523 HOST_WIDE_INT value;
4525 switch (GET_CODE (XEXP (x, 0)))
4531 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4532 if (GET_CODE (x) == CONST_INT)
4536 gcc_assert (GET_CODE (x) == REG);
4537 fprintf (file, ", %s", reg_names[REGNO (x)]);
4543 value = GET_MODE_SIZE (GET_MODE (x));
4547 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4551 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4556 if (MEM_VOLATILE_P (x))
4557 fputs(".rel", file);
4561 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4565 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4567 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4573 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4575 const char *prefix = "0x";
4576 if (INTVAL (x) & 0x80000000)
4578 fprintf (file, "0xffffffff");
4581 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4588 unsigned int regno = REGNO (x);
4589 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4594 /* If this operand is the constant zero, write it as register zero.
4595 Any register, zero, or CONST_INT value is OK here. */
4596 if (GET_CODE (x) == REG)
4597 fputs (reg_names[REGNO (x)], file);
4598 else if (x == CONST0_RTX (GET_MODE (x)))
4600 else if (GET_CODE (x) == CONST_INT)
4601 output_addr_const (file, x);
4603 output_operand_lossage ("invalid %%r value");
4607 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4608 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4615 /* For conditional branches, returns or calls, substitute
4616 sptk, dptk, dpnt, or spnt for %s. */
4617 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4620 int pred_val = INTVAL (XEXP (x, 0));
4622 /* Guess top and bottom 10% statically predicted. */
4623 if (pred_val < REG_BR_PROB_BASE / 50)
4625 else if (pred_val < REG_BR_PROB_BASE / 2)
4627 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4632 else if (GET_CODE (current_output_insn) == CALL_INSN)
4637 fputs (which, file);
4642 x = current_insn_predicate;
4645 unsigned int regno = REGNO (XEXP (x, 0));
4646 if (GET_CODE (x) == EQ)
4648 fprintf (file, "(%s) ", reg_names [regno]);
4653 output_operand_lossage ("ia64_print_operand: unknown code");
4657 switch (GET_CODE (x))
4659 /* This happens for the spill/restore instructions. */
4664 /* ... fall through ... */
4667 fputs (reg_names [REGNO (x)], file);
4672 rtx addr = XEXP (x, 0);
4673 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4674 addr = XEXP (addr, 0);
4675 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4680 output_addr_const (file, x);
4687 /* Compute a (partial) cost for rtx X. Return true if the complete
4688 cost has been computed, and false if subexpressions should be
4689 scanned. In either case, *TOTAL contains the cost result. */
4690 /* ??? This is incomplete. */
4693 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4701 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4704 if (CONST_OK_FOR_I (INTVAL (x)))
4706 else if (CONST_OK_FOR_J (INTVAL (x)))
4709 *total = COSTS_N_INSNS (1);
4712 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4715 *total = COSTS_N_INSNS (1);
4720 *total = COSTS_N_INSNS (1);
4726 *total = COSTS_N_INSNS (3);
4730 /* For multiplies wider than HImode, we have to go to the FPU,
4731 which normally involves copies. Plus there's the latency
4732 of the multiply itself, and the latency of the instructions to
4733 transfer integer regs to FP regs. */
4734 /* ??? Check for FP mode. */
4735 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4736 *total = COSTS_N_INSNS (10);
4738 *total = COSTS_N_INSNS (2);
4746 *total = COSTS_N_INSNS (1);
4753 /* We make divide expensive, so that divide-by-constant will be
4754 optimized to a multiply. */
4755 *total = COSTS_N_INSNS (60);
4763 /* Calculate the cost of moving data from a register in class FROM to
4764 one in class TO, using MODE. */
4767 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4770 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4771 if (to == ADDL_REGS)
4773 if (from == ADDL_REGS)
4776 /* All costs are symmetric, so reduce cases by putting the
4777 lower number class as the destination. */
4780 enum reg_class tmp = to;
4781 to = from, from = tmp;
4784 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4785 so that we get secondary memory reloads. Between FR_REGS,
4786 we have to make this at least as expensive as MEMORY_MOVE_COST
4787 to avoid spectacularly poor register class preferencing. */
4788 if (mode == XFmode || mode == RFmode)
4790 if (to != GR_REGS || from != GR_REGS)
4791 return MEMORY_MOVE_COST (mode, to, 0);
4799 /* Moving between PR registers takes two insns. */
4800 if (from == PR_REGS)
4802 /* Moving between PR and anything but GR is impossible. */
4803 if (from != GR_REGS)
4804 return MEMORY_MOVE_COST (mode, to, 0);
4808 /* Moving between BR and anything but GR is impossible. */
4809 if (from != GR_REGS && from != GR_AND_BR_REGS)
4810 return MEMORY_MOVE_COST (mode, to, 0);
4815 /* Moving between AR and anything but GR is impossible. */
4816 if (from != GR_REGS)
4817 return MEMORY_MOVE_COST (mode, to, 0);
4823 case GR_AND_FR_REGS:
4824 case GR_AND_BR_REGS:
4835 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4836 to use when copying X into that class. */
4839 ia64_preferred_reload_class (rtx x, enum reg_class class)
4845 /* Don't allow volatile mem reloads into floating point registers.
4846 This is defined to force reload to choose the r/m case instead
4847 of the f/f case when reloading (set (reg fX) (mem/v)). */
4848 if (MEM_P (x) && MEM_VOLATILE_P (x))
4851 /* Force all unrecognized constants into the constant pool. */
4869 /* This function returns the register class required for a secondary
4870 register when copying between one of the registers in CLASS, and X,
4871 using MODE. A return value of NO_REGS means that no secondary register
4875 ia64_secondary_reload_class (enum reg_class class,
4876 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4880 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4881 regno = true_regnum (x);
4888 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4889 interaction. We end up with two pseudos with overlapping lifetimes
4890 both of which are equiv to the same constant, and both which need
4891 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4892 changes depending on the path length, which means the qty_first_reg
4893 check in make_regs_eqv can give different answers at different times.
4894 At some point I'll probably need a reload_indi pattern to handle
4897 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4898 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4899 non-general registers for good measure. */
4900 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4903 /* This is needed if a pseudo used as a call_operand gets spilled to a
4905 if (GET_CODE (x) == MEM)
4911 /* Need to go through general registers to get to other class regs. */
4912 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4915 /* This can happen when a paradoxical subreg is an operand to the
4917 /* ??? This shouldn't be necessary after instruction scheduling is
4918 enabled, because paradoxical subregs are not accepted by
4919 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4920 stop the paradoxical subreg stupidity in the *_operand functions
4922 if (GET_CODE (x) == MEM
4923 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4924 || GET_MODE (x) == QImode))
4927 /* This can happen because of the ior/and/etc patterns that accept FP
4928 registers as operands. If the third operand is a constant, then it
4929 needs to be reloaded into a FP register. */
4930 if (GET_CODE (x) == CONST_INT)
4933 /* This can happen because of register elimination in a muldi3 insn.
4934 E.g. `26107 * (unsigned long)&u'. */
4935 if (GET_CODE (x) == PLUS)
4940 /* ??? This happens if we cse/gcse a BImode value across a call,
4941 and the function has a nonlocal goto. This is because global
4942 does not allocate call crossing pseudos to hard registers when
4943 current_function_has_nonlocal_goto is true. This is relatively
4944 common for C++ programs that use exceptions. To reproduce,
4945 return NO_REGS and compile libstdc++. */
4946 if (GET_CODE (x) == MEM)
4949 /* This can happen when we take a BImode subreg of a DImode value,
4950 and that DImode value winds up in some non-GR register. */
4951 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4963 /* Emit text to declare externally defined variables and functions, because
4964 the Intel assembler does not support undefined externals. */
4967 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4969 int save_referenced;
4971 /* GNU as does not need anything here, but the HP linker does need
4972 something for external functions. */
4976 || TREE_CODE (decl) != FUNCTION_DECL
4977 || strstr (name, "__builtin_") == name))
4980 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4981 the linker when we do this, so we need to be careful not to do this for
4982 builtin functions which have no library equivalent. Unfortunately, we
4983 can't tell here whether or not a function will actually be called by
4984 expand_expr, so we pull in library functions even if we may not need
4986 if (! strcmp (name, "__builtin_next_arg")
4987 || ! strcmp (name, "alloca")
4988 || ! strcmp (name, "__builtin_constant_p")
4989 || ! strcmp (name, "__builtin_args_info"))
4993 ia64_hpux_add_extern_decl (decl);
4996 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4998 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4999 if (TREE_CODE (decl) == FUNCTION_DECL)
5000 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
5001 (*targetm.asm_out.globalize_label) (file, name);
5002 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
5006 /* Parse the -mfixed-range= option string. */
5009 fix_range (const char *const_str)
5012 char *str, *dash, *comma;
5014 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
5015 REG2 are either register names or register numbers. The effect
5016 of this option is to mark the registers in the range from REG1 to
5017 REG2 as ``fixed'' so they won't be used by the compiler. This is
5018 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
5020 i = strlen (const_str);
5021 str = (char *) alloca (i + 1);
5022 memcpy (str, const_str, i + 1);
5026 dash = strchr (str, '-');
5029 warning (0, "value of -mfixed-range must have form REG1-REG2");
5034 comma = strchr (dash + 1, ',');
5038 first = decode_reg_name (str);
5041 warning (0, "unknown register name: %s", str);
5045 last = decode_reg_name (dash + 1);
5048 warning (0, "unknown register name: %s", dash + 1);
5056 warning (0, "%s-%s is an empty range", str, dash + 1);
5060 for (i = first; i <= last; ++i)
5061 fixed_regs[i] = call_used_regs[i] = 1;
5071 /* Implement TARGET_HANDLE_OPTION. */
5074 ia64_handle_option (size_t code, const char *arg, int value)
5078 case OPT_mfixed_range_:
5082 case OPT_mtls_size_:
5083 if (value != 14 && value != 22 && value != 64)
5084 error ("bad value %<%s%> for -mtls-size= switch", arg);
5091 const char *name; /* processor name or nickname. */
5092 enum processor_type processor;
5094 const processor_alias_table[] =
5096 {"itanium", PROCESSOR_ITANIUM},
5097 {"itanium1", PROCESSOR_ITANIUM},
5098 {"merced", PROCESSOR_ITANIUM},
5099 {"itanium2", PROCESSOR_ITANIUM2},
5100 {"mckinley", PROCESSOR_ITANIUM2},
5102 int const pta_size = ARRAY_SIZE (processor_alias_table);
5105 for (i = 0; i < pta_size; i++)
5106 if (!strcmp (arg, processor_alias_table[i].name))
5108 ia64_tune = processor_alias_table[i].processor;
5112 error ("bad value %<%s%> for -mtune= switch", arg);
5121 /* Implement OVERRIDE_OPTIONS. */
5124 ia64_override_options (void)
5126 if (TARGET_AUTO_PIC)
5127 target_flags |= MASK_CONST_GP;
5129 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5131 warning (0, "not yet implemented: latency-optimized inline square root");
5132 TARGET_INLINE_SQRT = INL_MAX_THR;
5135 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
5136 flag_schedule_insns_after_reload = 0;
5138 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5140 init_machine_status = ia64_init_machine_status;
5143 static struct machine_function *
5144 ia64_init_machine_status (void)
5146 return ggc_alloc_cleared (sizeof (struct machine_function));
5149 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5150 static enum attr_type ia64_safe_type (rtx);
5152 static enum attr_itanium_class
5153 ia64_safe_itanium_class (rtx insn)
5155 if (recog_memoized (insn) >= 0)
5156 return get_attr_itanium_class (insn);
5158 return ITANIUM_CLASS_UNKNOWN;
5161 static enum attr_type
5162 ia64_safe_type (rtx insn)
5164 if (recog_memoized (insn) >= 0)
5165 return get_attr_type (insn);
5167 return TYPE_UNKNOWN;
5170 /* The following collection of routines emit instruction group stop bits as
5171 necessary to avoid dependencies. */
5173 /* Need to track some additional registers as far as serialization is
5174 concerned so we can properly handle br.call and br.ret. We could
5175 make these registers visible to gcc, but since these registers are
5176 never explicitly used in gcc generated code, it seems wasteful to
5177 do so (plus it would make the call and return patterns needlessly
5179 #define REG_RP (BR_REG (0))
5180 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5181 /* This is used for volatile asms which may require a stop bit immediately
5182 before and after them. */
5183 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5184 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5185 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5187 /* For each register, we keep track of how it has been written in the
5188 current instruction group.
5190 If a register is written unconditionally (no qualifying predicate),
5191 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5193 If a register is written if its qualifying predicate P is true, we
5194 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5195 may be written again by the complement of P (P^1) and when this happens,
5196 WRITE_COUNT gets set to 2.
5198 The result of this is that whenever an insn attempts to write a register
5199 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5201 If a predicate register is written by a floating-point insn, we set
5202 WRITTEN_BY_FP to true.
5204 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5205 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
5207 struct reg_write_state
5209 unsigned int write_count : 2;
5210 unsigned int first_pred : 16;
5211 unsigned int written_by_fp : 1;
5212 unsigned int written_by_and : 1;
5213 unsigned int written_by_or : 1;
5216 /* Cumulative info for the current instruction group. */
5217 struct reg_write_state rws_sum[NUM_REGS];
5218 /* Info for the current instruction. This gets copied to rws_sum after a
5219 stop bit is emitted. */
5220 struct reg_write_state rws_insn[NUM_REGS];
5222 /* Indicates whether this is the first instruction after a stop bit,
5223 in which case we don't need another stop bit. Without this,
5224 ia64_variable_issue will die when scheduling an alloc. */
5225 static int first_instruction;
5227 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5228 RTL for one instruction. */
5231 unsigned int is_write : 1; /* Is register being written? */
5232 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5233 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5234 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5235 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5236 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5239 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
5240 static int rws_access_regno (int, struct reg_flags, int);
5241 static int rws_access_reg (rtx, struct reg_flags, int);
5242 static void update_set_flags (rtx, struct reg_flags *);
5243 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5244 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5245 static void init_insn_group_barriers (void);
5246 static int group_barrier_needed (rtx);
5247 static int safe_group_barrier_needed (rtx);
5249 /* Update *RWS for REGNO, which is being written by the current instruction,
5250 with predicate PRED, and associated register flags in FLAGS. */
5253 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
5256 rws[regno].write_count++;
5258 rws[regno].write_count = 2;
5259 rws[regno].written_by_fp |= flags.is_fp;
5260 /* ??? Not tracking and/or across differing predicates. */
5261 rws[regno].written_by_and = flags.is_and;
5262 rws[regno].written_by_or = flags.is_or;
5263 rws[regno].first_pred = pred;
5266 /* Handle an access to register REGNO of type FLAGS using predicate register
5267 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5268 a dependency with an earlier instruction in the same group. */
5271 rws_access_regno (int regno, struct reg_flags flags, int pred)
5273 int need_barrier = 0;
5275 gcc_assert (regno < NUM_REGS);
5277 if (! PR_REGNO_P (regno))
5278 flags.is_and = flags.is_or = 0;
5284 /* One insn writes same reg multiple times? */
5285 gcc_assert (!rws_insn[regno].write_count);
5287 /* Update info for current instruction. */
5288 rws_update (rws_insn, regno, flags, pred);
5289 write_count = rws_sum[regno].write_count;
5291 switch (write_count)
5294 /* The register has not been written yet. */
5295 rws_update (rws_sum, regno, flags, pred);
5299 /* The register has been written via a predicate. If this is
5300 not a complementary predicate, then we need a barrier. */
5301 /* ??? This assumes that P and P+1 are always complementary
5302 predicates for P even. */
5303 if (flags.is_and && rws_sum[regno].written_by_and)
5305 else if (flags.is_or && rws_sum[regno].written_by_or)
5307 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5309 rws_update (rws_sum, regno, flags, pred);
5313 /* The register has been unconditionally written already. We
5315 if (flags.is_and && rws_sum[regno].written_by_and)
5317 else if (flags.is_or && rws_sum[regno].written_by_or)
5321 rws_sum[regno].written_by_and = flags.is_and;
5322 rws_sum[regno].written_by_or = flags.is_or;
5331 if (flags.is_branch)
5333 /* Branches have several RAW exceptions that allow to avoid
5336 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5337 /* RAW dependencies on branch regs are permissible as long
5338 as the writer is a non-branch instruction. Since we
5339 never generate code that uses a branch register written
5340 by a branch instruction, handling this case is
5344 if (REGNO_REG_CLASS (regno) == PR_REGS
5345 && ! rws_sum[regno].written_by_fp)
5346 /* The predicates of a branch are available within the
5347 same insn group as long as the predicate was written by
5348 something other than a floating-point instruction. */
5352 if (flags.is_and && rws_sum[regno].written_by_and)
5354 if (flags.is_or && rws_sum[regno].written_by_or)
5357 switch (rws_sum[regno].write_count)
5360 /* The register has not been written yet. */
5364 /* The register has been written via a predicate. If this is
5365 not a complementary predicate, then we need a barrier. */
5366 /* ??? This assumes that P and P+1 are always complementary
5367 predicates for P even. */
5368 if ((rws_sum[regno].first_pred ^ 1) != pred)
5373 /* The register has been unconditionally written already. We
5383 return need_barrier;
5387 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5389 int regno = REGNO (reg);
5390 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5393 return rws_access_regno (regno, flags, pred);
5396 int need_barrier = 0;
5398 need_barrier |= rws_access_regno (regno + n, flags, pred);
5399 return need_barrier;
5403 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
5404 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
5407 update_set_flags (rtx x, struct reg_flags *pflags)
5409 rtx src = SET_SRC (x);
5411 switch (GET_CODE (src))
5417 /* There are three cases here:
5418 (1) The destination is (pc), in which case this is a branch,
5419 nothing here applies.
5420 (2) The destination is ar.lc, in which case this is a
5421 doloop_end_internal,
5422 (3) The destination is an fp register, in which case this is
5423 an fselect instruction.
5424 In all cases, nothing we do in this function applies. */
5428 if (COMPARISON_P (src)
5429 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5430 /* Set pflags->is_fp to 1 so that we know we're dealing
5431 with a floating point comparison when processing the
5432 destination of the SET. */
5435 /* Discover if this is a parallel comparison. We only handle
5436 and.orcm and or.andcm at present, since we must retain a
5437 strict inverse on the predicate pair. */
5438 else if (GET_CODE (src) == AND)
5440 else if (GET_CODE (src) == IOR)
5447 /* Subroutine of rtx_needs_barrier; this function determines whether the
5448 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5449 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5453 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5455 int need_barrier = 0;
5457 rtx src = SET_SRC (x);
5459 if (GET_CODE (src) == CALL)
5460 /* We don't need to worry about the result registers that
5461 get written by subroutine call. */
5462 return rtx_needs_barrier (src, flags, pred);
5463 else if (SET_DEST (x) == pc_rtx)
5465 /* X is a conditional branch. */
5466 /* ??? This seems redundant, as the caller sets this bit for
5468 flags.is_branch = 1;
5469 return rtx_needs_barrier (src, flags, pred);
5472 need_barrier = rtx_needs_barrier (src, flags, pred);
5475 if (GET_CODE (dst) == ZERO_EXTRACT)
5477 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5478 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5479 dst = XEXP (dst, 0);
5481 return need_barrier;
5484 /* Handle an access to rtx X of type FLAGS using predicate register
5485 PRED. Return 1 if this access creates a dependency with an earlier
5486 instruction in the same group. */
5489 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5492 int is_complemented = 0;
5493 int need_barrier = 0;
5494 const char *format_ptr;
5495 struct reg_flags new_flags;
5503 switch (GET_CODE (x))
5506 update_set_flags (x, &new_flags);
5507 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5508 if (GET_CODE (SET_SRC (x)) != CALL)
5510 new_flags.is_write = 1;
5511 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5516 new_flags.is_write = 0;
5517 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5519 /* Avoid multiple register writes, in case this is a pattern with
5520 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5521 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5523 new_flags.is_write = 1;
5524 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5525 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5526 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5531 /* X is a predicated instruction. */
5533 cond = COND_EXEC_TEST (x);
5535 need_barrier = rtx_needs_barrier (cond, flags, 0);
5537 if (GET_CODE (cond) == EQ)
5538 is_complemented = 1;
5539 cond = XEXP (cond, 0);
5540 gcc_assert (GET_CODE (cond) == REG
5541 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5542 pred = REGNO (cond);
5543 if (is_complemented)
5546 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5547 return need_barrier;
5551 /* Clobber & use are for earlier compiler-phases only. */
5556 /* We always emit stop bits for traditional asms. We emit stop bits
5557 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5558 if (GET_CODE (x) != ASM_OPERANDS
5559 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5561 /* Avoid writing the register multiple times if we have multiple
5562 asm outputs. This avoids a failure in rws_access_reg. */
5563 if (! rws_insn[REG_VOLATILE].write_count)
5565 new_flags.is_write = 1;
5566 rws_access_regno (REG_VOLATILE, new_flags, pred);
5571 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5572 We cannot just fall through here since then we would be confused
5573 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
5574 traditional asms unlike their normal usage. */
5576 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5577 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5582 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5584 rtx pat = XVECEXP (x, 0, i);
5585 switch (GET_CODE (pat))
5588 update_set_flags (pat, &new_flags);
5589 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5595 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5606 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5608 rtx pat = XVECEXP (x, 0, i);
5609 if (GET_CODE (pat) == SET)
5611 if (GET_CODE (SET_SRC (pat)) != CALL)
5613 new_flags.is_write = 1;
5614 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5618 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5619 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5624 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5627 if (REGNO (x) == AR_UNAT_REGNUM)
5629 for (i = 0; i < 64; ++i)
5630 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5633 need_barrier = rws_access_reg (x, flags, pred);
5637 /* Find the regs used in memory address computation. */
5638 new_flags.is_write = 0;
5639 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5642 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5643 case SYMBOL_REF: case LABEL_REF: case CONST:
5646 /* Operators with side-effects. */
5647 case POST_INC: case POST_DEC:
5648 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5650 new_flags.is_write = 0;
5651 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5652 new_flags.is_write = 1;
5653 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5657 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5659 new_flags.is_write = 0;
5660 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5661 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5662 new_flags.is_write = 1;
5663 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5666 /* Handle common unary and binary ops for efficiency. */
5667 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5668 case MOD: case UDIV: case UMOD: case AND: case IOR:
5669 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5670 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5671 case NE: case EQ: case GE: case GT: case LE:
5672 case LT: case GEU: case GTU: case LEU: case LTU:
5673 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5674 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5677 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5678 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5679 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5680 case SQRT: case FFS: case POPCOUNT:
5681 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5685 /* VEC_SELECT's second argument is a PARALLEL with integers that
5686 describe the elements selected. On ia64, those integers are
5687 always constants. Avoid walking the PARALLEL so that we don't
5688 get confused with "normal" parallels and then die. */
5689 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5693 switch (XINT (x, 1))
5695 case UNSPEC_LTOFF_DTPMOD:
5696 case UNSPEC_LTOFF_DTPREL:
5698 case UNSPEC_LTOFF_TPREL:
5700 case UNSPEC_PRED_REL_MUTEX:
5701 case UNSPEC_PIC_CALL:
5703 case UNSPEC_FETCHADD_ACQ:
5704 case UNSPEC_BSP_VALUE:
5705 case UNSPEC_FLUSHRS:
5706 case UNSPEC_BUNDLE_SELECTOR:
5709 case UNSPEC_GR_SPILL:
5710 case UNSPEC_GR_RESTORE:
5712 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5713 HOST_WIDE_INT bit = (offset >> 3) & 63;
5715 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5716 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5717 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5722 case UNSPEC_FR_SPILL:
5723 case UNSPEC_FR_RESTORE:
5724 case UNSPEC_GETF_EXP:
5725 case UNSPEC_SETF_EXP:
5727 case UNSPEC_FR_SQRT_RECIP_APPROX:
5728 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5731 case UNSPEC_FR_RECIP_APPROX:
5733 case UNSPEC_COPYSIGN:
5734 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5735 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5738 case UNSPEC_CMPXCHG_ACQ:
5739 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5740 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5748 case UNSPEC_VOLATILE:
5749 switch (XINT (x, 1))
5752 /* Alloc must always be the first instruction of a group.
5753 We force this by always returning true. */
5754 /* ??? We might get better scheduling if we explicitly check for
5755 input/local/output register dependencies, and modify the
5756 scheduler so that alloc is always reordered to the start of
5757 the current group. We could then eliminate all of the
5758 first_instruction code. */
5759 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5761 new_flags.is_write = 1;
5762 rws_access_regno (REG_AR_CFM, new_flags, pred);
5765 case UNSPECV_SET_BSP:
5769 case UNSPECV_BLOCKAGE:
5770 case UNSPECV_INSN_GROUP_BARRIER:
5772 case UNSPECV_PSAC_ALL:
5773 case UNSPECV_PSAC_NORMAL:
5782 new_flags.is_write = 0;
5783 need_barrier = rws_access_regno (REG_RP, flags, pred);
5784 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5786 new_flags.is_write = 1;
5787 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5788 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5792 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5793 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5794 switch (format_ptr[i])
5796 case '0': /* unused field */
5797 case 'i': /* integer */
5798 case 'n': /* note */
5799 case 'w': /* wide integer */
5800 case 's': /* pointer to string */
5801 case 'S': /* optional pointer to string */
5805 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5810 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5811 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5820 return need_barrier;
5823 /* Clear out the state for group_barrier_needed at the start of a
5824 sequence of insns. */
5827 init_insn_group_barriers (void)
5829 memset (rws_sum, 0, sizeof (rws_sum));
5830 first_instruction = 1;
5833 /* Given the current state, determine whether a group barrier (a stop bit) is
5834 necessary before INSN. Return nonzero if so. This modifies the state to
5835 include the effects of INSN as a side-effect. */
5838 group_barrier_needed (rtx insn)
5841 int need_barrier = 0;
5842 struct reg_flags flags;
5844 memset (&flags, 0, sizeof (flags));
5845 switch (GET_CODE (insn))
5851 /* A barrier doesn't imply an instruction group boundary. */
5855 memset (rws_insn, 0, sizeof (rws_insn));
5859 flags.is_branch = 1;
5860 flags.is_sibcall = SIBLING_CALL_P (insn);
5861 memset (rws_insn, 0, sizeof (rws_insn));
5863 /* Don't bundle a call following another call. */
5864 if ((pat = prev_active_insn (insn))
5865 && GET_CODE (pat) == CALL_INSN)
5871 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5875 flags.is_branch = 1;
5877 /* Don't bundle a jump following a call. */
5878 if ((pat = prev_active_insn (insn))
5879 && GET_CODE (pat) == CALL_INSN)
5887 if (GET_CODE (PATTERN (insn)) == USE
5888 || GET_CODE (PATTERN (insn)) == CLOBBER)
5889 /* Don't care about USE and CLOBBER "insns"---those are used to
5890 indicate to the optimizer that it shouldn't get rid of
5891 certain operations. */
5894 pat = PATTERN (insn);
5896 /* Ug. Hack hacks hacked elsewhere. */
5897 switch (recog_memoized (insn))
5899 /* We play dependency tricks with the epilogue in order
5900 to get proper schedules. Undo this for dv analysis. */
5901 case CODE_FOR_epilogue_deallocate_stack:
5902 case CODE_FOR_prologue_allocate_stack:
5903 pat = XVECEXP (pat, 0, 0);
5906 /* The pattern we use for br.cloop confuses the code above.
5907 The second element of the vector is representative. */
5908 case CODE_FOR_doloop_end_internal:
5909 pat = XVECEXP (pat, 0, 1);
5912 /* Doesn't generate code. */
5913 case CODE_FOR_pred_rel_mutex:
5914 case CODE_FOR_prologue_use:
5921 memset (rws_insn, 0, sizeof (rws_insn));
5922 need_barrier = rtx_needs_barrier (pat, flags, 0);
5924 /* Check to see if the previous instruction was a volatile
5927 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5934 if (first_instruction && INSN_P (insn)
5935 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5936 && GET_CODE (PATTERN (insn)) != USE
5937 && GET_CODE (PATTERN (insn)) != CLOBBER)
5940 first_instruction = 0;
5943 return need_barrier;
5946 /* Like group_barrier_needed, but do not clobber the current state. */
5949 safe_group_barrier_needed (rtx insn)
5951 struct reg_write_state rws_saved[NUM_REGS];
5952 int saved_first_instruction;
5955 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5956 saved_first_instruction = first_instruction;
5958 t = group_barrier_needed (insn);
5960 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5961 first_instruction = saved_first_instruction;
5966 /* Scan the current function and insert stop bits as necessary to
5967 eliminate dependencies. This function assumes that a final
5968 instruction scheduling pass has been run which has already
5969 inserted most of the necessary stop bits. This function only
5970 inserts new ones at basic block boundaries, since these are
5971 invisible to the scheduler. */
5974 emit_insn_group_barriers (FILE *dump)
5978 int insns_since_last_label = 0;
5980 init_insn_group_barriers ();
5982 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5984 if (GET_CODE (insn) == CODE_LABEL)
5986 if (insns_since_last_label)
5988 insns_since_last_label = 0;
5990 else if (GET_CODE (insn) == NOTE
5991 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5993 if (insns_since_last_label)
5995 insns_since_last_label = 0;
5997 else if (GET_CODE (insn) == INSN
5998 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5999 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6001 init_insn_group_barriers ();
6004 else if (INSN_P (insn))
6006 insns_since_last_label = 1;
6008 if (group_barrier_needed (insn))
6013 fprintf (dump, "Emitting stop before label %d\n",
6014 INSN_UID (last_label));
6015 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6018 init_insn_group_barriers ();
6026 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6027 This function has to emit all necessary group barriers. */
6030 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6034 init_insn_group_barriers ();
6036 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6038 if (GET_CODE (insn) == BARRIER)
6040 rtx last = prev_active_insn (insn);
6044 if (GET_CODE (last) == JUMP_INSN
6045 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6046 last = prev_active_insn (last);
6047 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6048 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6050 init_insn_group_barriers ();
6052 else if (INSN_P (insn))
6054 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6055 init_insn_group_barriers ();
6056 else if (group_barrier_needed (insn))
6058 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6059 init_insn_group_barriers ();
6060 group_barrier_needed (insn);
6068 /* Instruction scheduling support. */
6070 #define NR_BUNDLES 10
6072 /* A list of names of all available bundles. */
6074 static const char *bundle_name [NR_BUNDLES] =
6080 #if NR_BUNDLES == 10
6090 /* Nonzero if we should insert stop bits into the schedule. */
6092 int ia64_final_schedule = 0;
6094 /* Codes of the corresponding queried units: */
6096 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6097 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6099 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6100 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6102 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6104 /* The following variable value is an insn group barrier. */
6106 static rtx dfa_stop_insn;
6108 /* The following variable value is the last issued insn. */
6110 static rtx last_scheduled_insn;
6112 /* The following variable value is size of the DFA state. */
6114 static size_t dfa_state_size;
6116 /* The following variable value is pointer to a DFA state used as
6117 temporary variable. */
6119 static state_t temp_dfa_state = NULL;
6121 /* The following variable value is DFA state after issuing the last
6124 static state_t prev_cycle_state = NULL;
6126 /* The following array element values are TRUE if the corresponding
6127 insn requires to add stop bits before it. */
6129 static char *stops_p;
6131 /* The following variable is used to set up the mentioned above array. */
6133 static int stop_before_p = 0;
6135 /* The following variable value is length of the arrays `clocks' and
6138 static int clocks_length;
6140 /* The following array element values are cycles on which the
6141 corresponding insn will be issued. The array is used only for
6146 /* The following array element values are numbers of cycles should be
6147 added to improve insn scheduling for MM_insns for Itanium1. */
6149 static int *add_cycles;
6151 static rtx ia64_single_set (rtx);
6152 static void ia64_emit_insn_before (rtx, rtx);
6154 /* Map a bundle number to its pseudo-op. */
6157 get_bundle_name (int b)
6159 return bundle_name[b];
6163 /* Return the maximum number of instructions a cpu can issue. */
6166 ia64_issue_rate (void)
6171 /* Helper function - like single_set, but look inside COND_EXEC. */
6174 ia64_single_set (rtx insn)
6176 rtx x = PATTERN (insn), ret;
6177 if (GET_CODE (x) == COND_EXEC)
6178 x = COND_EXEC_CODE (x);
6179 if (GET_CODE (x) == SET)
6182 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6183 Although they are not classical single set, the second set is there just
6184 to protect it from moving past FP-relative stack accesses. */
6185 switch (recog_memoized (insn))
6187 case CODE_FOR_prologue_allocate_stack:
6188 case CODE_FOR_epilogue_deallocate_stack:
6189 ret = XVECEXP (x, 0, 0);
6193 ret = single_set_2 (insn, x);
6200 /* Adjust the cost of a scheduling dependency. Return the new cost of
6201 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6204 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6206 enum attr_itanium_class dep_class;
6207 enum attr_itanium_class insn_class;
6209 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6212 insn_class = ia64_safe_itanium_class (insn);
6213 dep_class = ia64_safe_itanium_class (dep_insn);
6214 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6215 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6221 /* Like emit_insn_before, but skip cycle_display notes.
6222 ??? When cycle display notes are implemented, update this. */
6225 ia64_emit_insn_before (rtx insn, rtx before)
6227 emit_insn_before (insn, before);
6230 /* The following function marks insns who produce addresses for load
6231 and store insns. Such insns will be placed into M slots because it
6232 decrease latency time for Itanium1 (see function
6233 `ia64_produce_address_p' and the DFA descriptions). */
6236 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6238 rtx insn, link, next, next_tail;
6240 /* Before reload, which_alternative is not set, which means that
6241 ia64_safe_itanium_class will produce wrong results for (at least)
6242 move instructions. */
6243 if (!reload_completed)
6246 next_tail = NEXT_INSN (tail);
6247 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6250 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6252 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6254 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
6256 enum attr_itanium_class c;
6258 if (REG_NOTE_KIND (link) != REG_DEP_TRUE)
6260 next = XEXP (link, 0);
6261 c = ia64_safe_itanium_class (next);
6262 if ((c == ITANIUM_CLASS_ST
6263 || c == ITANIUM_CLASS_STF)
6264 && ia64_st_address_bypass_p (insn, next))
6266 else if ((c == ITANIUM_CLASS_LD
6267 || c == ITANIUM_CLASS_FLD
6268 || c == ITANIUM_CLASS_FLDP)
6269 && ia64_ld_address_bypass_p (insn, next))
6272 insn->call = link != 0;
6276 /* We're beginning a new block. Initialize data structures as necessary. */
6279 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6280 int sched_verbose ATTRIBUTE_UNUSED,
6281 int max_ready ATTRIBUTE_UNUSED)
6283 #ifdef ENABLE_CHECKING
6286 if (reload_completed)
6287 for (insn = NEXT_INSN (current_sched_info->prev_head);
6288 insn != current_sched_info->next_tail;
6289 insn = NEXT_INSN (insn))
6290 gcc_assert (!SCHED_GROUP_P (insn));
6292 last_scheduled_insn = NULL_RTX;
6293 init_insn_group_barriers ();
6296 /* We are about to being issuing insns for this clock cycle.
6297 Override the default sort algorithm to better slot instructions. */
6300 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6301 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6305 int n_ready = *pn_ready;
6306 rtx *e_ready = ready + n_ready;
6310 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6312 if (reorder_type == 0)
6314 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6316 for (insnp = ready; insnp < e_ready; insnp++)
6317 if (insnp < e_ready)
6320 enum attr_type t = ia64_safe_type (insn);
6321 if (t == TYPE_UNKNOWN)
6323 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6324 || asm_noperands (PATTERN (insn)) >= 0)
6326 rtx lowest = ready[n_asms];
6327 ready[n_asms] = insn;
6333 rtx highest = ready[n_ready - 1];
6334 ready[n_ready - 1] = insn;
6341 if (n_asms < n_ready)
6343 /* Some normal insns to process. Skip the asms. */
6347 else if (n_ready > 0)
6351 if (ia64_final_schedule)
6354 int nr_need_stop = 0;
6356 for (insnp = ready; insnp < e_ready; insnp++)
6357 if (safe_group_barrier_needed (*insnp))
6360 if (reorder_type == 1 && n_ready == nr_need_stop)
6362 if (reorder_type == 0)
6365 /* Move down everything that needs a stop bit, preserving
6367 while (insnp-- > ready + deleted)
6368 while (insnp >= ready + deleted)
6371 if (! safe_group_barrier_needed (insn))
6373 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6384 /* We are about to being issuing insns for this clock cycle. Override
6385 the default sort algorithm to better slot instructions. */
6388 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6391 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6392 pn_ready, clock_var, 0);
6395 /* Like ia64_sched_reorder, but called after issuing each insn.
6396 Override the default sort algorithm to better slot instructions. */
6399 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6400 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6401 int *pn_ready, int clock_var)
6403 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6404 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6405 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6409 /* We are about to issue INSN. Return the number of insns left on the
6410 ready queue that can be issued this cycle. */
6413 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6414 int sched_verbose ATTRIBUTE_UNUSED,
6415 rtx insn ATTRIBUTE_UNUSED,
6416 int can_issue_more ATTRIBUTE_UNUSED)
6418 last_scheduled_insn = insn;
6419 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6420 if (reload_completed)
6422 int needed = group_barrier_needed (insn);
6424 gcc_assert (!needed);
6425 if (GET_CODE (insn) == CALL_INSN)
6426 init_insn_group_barriers ();
6427 stops_p [INSN_UID (insn)] = stop_before_p;
6433 /* We are choosing insn from the ready queue. Return nonzero if INSN
6437 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6439 gcc_assert (insn && INSN_P (insn));
6440 return (!reload_completed
6441 || !safe_group_barrier_needed (insn));
6444 /* The following variable value is pseudo-insn used by the DFA insn
6445 scheduler to change the DFA state when the simulated clock is
6448 static rtx dfa_pre_cycle_insn;
6450 /* We are about to being issuing INSN. Return nonzero if we cannot
6451 issue it on given cycle CLOCK and return zero if we should not sort
6452 the ready queue on the next clock start. */
6455 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6456 int clock, int *sort_p)
6458 int setup_clocks_p = FALSE;
6460 gcc_assert (insn && INSN_P (insn));
6461 if ((reload_completed && safe_group_barrier_needed (insn))
6462 || (last_scheduled_insn
6463 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6464 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6465 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6467 init_insn_group_barriers ();
6468 if (verbose && dump)
6469 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6470 last_clock == clock ? " + cycle advance" : "");
6472 if (last_clock == clock)
6474 state_transition (curr_state, dfa_stop_insn);
6475 if (TARGET_EARLY_STOP_BITS)
6476 *sort_p = (last_scheduled_insn == NULL_RTX
6477 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6482 else if (reload_completed)
6483 setup_clocks_p = TRUE;
6484 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6485 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6486 state_reset (curr_state);
6489 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6490 state_transition (curr_state, dfa_stop_insn);
6491 state_transition (curr_state, dfa_pre_cycle_insn);
6492 state_transition (curr_state, NULL);
6495 else if (reload_completed)
6496 setup_clocks_p = TRUE;
6497 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6498 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6499 && asm_noperands (PATTERN (insn)) < 0)
6501 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6503 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6508 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6509 if (REG_NOTE_KIND (link) == 0)
6511 enum attr_itanium_class dep_class;
6512 rtx dep_insn = XEXP (link, 0);
6514 dep_class = ia64_safe_itanium_class (dep_insn);
6515 if ((dep_class == ITANIUM_CLASS_MMMUL
6516 || dep_class == ITANIUM_CLASS_MMSHF)
6517 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6519 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6520 d = last_clock - clocks [INSN_UID (dep_insn)];
6523 add_cycles [INSN_UID (insn)] = 3 - d;
6531 /* The following page contains abstract data `bundle states' which are
6532 used for bundling insns (inserting nops and template generation). */
6534 /* The following describes state of insn bundling. */
6538 /* Unique bundle state number to identify them in the debugging
6541 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6542 /* number nops before and after the insn */
6543 short before_nops_num, after_nops_num;
6544 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
6546 int cost; /* cost of the state in cycles */
6547 int accumulated_insns_num; /* number of all previous insns including
6548 nops. L is considered as 2 insns */
6549 int branch_deviation; /* deviation of previous branches from 3rd slots */
6550 struct bundle_state *next; /* next state with the same insn_num */
6551 struct bundle_state *originator; /* originator (previous insn state) */
6552 /* All bundle states are in the following chain. */
6553 struct bundle_state *allocated_states_chain;
6554 /* The DFA State after issuing the insn and the nops. */
6558 /* The following is map insn number to the corresponding bundle state. */
6560 static struct bundle_state **index_to_bundle_states;
6562 /* The unique number of next bundle state. */
6564 static int bundle_states_num;
6566 /* All allocated bundle states are in the following chain. */
6568 static struct bundle_state *allocated_bundle_states_chain;
6570 /* All allocated but not used bundle states are in the following
6573 static struct bundle_state *free_bundle_state_chain;
6576 /* The following function returns a free bundle state. */
6578 static struct bundle_state *
6579 get_free_bundle_state (void)
6581 struct bundle_state *result;
6583 if (free_bundle_state_chain != NULL)
6585 result = free_bundle_state_chain;
6586 free_bundle_state_chain = result->next;
6590 result = xmalloc (sizeof (struct bundle_state));
6591 result->dfa_state = xmalloc (dfa_state_size);
6592 result->allocated_states_chain = allocated_bundle_states_chain;
6593 allocated_bundle_states_chain = result;
6595 result->unique_num = bundle_states_num++;
6600 /* The following function frees given bundle state. */
6603 free_bundle_state (struct bundle_state *state)
6605 state->next = free_bundle_state_chain;
6606 free_bundle_state_chain = state;
6609 /* Start work with abstract data `bundle states'. */
6612 initiate_bundle_states (void)
6614 bundle_states_num = 0;
6615 free_bundle_state_chain = NULL;
6616 allocated_bundle_states_chain = NULL;
6619 /* Finish work with abstract data `bundle states'. */
6622 finish_bundle_states (void)
6624 struct bundle_state *curr_state, *next_state;
6626 for (curr_state = allocated_bundle_states_chain;
6628 curr_state = next_state)
6630 next_state = curr_state->allocated_states_chain;
6631 free (curr_state->dfa_state);
6636 /* Hash table of the bundle states. The key is dfa_state and insn_num
6637 of the bundle states. */
6639 static htab_t bundle_state_table;
6641 /* The function returns hash of BUNDLE_STATE. */
6644 bundle_state_hash (const void *bundle_state)
6646 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6649 for (result = i = 0; i < dfa_state_size; i++)
6650 result += (((unsigned char *) state->dfa_state) [i]
6651 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6652 return result + state->insn_num;
6655 /* The function returns nonzero if the bundle state keys are equal. */
6658 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6660 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6661 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6663 return (state1->insn_num == state2->insn_num
6664 && memcmp (state1->dfa_state, state2->dfa_state,
6665 dfa_state_size) == 0);
6668 /* The function inserts the BUNDLE_STATE into the hash table. The
6669 function returns nonzero if the bundle has been inserted into the
6670 table. The table contains the best bundle state with given key. */
6673 insert_bundle_state (struct bundle_state *bundle_state)
6677 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6678 if (*entry_ptr == NULL)
6680 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6681 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6682 *entry_ptr = (void *) bundle_state;
6685 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6686 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6687 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6688 > bundle_state->accumulated_insns_num
6689 || (((struct bundle_state *)
6690 *entry_ptr)->accumulated_insns_num
6691 == bundle_state->accumulated_insns_num
6692 && ((struct bundle_state *)
6693 *entry_ptr)->branch_deviation
6694 > bundle_state->branch_deviation))))
6697 struct bundle_state temp;
6699 temp = *(struct bundle_state *) *entry_ptr;
6700 *(struct bundle_state *) *entry_ptr = *bundle_state;
6701 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6702 *bundle_state = temp;
6707 /* Start work with the hash table. */
6710 initiate_bundle_state_table (void)
6712 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6716 /* Finish work with the hash table. */
6719 finish_bundle_state_table (void)
6721 htab_delete (bundle_state_table);
6726 /* The following variable is a insn `nop' used to check bundle states
6727 with different number of inserted nops. */
6729 static rtx ia64_nop;
6731 /* The following function tries to issue NOPS_NUM nops for the current
6732 state without advancing processor cycle. If it failed, the
6733 function returns FALSE and frees the current state. */
6736 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6740 for (i = 0; i < nops_num; i++)
6741 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6743 free_bundle_state (curr_state);
6749 /* The following function tries to issue INSN for the current
6750 state without advancing processor cycle. If it failed, the
6751 function returns FALSE and frees the current state. */
6754 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6756 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6758 free_bundle_state (curr_state);
6764 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6765 starting with ORIGINATOR without advancing processor cycle. If
6766 TRY_BUNDLE_END_P is TRUE, the function also/only (if
6767 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill all bundle.
6768 If it was successful, the function creates new bundle state and
6769 insert into the hash table and into `index_to_bundle_states'. */
6772 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6773 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6775 struct bundle_state *curr_state;
6777 curr_state = get_free_bundle_state ();
6778 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6779 curr_state->insn = insn;
6780 curr_state->insn_num = originator->insn_num + 1;
6781 curr_state->cost = originator->cost;
6782 curr_state->originator = originator;
6783 curr_state->before_nops_num = before_nops_num;
6784 curr_state->after_nops_num = 0;
6785 curr_state->accumulated_insns_num
6786 = originator->accumulated_insns_num + before_nops_num;
6787 curr_state->branch_deviation = originator->branch_deviation;
6789 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6791 gcc_assert (GET_MODE (insn) != TImode);
6792 if (!try_issue_nops (curr_state, before_nops_num))
6794 if (!try_issue_insn (curr_state, insn))
6796 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6797 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6798 && curr_state->accumulated_insns_num % 3 != 0)
6800 free_bundle_state (curr_state);
6804 else if (GET_MODE (insn) != TImode)
6806 if (!try_issue_nops (curr_state, before_nops_num))
6808 if (!try_issue_insn (curr_state, insn))
6810 curr_state->accumulated_insns_num++;
6811 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
6812 && asm_noperands (PATTERN (insn)) < 0);
6814 if (ia64_safe_type (insn) == TYPE_L)
6815 curr_state->accumulated_insns_num++;
6819 /* If this is an insn that must be first in a group, then don't allow
6820 nops to be emitted before it. Currently, alloc is the only such
6821 supported instruction. */
6822 /* ??? The bundling automatons should handle this for us, but they do
6823 not yet have support for the first_insn attribute. */
6824 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
6826 free_bundle_state (curr_state);
6830 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6831 state_transition (curr_state->dfa_state, NULL);
6833 if (!try_issue_nops (curr_state, before_nops_num))
6835 if (!try_issue_insn (curr_state, insn))
6837 curr_state->accumulated_insns_num++;
6838 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6839 || asm_noperands (PATTERN (insn)) >= 0)
6841 /* Finish bundle containing asm insn. */
6842 curr_state->after_nops_num
6843 = 3 - curr_state->accumulated_insns_num % 3;
6844 curr_state->accumulated_insns_num
6845 += 3 - curr_state->accumulated_insns_num % 3;
6847 else if (ia64_safe_type (insn) == TYPE_L)
6848 curr_state->accumulated_insns_num++;
6850 if (ia64_safe_type (insn) == TYPE_B)
6851 curr_state->branch_deviation
6852 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6853 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6855 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6858 struct bundle_state *curr_state1;
6859 struct bundle_state *allocated_states_chain;
6861 curr_state1 = get_free_bundle_state ();
6862 dfa_state = curr_state1->dfa_state;
6863 allocated_states_chain = curr_state1->allocated_states_chain;
6864 *curr_state1 = *curr_state;
6865 curr_state1->dfa_state = dfa_state;
6866 curr_state1->allocated_states_chain = allocated_states_chain;
6867 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6869 curr_state = curr_state1;
6871 if (!try_issue_nops (curr_state,
6872 3 - curr_state->accumulated_insns_num % 3))
6874 curr_state->after_nops_num
6875 = 3 - curr_state->accumulated_insns_num % 3;
6876 curr_state->accumulated_insns_num
6877 += 3 - curr_state->accumulated_insns_num % 3;
6879 if (!insert_bundle_state (curr_state))
6880 free_bundle_state (curr_state);
6884 /* The following function returns position in the two window bundle
6888 get_max_pos (state_t state)
6890 if (cpu_unit_reservation_p (state, pos_6))
6892 else if (cpu_unit_reservation_p (state, pos_5))
6894 else if (cpu_unit_reservation_p (state, pos_4))
6896 else if (cpu_unit_reservation_p (state, pos_3))
6898 else if (cpu_unit_reservation_p (state, pos_2))
6900 else if (cpu_unit_reservation_p (state, pos_1))
6906 /* The function returns code of a possible template for given position
6907 and state. The function should be called only with 2 values of
6908 position equal to 3 or 6. We avoid generating F NOPs by putting
6909 templates containing F insns at the end of the template search
6910 because undocumented anomaly in McKinley derived cores which can
6911 cause stalls if an F-unit insn (including a NOP) is issued within a
6912 six-cycle window after reading certain application registers (such
6913 as ar.bsp). Furthermore, power-considerations also argue against
6914 the use of F-unit instructions unless they're really needed. */
6917 get_template (state_t state, int pos)
6922 if (cpu_unit_reservation_p (state, _0mmi_))
6924 else if (cpu_unit_reservation_p (state, _0mii_))
6926 else if (cpu_unit_reservation_p (state, _0mmb_))
6928 else if (cpu_unit_reservation_p (state, _0mib_))
6930 else if (cpu_unit_reservation_p (state, _0mbb_))
6932 else if (cpu_unit_reservation_p (state, _0bbb_))
6934 else if (cpu_unit_reservation_p (state, _0mmf_))
6936 else if (cpu_unit_reservation_p (state, _0mfi_))
6938 else if (cpu_unit_reservation_p (state, _0mfb_))
6940 else if (cpu_unit_reservation_p (state, _0mlx_))
6945 if (cpu_unit_reservation_p (state, _1mmi_))
6947 else if (cpu_unit_reservation_p (state, _1mii_))
6949 else if (cpu_unit_reservation_p (state, _1mmb_))
6951 else if (cpu_unit_reservation_p (state, _1mib_))
6953 else if (cpu_unit_reservation_p (state, _1mbb_))
6955 else if (cpu_unit_reservation_p (state, _1bbb_))
6957 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6959 else if (cpu_unit_reservation_p (state, _1mfi_))
6961 else if (cpu_unit_reservation_p (state, _1mfb_))
6963 else if (cpu_unit_reservation_p (state, _1mlx_))
6972 /* The following function returns an insn important for insn bundling
6973 followed by INSN and before TAIL. */
6976 get_next_important_insn (rtx insn, rtx tail)
6978 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6980 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6981 && GET_CODE (PATTERN (insn)) != USE
6982 && GET_CODE (PATTERN (insn)) != CLOBBER)
6987 /* The following function does insn bundling. Bundling means
6988 inserting templates and nop insns to fit insn groups into permitted
6989 templates. Instruction scheduling uses NDFA (non-deterministic
6990 finite automata) encoding informations about the templates and the
6991 inserted nops. Nondeterminism of the automata permits follows
6992 all possible insn sequences very fast.
6994 Unfortunately it is not possible to get information about inserting
6995 nop insns and used templates from the automata states. The
6996 automata only says that we can issue an insn possibly inserting
6997 some nops before it and using some template. Therefore insn
6998 bundling in this function is implemented by using DFA
6999 (deterministic finite automata). We follows all possible insn
7000 sequences by inserting 0-2 nops (that is what the NDFA describe for
7001 insn scheduling) before/after each insn being bundled. We know the
7002 start of simulated processor cycle from insn scheduling (insn
7003 starting a new cycle has TImode).
7005 Simple implementation of insn bundling would create enormous
7006 number of possible insn sequences satisfying information about new
7007 cycle ticks taken from the insn scheduling. To make the algorithm
7008 practical we use dynamic programming. Each decision (about
7009 inserting nops and implicitly about previous decisions) is described
7010 by structure bundle_state (see above). If we generate the same
7011 bundle state (key is automaton state after issuing the insns and
7012 nops for it), we reuse already generated one. As consequence we
7013 reject some decisions which cannot improve the solution and
7014 reduce memory for the algorithm.
7016 When we reach the end of EBB (extended basic block), we choose the
7017 best sequence and then, moving back in EBB, insert templates for
7018 the best alternative. The templates are taken from querying
7019 automaton state for each insn in chosen bundle states.
7021 So the algorithm makes two (forward and backward) passes through
7022 EBB. There is an additional forward pass through EBB for Itanium1
7023 processor. This pass inserts more nops to make dependency between
7024 a producer insn and MMMUL/MMSHF at least 4 cycles long. */
7027 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7029 struct bundle_state *curr_state, *next_state, *best_state;
7030 rtx insn, next_insn;
7032 int i, bundle_end_p, only_bundle_end_p, asm_p;
7033 int pos = 0, max_pos, template0, template1;
7036 enum attr_type type;
7039 /* Count insns in the EBB. */
7040 for (insn = NEXT_INSN (prev_head_insn);
7041 insn && insn != tail;
7042 insn = NEXT_INSN (insn))
7048 dfa_clean_insn_cache ();
7049 initiate_bundle_state_table ();
7050 index_to_bundle_states = xmalloc ((insn_num + 2)
7051 * sizeof (struct bundle_state *));
7052 /* First (forward) pass -- generation of bundle states. */
7053 curr_state = get_free_bundle_state ();
7054 curr_state->insn = NULL;
7055 curr_state->before_nops_num = 0;
7056 curr_state->after_nops_num = 0;
7057 curr_state->insn_num = 0;
7058 curr_state->cost = 0;
7059 curr_state->accumulated_insns_num = 0;
7060 curr_state->branch_deviation = 0;
7061 curr_state->next = NULL;
7062 curr_state->originator = NULL;
7063 state_reset (curr_state->dfa_state);
7064 index_to_bundle_states [0] = curr_state;
7066 /* Shift cycle mark if it is put on insn which could be ignored. */
7067 for (insn = NEXT_INSN (prev_head_insn);
7069 insn = NEXT_INSN (insn))
7071 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7072 || GET_CODE (PATTERN (insn)) == USE
7073 || GET_CODE (PATTERN (insn)) == CLOBBER)
7074 && GET_MODE (insn) == TImode)
7076 PUT_MODE (insn, VOIDmode);
7077 for (next_insn = NEXT_INSN (insn);
7079 next_insn = NEXT_INSN (next_insn))
7080 if (INSN_P (next_insn)
7081 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7082 && GET_CODE (PATTERN (next_insn)) != USE
7083 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7085 PUT_MODE (next_insn, TImode);
7089 /* Froward pass: generation of bundle states. */
7090 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7094 gcc_assert (INSN_P (insn)
7095 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7096 && GET_CODE (PATTERN (insn)) != USE
7097 && GET_CODE (PATTERN (insn)) != CLOBBER);
7098 type = ia64_safe_type (insn);
7099 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7101 index_to_bundle_states [insn_num] = NULL;
7102 for (curr_state = index_to_bundle_states [insn_num - 1];
7104 curr_state = next_state)
7106 pos = curr_state->accumulated_insns_num % 3;
7107 next_state = curr_state->next;
7108 /* We must fill up the current bundle in order to start a
7109 subsequent asm insn in a new bundle. Asm insn is always
7110 placed in a separate bundle. */
7112 = (next_insn != NULL_RTX
7113 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7114 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7115 /* We may fill up the current bundle if it is the cycle end
7116 without a group barrier. */
7118 = (only_bundle_end_p || next_insn == NULL_RTX
7119 || (GET_MODE (next_insn) == TImode
7120 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7121 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7123 /* We need to insert 2 nops for cases like M_MII. To
7124 guarantee issuing all insns on the same cycle for
7125 Itanium 1, we need to issue 2 nops after the first M
7126 insn (MnnMII where n is a nop insn). */
7127 || ((type == TYPE_M || type == TYPE_A)
7128 && ia64_tune == PROCESSOR_ITANIUM
7129 && !bundle_end_p && pos == 1))
7130 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
7132 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
7134 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
7137 gcc_assert (index_to_bundle_states [insn_num]);
7138 for (curr_state = index_to_bundle_states [insn_num];
7140 curr_state = curr_state->next)
7141 if (verbose >= 2 && dump)
7143 /* This structure is taken from generated code of the
7144 pipeline hazard recognizer (see file insn-attrtab.c).
7145 Please don't forget to change the structure if a new
7146 automaton is added to .md file. */
7149 unsigned short one_automaton_state;
7150 unsigned short oneb_automaton_state;
7151 unsigned short two_automaton_state;
7152 unsigned short twob_automaton_state;
7157 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7158 curr_state->unique_num,
7159 (curr_state->originator == NULL
7160 ? -1 : curr_state->originator->unique_num),
7162 curr_state->before_nops_num, curr_state->after_nops_num,
7163 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7164 (ia64_tune == PROCESSOR_ITANIUM
7165 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7166 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7171 /* We should find a solution because the 2nd insn scheduling has
7173 gcc_assert (index_to_bundle_states [insn_num]);
7174 /* Find a state corresponding to the best insn sequence. */
7176 for (curr_state = index_to_bundle_states [insn_num];
7178 curr_state = curr_state->next)
7179 /* We are just looking at the states with fully filled up last
7180 bundle. The first we prefer insn sequences with minimal cost
7181 then with minimal inserted nops and finally with branch insns
7182 placed in the 3rd slots. */
7183 if (curr_state->accumulated_insns_num % 3 == 0
7184 && (best_state == NULL || best_state->cost > curr_state->cost
7185 || (best_state->cost == curr_state->cost
7186 && (curr_state->accumulated_insns_num
7187 < best_state->accumulated_insns_num
7188 || (curr_state->accumulated_insns_num
7189 == best_state->accumulated_insns_num
7190 && curr_state->branch_deviation
7191 < best_state->branch_deviation)))))
7192 best_state = curr_state;
7193 /* Second (backward) pass: adding nops and templates. */
7194 insn_num = best_state->before_nops_num;
7195 template0 = template1 = -1;
7196 for (curr_state = best_state;
7197 curr_state->originator != NULL;
7198 curr_state = curr_state->originator)
7200 insn = curr_state->insn;
7201 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7202 || asm_noperands (PATTERN (insn)) >= 0);
7204 if (verbose >= 2 && dump)
7208 unsigned short one_automaton_state;
7209 unsigned short oneb_automaton_state;
7210 unsigned short two_automaton_state;
7211 unsigned short twob_automaton_state;
7216 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7217 curr_state->unique_num,
7218 (curr_state->originator == NULL
7219 ? -1 : curr_state->originator->unique_num),
7221 curr_state->before_nops_num, curr_state->after_nops_num,
7222 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7223 (ia64_tune == PROCESSOR_ITANIUM
7224 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7225 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7228 /* Find the position in the current bundle window. The window can
7229 contain at most two bundles. Two bundle window means that
7230 the processor will make two bundle rotation. */
7231 max_pos = get_max_pos (curr_state->dfa_state);
7233 /* The following (negative template number) means that the
7234 processor did one bundle rotation. */
7235 || (max_pos == 3 && template0 < 0))
7237 /* We are at the end of the window -- find template(s) for
7241 template0 = get_template (curr_state->dfa_state, 3);
7244 template1 = get_template (curr_state->dfa_state, 3);
7245 template0 = get_template (curr_state->dfa_state, 6);
7248 if (max_pos > 3 && template1 < 0)
7249 /* It may happen when we have the stop inside a bundle. */
7251 gcc_assert (pos <= 3);
7252 template1 = get_template (curr_state->dfa_state, 3);
7256 /* Emit nops after the current insn. */
7257 for (i = 0; i < curr_state->after_nops_num; i++)
7260 emit_insn_after (nop, insn);
7262 gcc_assert (pos >= 0);
7265 /* We are at the start of a bundle: emit the template
7266 (it should be defined). */
7267 gcc_assert (template0 >= 0);
7268 b = gen_bundle_selector (GEN_INT (template0));
7269 ia64_emit_insn_before (b, nop);
7270 /* If we have two bundle window, we make one bundle
7271 rotation. Otherwise template0 will be undefined
7272 (negative value). */
7273 template0 = template1;
7277 /* Move the position backward in the window. Group barrier has
7278 no slot. Asm insn takes all bundle. */
7279 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7280 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7281 && asm_noperands (PATTERN (insn)) < 0)
7283 /* Long insn takes 2 slots. */
7284 if (ia64_safe_type (insn) == TYPE_L)
7286 gcc_assert (pos >= 0);
7288 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7289 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7290 && asm_noperands (PATTERN (insn)) < 0)
7292 /* The current insn is at the bundle start: emit the
7294 gcc_assert (template0 >= 0);
7295 b = gen_bundle_selector (GEN_INT (template0));
7296 ia64_emit_insn_before (b, insn);
7297 b = PREV_INSN (insn);
7299 /* See comment above in analogous place for emitting nops
7301 template0 = template1;
7304 /* Emit nops after the current insn. */
7305 for (i = 0; i < curr_state->before_nops_num; i++)
7308 ia64_emit_insn_before (nop, insn);
7309 nop = PREV_INSN (insn);
7312 gcc_assert (pos >= 0);
7315 /* See comment above in analogous place for emitting nops
7317 gcc_assert (template0 >= 0);
7318 b = gen_bundle_selector (GEN_INT (template0));
7319 ia64_emit_insn_before (b, insn);
7320 b = PREV_INSN (insn);
7322 template0 = template1;
7327 if (ia64_tune == PROCESSOR_ITANIUM)
7328 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
7329 Itanium1 has a strange design, if the distance between an insn
7330 and dependent MM-insn is less 4 then we have a 6 additional
7331 cycles stall. So we make the distance equal to 4 cycles if it
7333 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7337 gcc_assert (INSN_P (insn)
7338 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7339 && GET_CODE (PATTERN (insn)) != USE
7340 && GET_CODE (PATTERN (insn)) != CLOBBER);
7341 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7342 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
7343 /* We found a MM-insn which needs additional cycles. */
7349 /* Now we are searching for a template of the bundle in
7350 which the MM-insn is placed and the position of the
7351 insn in the bundle (0, 1, 2). Also we are searching
7352 for that there is a stop before the insn. */
7353 last = prev_active_insn (insn);
7354 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
7356 last = prev_active_insn (last);
7358 for (;; last = prev_active_insn (last))
7359 if (recog_memoized (last) == CODE_FOR_bundle_selector)
7361 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
7363 /* The insn is in MLX bundle. Change the template
7364 onto MFI because we will add nops before the
7365 insn. It simplifies subsequent code a lot. */
7367 = gen_bundle_selector (const2_rtx); /* -> MFI */
7370 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
7371 && (ia64_safe_itanium_class (last)
7372 != ITANIUM_CLASS_IGNORE))
7374 /* Some check of correctness: the stop is not at the
7375 bundle start, there are no more 3 insns in the bundle,
7376 and the MM-insn is not at the start of bundle with
7378 gcc_assert ((!pred_stop_p || n)
7380 && (template0 != 9 || !n));
7381 /* Put nops after the insn in the bundle. */
7382 for (j = 3 - n; j > 0; j --)
7383 ia64_emit_insn_before (gen_nop (), insn);
7384 /* It takes into account that we will add more N nops
7385 before the insn lately -- please see code below. */
7386 add_cycles [INSN_UID (insn)]--;
7387 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
7388 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7391 add_cycles [INSN_UID (insn)]--;
7392 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7394 /* Insert "MII;" template. */
7395 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7397 ia64_emit_insn_before (gen_nop (), insn);
7398 ia64_emit_insn_before (gen_nop (), insn);
7401 /* To decrease code size, we use "MI;I;"
7403 ia64_emit_insn_before
7404 (gen_insn_group_barrier (GEN_INT (3)), insn);
7407 ia64_emit_insn_before (gen_nop (), insn);
7408 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7411 /* Put the MM-insn in the same slot of a bundle with the
7412 same template as the original one. */
7413 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
7415 /* To put the insn in the same slot, add necessary number
7417 for (j = n; j > 0; j --)
7418 ia64_emit_insn_before (gen_nop (), insn);
7419 /* Put the stop if the original bundle had it. */
7421 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7425 free (index_to_bundle_states);
7426 finish_bundle_state_table ();
7428 dfa_clean_insn_cache ();
7431 /* The following function is called at the end of scheduling BB or
7432 EBB. After reload, it inserts stop bits and does insn bundling. */
7435 ia64_sched_finish (FILE *dump, int sched_verbose)
7438 fprintf (dump, "// Finishing schedule.\n");
7439 if (!reload_completed)
7441 if (reload_completed)
7443 final_emit_insn_group_barriers (dump);
7444 bundling (dump, sched_verbose, current_sched_info->prev_head,
7445 current_sched_info->next_tail);
7446 if (sched_verbose && dump)
7447 fprintf (dump, "// finishing %d-%d\n",
7448 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7449 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7455 /* The following function inserts stop bits in scheduled BB or EBB. */
7458 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7461 int need_barrier_p = 0;
7462 rtx prev_insn = NULL_RTX;
7464 init_insn_group_barriers ();
7466 for (insn = NEXT_INSN (current_sched_info->prev_head);
7467 insn != current_sched_info->next_tail;
7468 insn = NEXT_INSN (insn))
7470 if (GET_CODE (insn) == BARRIER)
7472 rtx last = prev_active_insn (insn);
7476 if (GET_CODE (last) == JUMP_INSN
7477 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7478 last = prev_active_insn (last);
7479 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7480 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7482 init_insn_group_barriers ();
7484 prev_insn = NULL_RTX;
7486 else if (INSN_P (insn))
7488 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7490 init_insn_group_barriers ();
7492 prev_insn = NULL_RTX;
7494 else if (need_barrier_p || group_barrier_needed (insn))
7496 if (TARGET_EARLY_STOP_BITS)
7501 last != current_sched_info->prev_head;
7502 last = PREV_INSN (last))
7503 if (INSN_P (last) && GET_MODE (last) == TImode
7504 && stops_p [INSN_UID (last)])
7506 if (last == current_sched_info->prev_head)
7508 last = prev_active_insn (last);
7510 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7511 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7513 init_insn_group_barriers ();
7514 for (last = NEXT_INSN (last);
7516 last = NEXT_INSN (last))
7518 group_barrier_needed (last);
7522 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7524 init_insn_group_barriers ();
7526 group_barrier_needed (insn);
7527 prev_insn = NULL_RTX;
7529 else if (recog_memoized (insn) >= 0)
7531 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7532 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7533 || asm_noperands (PATTERN (insn)) >= 0);
7540 /* If the following function returns TRUE, we will use the DFA
7544 ia64_first_cycle_multipass_dfa_lookahead (void)
7546 return (reload_completed ? 6 : 4);
7549 /* The following function initiates variable `dfa_pre_cycle_insn'. */
7552 ia64_init_dfa_pre_cycle_insn (void)
7554 if (temp_dfa_state == NULL)
7556 dfa_state_size = state_size ();
7557 temp_dfa_state = xmalloc (dfa_state_size);
7558 prev_cycle_state = xmalloc (dfa_state_size);
7560 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7561 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7562 recog_memoized (dfa_pre_cycle_insn);
7563 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7564 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7565 recog_memoized (dfa_stop_insn);
7568 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7569 used by the DFA insn scheduler. */
7572 ia64_dfa_pre_cycle_insn (void)
7574 return dfa_pre_cycle_insn;
7577 /* The following function returns TRUE if PRODUCER (of type ilog or
7578 ld) produces address for CONSUMER (of type st or stf). */
7581 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7585 gcc_assert (producer && consumer);
7586 dest = ia64_single_set (producer);
7588 reg = SET_DEST (dest);
7590 if (GET_CODE (reg) == SUBREG)
7591 reg = SUBREG_REG (reg);
7592 gcc_assert (GET_CODE (reg) == REG);
7594 dest = ia64_single_set (consumer);
7596 mem = SET_DEST (dest);
7597 gcc_assert (mem && GET_CODE (mem) == MEM);
7598 return reg_mentioned_p (reg, mem);
7601 /* The following function returns TRUE if PRODUCER (of type ilog or
7602 ld) produces address for CONSUMER (of type ld or fld). */
7605 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7607 rtx dest, src, reg, mem;
7609 gcc_assert (producer && consumer);
7610 dest = ia64_single_set (producer);
7612 reg = SET_DEST (dest);
7614 if (GET_CODE (reg) == SUBREG)
7615 reg = SUBREG_REG (reg);
7616 gcc_assert (GET_CODE (reg) == REG);
7618 src = ia64_single_set (consumer);
7620 mem = SET_SRC (src);
7622 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7623 mem = XVECEXP (mem, 0, 0);
7624 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7625 mem = XEXP (mem, 0);
7627 /* Note that LO_SUM is used for GOT loads. */
7628 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
7630 return reg_mentioned_p (reg, mem);
7633 /* The following function returns TRUE if INSN produces address for a
7634 load/store insn. We will place such insns into M slot because it
7635 decreases its latency time. */
7638 ia64_produce_address_p (rtx insn)
7644 /* Emit pseudo-ops for the assembler to describe predicate relations.
7645 At present this assumes that we only consider predicate pairs to
7646 be mutex, and that the assembler can deduce proper values from
7647 straight-line code. */
7650 emit_predicate_relation_info (void)
7654 FOR_EACH_BB_REVERSE (bb)
7657 rtx head = BB_HEAD (bb);
7659 /* We only need such notes at code labels. */
7660 if (GET_CODE (head) != CODE_LABEL)
7662 if (GET_CODE (NEXT_INSN (head)) == NOTE
7663 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7664 head = NEXT_INSN (head);
7666 /* Skip p0, which may be thought to be live due to (reg:DI p0)
7667 grabbing the entire block of predicate registers. */
7668 for (r = PR_REG (2); r < PR_REG (64); r += 2)
7669 if (REGNO_REG_SET_P (bb->il.rtl->global_live_at_start, r))
7671 rtx p = gen_rtx_REG (BImode, r);
7672 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7673 if (head == BB_END (bb))
7679 /* Look for conditional calls that do not return, and protect predicate
7680 relations around them. Otherwise the assembler will assume the call
7681 returns, and complain about uses of call-clobbered predicates after
7683 FOR_EACH_BB_REVERSE (bb)
7685 rtx insn = BB_HEAD (bb);
7689 if (GET_CODE (insn) == CALL_INSN
7690 && GET_CODE (PATTERN (insn)) == COND_EXEC
7691 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7693 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7694 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7695 if (BB_HEAD (bb) == insn)
7697 if (BB_END (bb) == insn)
7701 if (insn == BB_END (bb))
7703 insn = NEXT_INSN (insn);
7708 /* Perform machine dependent operations on the rtl chain INSNS. */
7713 /* We are freeing block_for_insn in the toplev to keep compatibility
7714 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7715 compute_bb_for_insn ();
7717 /* If optimizing, we'll have split before scheduling. */
7719 split_all_insns (0);
7721 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7722 non-optimizing bootstrap. */
7723 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7725 if (optimize && ia64_flag_schedule_insns2)
7727 timevar_push (TV_SCHED2);
7728 ia64_final_schedule = 1;
7730 initiate_bundle_states ();
7731 ia64_nop = make_insn_raw (gen_nop ());
7732 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7733 recog_memoized (ia64_nop);
7734 clocks_length = get_max_uid () + 1;
7735 stops_p = xcalloc (1, clocks_length);
7736 if (ia64_tune == PROCESSOR_ITANIUM)
7738 clocks = xcalloc (clocks_length, sizeof (int));
7739 add_cycles = xcalloc (clocks_length, sizeof (int));
7741 if (ia64_tune == PROCESSOR_ITANIUM2)
7743 pos_1 = get_cpu_unit_code ("2_1");
7744 pos_2 = get_cpu_unit_code ("2_2");
7745 pos_3 = get_cpu_unit_code ("2_3");
7746 pos_4 = get_cpu_unit_code ("2_4");
7747 pos_5 = get_cpu_unit_code ("2_5");
7748 pos_6 = get_cpu_unit_code ("2_6");
7749 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7750 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7751 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7752 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7753 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7754 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7755 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7756 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7757 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7758 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7759 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7760 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7761 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7762 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7763 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7764 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7765 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7766 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7767 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7768 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7772 pos_1 = get_cpu_unit_code ("1_1");
7773 pos_2 = get_cpu_unit_code ("1_2");
7774 pos_3 = get_cpu_unit_code ("1_3");
7775 pos_4 = get_cpu_unit_code ("1_4");
7776 pos_5 = get_cpu_unit_code ("1_5");
7777 pos_6 = get_cpu_unit_code ("1_6");
7778 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7779 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7780 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7781 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7782 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7783 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7784 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7785 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7786 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7787 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7788 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7789 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7790 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7791 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7792 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7793 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7794 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7795 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7796 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7797 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7800 finish_bundle_states ();
7801 if (ia64_tune == PROCESSOR_ITANIUM)
7807 emit_insn_group_barriers (dump_file);
7809 ia64_final_schedule = 0;
7810 timevar_pop (TV_SCHED2);
7813 emit_all_insn_group_barriers (dump_file);
7815 /* A call must not be the last instruction in a function, so that the
7816 return address is still within the function, so that unwinding works
7817 properly. Note that IA-64 differs from dwarf2 on this point. */
7818 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7823 insn = get_last_insn ();
7824 if (! INSN_P (insn))
7825 insn = prev_active_insn (insn);
7826 /* Skip over insns that expand to nothing. */
7827 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7829 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7830 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7832 insn = prev_active_insn (insn);
7834 if (GET_CODE (insn) == CALL_INSN)
7837 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7838 emit_insn (gen_break_f ());
7839 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7843 emit_predicate_relation_info ();
7845 if (ia64_flag_var_tracking)
7847 timevar_push (TV_VAR_TRACKING);
7848 variable_tracking_main ();
7849 timevar_pop (TV_VAR_TRACKING);
7853 /* Return true if REGNO is used by the epilogue. */
7856 ia64_epilogue_uses (int regno)
7861 /* With a call to a function in another module, we will write a new
7862 value to "gp". After returning from such a call, we need to make
7863 sure the function restores the original gp-value, even if the
7864 function itself does not use the gp anymore. */
7865 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7867 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7868 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7869 /* For functions defined with the syscall_linkage attribute, all
7870 input registers are marked as live at all function exits. This
7871 prevents the register allocator from using the input registers,
7872 which in turn makes it possible to restart a system call after
7873 an interrupt without having to save/restore the input registers.
7874 This also prevents kernel data from leaking to application code. */
7875 return lookup_attribute ("syscall_linkage",
7876 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7879 /* Conditional return patterns can't represent the use of `b0' as
7880 the return address, so we force the value live this way. */
7884 /* Likewise for ar.pfs, which is used by br.ret. */
7892 /* Return true if REGNO is used by the frame unwinder. */
7895 ia64_eh_uses (int regno)
7897 if (! reload_completed)
7900 if (current_frame_info.reg_save_b0
7901 && regno == current_frame_info.reg_save_b0)
7903 if (current_frame_info.reg_save_pr
7904 && regno == current_frame_info.reg_save_pr)
7906 if (current_frame_info.reg_save_ar_pfs
7907 && regno == current_frame_info.reg_save_ar_pfs)
7909 if (current_frame_info.reg_save_ar_unat
7910 && regno == current_frame_info.reg_save_ar_unat)
7912 if (current_frame_info.reg_save_ar_lc
7913 && regno == current_frame_info.reg_save_ar_lc)
7919 /* Return true if this goes in small data/bss. */
7921 /* ??? We could also support own long data here. Generating movl/add/ld8
7922 instead of addl,ld8/ld8. This makes the code bigger, but should make the
7923 code faster because there is one less load. This also includes incomplete
7924 types which can't go in sdata/sbss. */
7927 ia64_in_small_data_p (tree exp)
7929 if (TARGET_NO_SDATA)
7932 /* We want to merge strings, so we never consider them small data. */
7933 if (TREE_CODE (exp) == STRING_CST)
7936 /* Functions are never small data. */
7937 if (TREE_CODE (exp) == FUNCTION_DECL)
7940 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7942 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7944 if (strcmp (section, ".sdata") == 0
7945 || strncmp (section, ".sdata.", 7) == 0
7946 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
7947 || strcmp (section, ".sbss") == 0
7948 || strncmp (section, ".sbss.", 6) == 0
7949 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
7954 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7956 /* If this is an incomplete type with size 0, then we can't put it
7957 in sdata because it might be too big when completed. */
7958 if (size > 0 && size <= ia64_section_threshold)
7965 /* Output assembly directives for prologue regions. */
7967 /* The current basic block number. */
7969 static bool last_block;
7971 /* True if we need a copy_state command at the start of the next block. */
7973 static bool need_copy_state;
7975 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
7976 # define MAX_ARTIFICIAL_LABEL_BYTES 30
7979 /* Emit a debugging label after a call-frame-related insn. We'd
7980 rather output the label right away, but we'd have to output it
7981 after, not before, the instruction, and the instruction has not
7982 been output yet. So we emit the label after the insn, delete it to
7983 avoid introducing basic blocks, and mark it as preserved, such that
7984 it is still output, given that it is referenced in debug info. */
7987 ia64_emit_deleted_label_after_insn (rtx insn)
7989 char label[MAX_ARTIFICIAL_LABEL_BYTES];
7990 rtx lb = gen_label_rtx ();
7991 rtx label_insn = emit_label_after (lb, insn);
7993 LABEL_PRESERVE_P (lb) = 1;
7995 delete_insn (label_insn);
7997 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
7999 return xstrdup (label);
8002 /* Define the CFA after INSN with the steady-state definition. */
8005 ia64_dwarf2out_def_steady_cfa (rtx insn)
8007 rtx fp = frame_pointer_needed
8008 ? hard_frame_pointer_rtx
8009 : stack_pointer_rtx;
8012 (ia64_emit_deleted_label_after_insn (insn),
8014 ia64_initial_elimination_offset
8015 (REGNO (arg_pointer_rtx), REGNO (fp))
8016 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8019 /* The generic dwarf2 frame debug info generator does not define a
8020 separate region for the very end of the epilogue, so refrain from
8021 doing so in the IA64-specific code as well. */
8023 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8025 /* The function emits unwind directives for the start of an epilogue. */
8028 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8030 /* If this isn't the last block of the function, then we need to label the
8031 current state, and copy it back in at the start of the next block. */
8036 fprintf (asm_out_file, "\t.label_state %d\n",
8037 ++cfun->machine->state_num);
8038 need_copy_state = true;
8042 fprintf (asm_out_file, "\t.restore sp\n");
8043 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8044 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8045 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
8048 /* This function processes a SET pattern looking for specific patterns
8049 which result in emitting an assembly directive required for unwinding. */
8052 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8054 rtx src = SET_SRC (pat);
8055 rtx dest = SET_DEST (pat);
8056 int src_regno, dest_regno;
8058 /* Look for the ALLOC insn. */
8059 if (GET_CODE (src) == UNSPEC_VOLATILE
8060 && XINT (src, 1) == UNSPECV_ALLOC
8061 && GET_CODE (dest) == REG)
8063 dest_regno = REGNO (dest);
8065 /* If this is the final destination for ar.pfs, then this must
8066 be the alloc in the prologue. */
8067 if (dest_regno == current_frame_info.reg_save_ar_pfs)
8070 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8071 ia64_dbx_register_number (dest_regno));
8075 /* This must be an alloc before a sibcall. We must drop the
8076 old frame info. The easiest way to drop the old frame
8077 info is to ensure we had a ".restore sp" directive
8078 followed by a new prologue. If the procedure doesn't
8079 have a memory-stack frame, we'll issue a dummy ".restore
8081 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8082 /* if haven't done process_epilogue() yet, do it now */
8083 process_epilogue (asm_out_file, insn, unwind, frame);
8085 fprintf (asm_out_file, "\t.prologue\n");
8090 /* Look for SP = .... */
8091 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8093 if (GET_CODE (src) == PLUS)
8095 rtx op0 = XEXP (src, 0);
8096 rtx op1 = XEXP (src, 1);
8098 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8100 if (INTVAL (op1) < 0)
8102 gcc_assert (!frame_pointer_needed);
8104 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
8107 ia64_dwarf2out_def_steady_cfa (insn);
8110 process_epilogue (asm_out_file, insn, unwind, frame);
8114 gcc_assert (GET_CODE (src) == REG
8115 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8116 process_epilogue (asm_out_file, insn, unwind, frame);
8122 /* Register move we need to look at. */
8123 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
8125 src_regno = REGNO (src);
8126 dest_regno = REGNO (dest);
8131 /* Saving return address pointer. */
8132 gcc_assert (dest_regno == current_frame_info.reg_save_b0);
8134 fprintf (asm_out_file, "\t.save rp, r%d\n",
8135 ia64_dbx_register_number (dest_regno));
8139 gcc_assert (dest_regno == current_frame_info.reg_save_pr);
8141 fprintf (asm_out_file, "\t.save pr, r%d\n",
8142 ia64_dbx_register_number (dest_regno));
8145 case AR_UNAT_REGNUM:
8146 gcc_assert (dest_regno == current_frame_info.reg_save_ar_unat);
8148 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
8149 ia64_dbx_register_number (dest_regno));
8153 gcc_assert (dest_regno == current_frame_info.reg_save_ar_lc);
8155 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
8156 ia64_dbx_register_number (dest_regno));
8159 case STACK_POINTER_REGNUM:
8160 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
8161 && frame_pointer_needed);
8163 fprintf (asm_out_file, "\t.vframe r%d\n",
8164 ia64_dbx_register_number (dest_regno));
8166 ia64_dwarf2out_def_steady_cfa (insn);
8170 /* Everything else should indicate being stored to memory. */
8175 /* Memory store we need to look at. */
8176 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
8182 if (GET_CODE (XEXP (dest, 0)) == REG)
8184 base = XEXP (dest, 0);
8189 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
8190 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
8191 base = XEXP (XEXP (dest, 0), 0);
8192 off = INTVAL (XEXP (XEXP (dest, 0), 1));
8195 if (base == hard_frame_pointer_rtx)
8197 saveop = ".savepsp";
8202 gcc_assert (base == stack_pointer_rtx);
8206 src_regno = REGNO (src);
8210 gcc_assert (!current_frame_info.reg_save_b0);
8212 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
8216 gcc_assert (!current_frame_info.reg_save_pr);
8218 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
8222 gcc_assert (!current_frame_info.reg_save_ar_lc);
8224 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
8228 gcc_assert (!current_frame_info.reg_save_ar_pfs);
8230 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
8233 case AR_UNAT_REGNUM:
8234 gcc_assert (!current_frame_info.reg_save_ar_unat);
8236 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
8244 fprintf (asm_out_file, "\t.save.g 0x%x\n",
8245 1 << (src_regno - GR_REG (4)));
8254 fprintf (asm_out_file, "\t.save.b 0x%x\n",
8255 1 << (src_regno - BR_REG (1)));
8263 fprintf (asm_out_file, "\t.save.f 0x%x\n",
8264 1 << (src_regno - FR_REG (2)));
8267 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
8268 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
8269 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
8270 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
8272 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
8273 1 << (src_regno - FR_REG (12)));
8285 /* This function looks at a single insn and emits any directives
8286 required to unwind this insn. */
8288 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
8290 bool unwind = (flag_unwind_tables
8291 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
8292 bool frame = dwarf2out_do_frame ();
8294 if (unwind || frame)
8298 if (GET_CODE (insn) == NOTE
8299 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
8301 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
8303 /* Restore unwind state from immediately before the epilogue. */
8304 if (need_copy_state)
8308 fprintf (asm_out_file, "\t.body\n");
8309 fprintf (asm_out_file, "\t.copy_state %d\n",
8310 cfun->machine->state_num);
8312 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8313 ia64_dwarf2out_def_steady_cfa (insn);
8314 need_copy_state = false;
8318 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
8321 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8323 pat = XEXP (pat, 0);
8325 pat = PATTERN (insn);
8327 switch (GET_CODE (pat))
8330 process_set (asm_out_file, pat, insn, unwind, frame);
8336 int limit = XVECLEN (pat, 0);
8337 for (par_index = 0; par_index < limit; par_index++)
8339 rtx x = XVECEXP (pat, 0, par_index);
8340 if (GET_CODE (x) == SET)
8341 process_set (asm_out_file, x, insn, unwind, frame);
8356 IA64_BUILTIN_FLUSHRS
8360 ia64_init_builtins (void)
8365 /* The __fpreg type. */
8366 fpreg_type = make_node (REAL_TYPE);
8367 TYPE_PRECISION (fpreg_type) = 82;
8368 layout_type (fpreg_type);
8369 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
8371 /* The __float80 type. */
8372 float80_type = make_node (REAL_TYPE);
8373 TYPE_PRECISION (float80_type) = 80;
8374 layout_type (float80_type);
8375 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
8377 /* The __float128 type. */
8380 tree float128_type = make_node (REAL_TYPE);
8381 TYPE_PRECISION (float128_type) = 128;
8382 layout_type (float128_type);
8383 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
8386 /* Under HPUX, this is a synonym for "long double". */
8387 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
8390 #define def_builtin(name, type, code) \
8391 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
8394 def_builtin ("__builtin_ia64_bsp",
8395 build_function_type (ptr_type_node, void_list_node),
8398 def_builtin ("__builtin_ia64_flushrs",
8399 build_function_type (void_type_node, void_list_node),
8400 IA64_BUILTIN_FLUSHRS);
8406 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8407 enum machine_mode mode ATTRIBUTE_UNUSED,
8408 int ignore ATTRIBUTE_UNUSED)
8410 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8411 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8415 case IA64_BUILTIN_BSP:
8416 if (! target || ! register_operand (target, DImode))
8417 target = gen_reg_rtx (DImode);
8418 emit_insn (gen_bsp_value (target));
8419 #ifdef POINTERS_EXTEND_UNSIGNED
8420 target = convert_memory_address (ptr_mode, target);
8424 case IA64_BUILTIN_FLUSHRS:
8425 emit_insn (gen_flushrs ());
8435 /* For the HP-UX IA64 aggregate parameters are passed stored in the
8436 most significant bits of the stack slot. */
8439 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8441 /* Exception to normal case for structures/unions/etc. */
8443 if (type && AGGREGATE_TYPE_P (type)
8444 && int_size_in_bytes (type) < UNITS_PER_WORD)
8447 /* Fall back to the default. */
8448 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
8451 /* Linked list of all external functions that are to be emitted by GCC.
8452 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8453 order to avoid putting out names that are never really used. */
8455 struct extern_func_list GTY(())
8457 struct extern_func_list *next;
8461 static GTY(()) struct extern_func_list *extern_func_head;
8464 ia64_hpux_add_extern_decl (tree decl)
8466 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8469 p->next = extern_func_head;
8470 extern_func_head = p;
8473 /* Print out the list of used global functions. */
8476 ia64_hpux_file_end (void)
8478 struct extern_func_list *p;
8480 for (p = extern_func_head; p; p = p->next)
8482 tree decl = p->decl;
8483 tree id = DECL_ASSEMBLER_NAME (decl);
8487 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8489 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8491 TREE_ASM_WRITTEN (decl) = 1;
8492 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8493 fputs (TYPE_ASM_OP, asm_out_file);
8494 assemble_name (asm_out_file, name);
8495 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8499 extern_func_head = 0;
8502 /* Set SImode div/mod functions, init_integral_libfuncs only initializes
8503 modes of word_mode and larger. Rename the TFmode libfuncs using the
8504 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
8505 backward compatibility. */
8508 ia64_init_libfuncs (void)
8510 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8511 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8512 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8513 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8515 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8516 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8517 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8518 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8519 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8521 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8522 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8523 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8524 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8525 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8526 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8528 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8529 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8530 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
8531 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8532 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8534 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8535 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8536 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
8537 /* HP-UX 11.23 libc does not have a function for unsigned
8538 SImode-to-TFmode conversion. */
8539 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
8542 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8545 ia64_hpux_init_libfuncs (void)
8547 ia64_init_libfuncs ();
8549 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8550 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8551 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8553 /* ia64_expand_compare uses this. */
8554 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8556 /* These should never be used. */
8557 set_optab_libfunc (eq_optab, TFmode, 0);
8558 set_optab_libfunc (ne_optab, TFmode, 0);
8559 set_optab_libfunc (gt_optab, TFmode, 0);
8560 set_optab_libfunc (ge_optab, TFmode, 0);
8561 set_optab_libfunc (lt_optab, TFmode, 0);
8562 set_optab_libfunc (le_optab, TFmode, 0);
8565 /* Rename the division and modulus functions in VMS. */
8568 ia64_vms_init_libfuncs (void)
8570 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8571 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8572 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8573 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8574 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8575 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8576 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8577 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8580 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8581 the HPUX conventions. */
8584 ia64_sysv4_init_libfuncs (void)
8586 ia64_init_libfuncs ();
8588 /* These functions are not part of the HPUX TFmode interface. We
8589 use them instead of _U_Qfcmp, which doesn't work the way we
8591 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8592 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8593 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8594 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8595 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8596 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8598 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8599 glibc doesn't have them. */
8602 /* Return the section to use for X. The only special thing we do here
8603 is to honor small data. */
8606 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8607 unsigned HOST_WIDE_INT align)
8609 if (GET_MODE_SIZE (mode) > 0
8610 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8611 return sdata_section;
8613 return default_elf_select_rtx_section (mode, x, align);
8616 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8617 Pretend flag_pic is always set. */
8620 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8622 return default_elf_select_section_1 (exp, reloc, align, true);
8626 ia64_rwreloc_unique_section (tree decl, int reloc)
8628 default_unique_section_1 (decl, reloc, true);
8632 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8633 unsigned HOST_WIDE_INT align)
8636 int save_pic = flag_pic;
8638 sect = ia64_select_rtx_section (mode, x, align);
8639 flag_pic = save_pic;
8643 #ifndef TARGET_RWRELOC
8644 #define TARGET_RWRELOC flag_pic
8648 ia64_section_type_flags (tree decl, const char *name, int reloc)
8650 unsigned int flags = 0;
8652 if (strcmp (name, ".sdata") == 0
8653 || strncmp (name, ".sdata.", 7) == 0
8654 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
8655 || strncmp (name, ".sdata2.", 8) == 0
8656 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
8657 || strcmp (name, ".sbss") == 0
8658 || strncmp (name, ".sbss.", 6) == 0
8659 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
8660 flags = SECTION_SMALL;
8662 flags |= default_section_type_flags_1 (decl, name, reloc, TARGET_RWRELOC);
8666 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
8667 structure type and that the address of that type should be passed
8668 in out0, rather than in r8. */
8671 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8673 tree ret_type = TREE_TYPE (fntype);
8675 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8676 as the structure return address parameter, if the return value
8677 type has a non-trivial copy constructor or destructor. It is not
8678 clear if this same convention should be used for other
8679 programming languages. Until G++ 3.4, we incorrectly used r8 for
8680 these return values. */
8681 return (abi_version_at_least (2)
8683 && TYPE_MODE (ret_type) == BLKmode
8684 && TREE_ADDRESSABLE (ret_type)
8685 && strcmp (lang_hooks.name, "GNU C++") == 0);
8688 /* Output the assembler code for a thunk function. THUNK_DECL is the
8689 declaration for the thunk function itself, FUNCTION is the decl for
8690 the target function. DELTA is an immediate constant offset to be
8691 added to THIS. If VCALL_OFFSET is nonzero, the word at
8692 *(*this + vcall_offset) should be added to THIS. */
8695 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8696 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8699 rtx this, insn, funexp;
8700 unsigned int this_parmno;
8701 unsigned int this_regno;
8703 reload_completed = 1;
8704 epilogue_completed = 1;
8706 reset_block_changes ();
8708 /* Set things up as ia64_expand_prologue might. */
8709 last_scratch_gr_reg = 15;
8711 memset (¤t_frame_info, 0, sizeof (current_frame_info));
8712 current_frame_info.spill_cfa_off = -16;
8713 current_frame_info.n_input_regs = 1;
8714 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8716 /* Mark the end of the (empty) prologue. */
8717 emit_note (NOTE_INSN_PROLOGUE_END);
8719 /* Figure out whether "this" will be the first parameter (the
8720 typical case) or the second parameter (as happens when the
8721 virtual function returns certain class objects). */
8723 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
8725 this_regno = IN_REG (this_parmno);
8726 if (!TARGET_REG_NAMES)
8727 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
8729 this = gen_rtx_REG (Pmode, this_regno);
8732 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
8733 REG_POINTER (tmp) = 1;
8734 if (delta && CONST_OK_FOR_I (delta))
8736 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8740 emit_insn (gen_ptr_extend (this, tmp));
8743 /* Apply the constant offset, if required. */
8746 rtx delta_rtx = GEN_INT (delta);
8748 if (!CONST_OK_FOR_I (delta))
8750 rtx tmp = gen_rtx_REG (Pmode, 2);
8751 emit_move_insn (tmp, delta_rtx);
8754 emit_insn (gen_adddi3 (this, this, delta_rtx));
8757 /* Apply the offset from the vtable, if required. */
8760 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8761 rtx tmp = gen_rtx_REG (Pmode, 2);
8765 rtx t = gen_rtx_REG (ptr_mode, 2);
8766 REG_POINTER (t) = 1;
8767 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8768 if (CONST_OK_FOR_I (vcall_offset))
8770 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8775 emit_insn (gen_ptr_extend (tmp, t));
8778 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8782 if (!CONST_OK_FOR_J (vcall_offset))
8784 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8785 emit_move_insn (tmp2, vcall_offset_rtx);
8786 vcall_offset_rtx = tmp2;
8788 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8792 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8793 gen_rtx_MEM (ptr_mode, tmp));
8795 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8797 emit_insn (gen_adddi3 (this, this, tmp));
8800 /* Generate a tail call to the target function. */
8801 if (! TREE_USED (function))
8803 assemble_external (function);
8804 TREE_USED (function) = 1;
8806 funexp = XEXP (DECL_RTL (function), 0);
8807 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8808 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8809 insn = get_last_insn ();
8810 SIBLING_CALL_P (insn) = 1;
8812 /* Code generation for calls relies on splitting. */
8813 reload_completed = 1;
8814 epilogue_completed = 1;
8815 try_split (PATTERN (insn), insn, 0);
8819 /* Run just enough of rest_of_compilation to get the insns emitted.
8820 There's not really enough bulk here to make other passes such as
8821 instruction scheduling worth while. Note that use_thunk calls
8822 assemble_start_function and assemble_end_function. */
8824 insn_locators_initialize ();
8825 emit_all_insn_group_barriers (NULL);
8826 insn = get_insns ();
8827 shorten_branches (insn);
8828 final_start_function (insn, file, 1);
8829 final (insn, file, 1);
8830 final_end_function ();
8832 reload_completed = 0;
8833 epilogue_completed = 0;
8837 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
8840 ia64_struct_value_rtx (tree fntype,
8841 int incoming ATTRIBUTE_UNUSED)
8843 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
8845 return gen_rtx_REG (Pmode, GR_REG (8));
8849 ia64_scalar_mode_supported_p (enum machine_mode mode)
8875 ia64_vector_mode_supported_p (enum machine_mode mode)
8892 /* Implement the FUNCTION_PROFILER macro. */
8895 ia64_output_function_profiler (FILE *file, int labelno)
8899 /* If the function needs a static chain and the static chain
8900 register is r15, we use an indirect call so as to bypass
8901 the PLT stub in case the executable is dynamically linked,
8902 because the stub clobbers r15 as per 5.3.6 of the psABI.
8903 We don't need to do that in non canonical PIC mode. */
8905 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
8907 gcc_assert (STATIC_CHAIN_REGNUM == 15);
8908 indirect_call = true;
8911 indirect_call = false;
8914 fputs ("\t.prologue 4, r40\n", file);
8916 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
8917 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
8919 if (NO_PROFILE_COUNTERS)
8920 fputs ("\tmov out3 = r0\n", file);
8924 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8926 if (TARGET_AUTO_PIC)
8927 fputs ("\tmovl out3 = @gprel(", file);
8929 fputs ("\taddl out3 = @ltoff(", file);
8930 assemble_name (file, buf);
8931 if (TARGET_AUTO_PIC)
8932 fputs (")\n", file);
8934 fputs ("), r1\n", file);
8938 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
8939 fputs ("\t;;\n", file);
8941 fputs ("\t.save rp, r42\n", file);
8942 fputs ("\tmov out2 = b0\n", file);
8944 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
8945 fputs ("\t.body\n", file);
8946 fputs ("\tmov out1 = r1\n", file);
8949 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
8950 fputs ("\tmov b6 = r16\n", file);
8951 fputs ("\tld8 r1 = [r14]\n", file);
8952 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
8955 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
8958 static GTY(()) rtx mcount_func_rtx;
8960 gen_mcount_func_rtx (void)
8962 if (!mcount_func_rtx)
8963 mcount_func_rtx = init_one_libfunc ("_mcount");
8964 return mcount_func_rtx;
8968 ia64_profile_hook (int labelno)
8972 if (NO_PROFILE_COUNTERS)
8977 const char *label_name;
8978 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8979 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
8980 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
8981 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
8983 ip = gen_reg_rtx (Pmode);
8984 emit_insn (gen_ip_value (ip));
8985 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
8987 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
8992 /* Return the mangling of TYPE if it is an extended fundamental type. */
8995 ia64_mangle_fundamental_type (tree type)
8997 /* On HP-UX, "long double" is mangled as "e" so __float128 is
8999 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9001 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9002 an extended mangling. Elsewhere, "e" is available since long
9003 double is 80 bits. */
9004 if (TYPE_MODE (type) == XFmode)
9005 return TARGET_HPUX ? "u9__float80" : "e";
9006 if (TYPE_MODE (type) == RFmode)
9011 /* Return the diagnostic message string if conversion from FROMTYPE to
9012 TOTYPE is not allowed, NULL otherwise. */
9014 ia64_invalid_conversion (tree fromtype, tree totype)
9016 /* Reject nontrivial conversion to or from __fpreg. */
9017 if (TYPE_MODE (fromtype) == RFmode
9018 && TYPE_MODE (totype) != RFmode
9019 && TYPE_MODE (totype) != VOIDmode)
9020 return N_("invalid conversion from %<__fpreg%>");
9021 if (TYPE_MODE (totype) == RFmode
9022 && TYPE_MODE (fromtype) != RFmode)
9023 return N_("invalid conversion to %<__fpreg%>");
9027 /* Return the diagnostic message string if the unary operation OP is
9028 not permitted on TYPE, NULL otherwise. */
9030 ia64_invalid_unary_op (int op, tree type)
9032 /* Reject operations on __fpreg other than unary + or &. */
9033 if (TYPE_MODE (type) == RFmode
9034 && op != CONVERT_EXPR
9036 return N_("invalid operation on %<__fpreg%>");
9040 /* Return the diagnostic message string if the binary operation OP is
9041 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9043 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, tree type1, tree type2)
9045 /* Reject operations on __fpreg. */
9046 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9047 return N_("invalid operation on %<__fpreg%>");
9051 #include "gt-ia64.h"