1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
44 #include "basic-block.h"
46 #include "sched-int.h"
49 #include "target-def.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
56 /* This is used for communication between ASM_OUTPUT_LABEL and
57 ASM_OUTPUT_LABELREF. */
58 int ia64_asm_output_label = 0;
60 /* Define the information needed to generate branch and scc insns. This is
61 stored from the compare operation. */
62 struct rtx_def * ia64_compare_op0;
63 struct rtx_def * ia64_compare_op1;
65 /* Register names for ia64_expand_prologue. */
66 static const char * const ia64_reg_numbers[96] =
67 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
68 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
69 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
70 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
71 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
72 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
73 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
74 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
75 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
76 "r104","r105","r106","r107","r108","r109","r110","r111",
77 "r112","r113","r114","r115","r116","r117","r118","r119",
78 "r120","r121","r122","r123","r124","r125","r126","r127"};
80 /* ??? These strings could be shared with REGISTER_NAMES. */
81 static const char * const ia64_input_reg_names[8] =
82 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
84 /* ??? These strings could be shared with REGISTER_NAMES. */
85 static const char * const ia64_local_reg_names[80] =
86 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
87 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
88 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
89 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
90 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
91 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
92 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
93 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
94 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
95 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
97 /* ??? These strings could be shared with REGISTER_NAMES. */
98 static const char * const ia64_output_reg_names[8] =
99 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
101 /* Which cpu we are scheduling for. */
102 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
104 /* Determines whether we run our final scheduling pass or not. We always
105 avoid the normal second scheduling pass. */
106 static int ia64_flag_schedule_insns2;
108 /* Determines whether we run variable tracking in machine dependent reorganization. */
110 static int ia64_flag_var_tracking;
112 /* Variables which are this size or smaller are put in the sdata/sbss sections. */
115 unsigned int ia64_section_threshold;
117 /* The following variable is used by the DFA insn scheduler. The value is
118 TRUE if we do insn bundling instead of insn scheduling. */
121 /* Structure to be filled in by ia64_compute_frame_size with register
122 save masks and offsets for the current function. */
124 struct ia64_frame_info
126 HOST_WIDE_INT total_size; /* size of the stack frame, not including
127 the caller's scratch area. */
128 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
129 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
130 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
131 HARD_REG_SET mask; /* mask of saved registers. */
132 unsigned int gr_used_mask; /* mask of registers in use as gr spill
133 registers or long-term scratches. */
134 int n_spilled; /* number of spilled registers. */
135 int reg_fp; /* register for fp. */
136 int reg_save_b0; /* save register for b0. */
137 int reg_save_pr; /* save register for prs. */
138 int reg_save_ar_pfs; /* save register for ar.pfs. */
139 int reg_save_ar_unat; /* save register for ar.unat. */
140 int reg_save_ar_lc; /* save register for ar.lc. */
141 int reg_save_gp; /* save register for gp. */
142 int n_input_regs; /* number of input registers used. */
143 int n_local_regs; /* number of local registers used. */
144 int n_output_regs; /* number of output registers used. */
145 int n_rotate_regs; /* number of rotating registers used. */
147 char need_regstk; /* true if a .regstk directive needed. */
148 char initialized; /* true if the data is finalized. */
151 /* Current frame information calculated by ia64_compute_frame_size. */
152 static struct ia64_frame_info current_frame_info;
154 static int ia64_first_cycle_multipass_dfa_lookahead (void);
155 static void ia64_dependencies_evaluation_hook (rtx, rtx);
156 static void ia64_init_dfa_pre_cycle_insn (void);
157 static rtx ia64_dfa_pre_cycle_insn (void);
158 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
159 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
160 static rtx gen_tls_get_addr (void);
161 static rtx gen_thread_pointer (void);
162 static int find_gr_spill (int);
163 static int next_scratch_gr_reg (void);
164 static void mark_reg_gr_used_mask (rtx, void *);
165 static void ia64_compute_frame_size (HOST_WIDE_INT);
166 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
167 static void finish_spill_pointers (void);
168 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
169 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
170 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
171 static rtx gen_movdi_x (rtx, rtx, rtx);
172 static rtx gen_fr_spill_x (rtx, rtx, rtx);
173 static rtx gen_fr_restore_x (rtx, rtx, rtx);
175 static enum machine_mode hfa_element_mode (tree, bool);
176 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
178 static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
180 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
182 static bool ia64_function_ok_for_sibcall (tree, tree);
183 static bool ia64_return_in_memory (tree, tree);
184 static bool ia64_rtx_costs (rtx, int, int, int *);
185 static void fix_range (const char *);
186 static bool ia64_handle_option (size_t, const char *, int);
187 static struct machine_function * ia64_init_machine_status (void);
188 static void emit_insn_group_barriers (FILE *);
189 static void emit_all_insn_group_barriers (FILE *);
190 static void final_emit_insn_group_barriers (FILE *);
191 static void emit_predicate_relation_info (void);
192 static void ia64_reorg (void);
193 static bool ia64_in_small_data_p (tree);
194 static void process_epilogue (void);
195 static int process_set (FILE *, rtx);
197 static bool ia64_assemble_integer (rtx, unsigned int, int);
198 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
199 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
200 static void ia64_output_function_end_prologue (FILE *);
202 static int ia64_issue_rate (void);
203 static int ia64_adjust_cost (rtx, rtx, rtx, int);
204 static void ia64_sched_init (FILE *, int, int);
205 static void ia64_sched_finish (FILE *, int);
206 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
207 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
208 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
209 static int ia64_variable_issue (FILE *, int, rtx, int);
211 static struct bundle_state *get_free_bundle_state (void);
212 static void free_bundle_state (struct bundle_state *);
213 static void initiate_bundle_states (void);
214 static void finish_bundle_states (void);
215 static unsigned bundle_state_hash (const void *);
216 static int bundle_state_eq_p (const void *, const void *);
217 static int insert_bundle_state (struct bundle_state *);
218 static void initiate_bundle_state_table (void);
219 static void finish_bundle_state_table (void);
220 static int try_issue_nops (struct bundle_state *, int);
221 static int try_issue_insn (struct bundle_state *, rtx);
222 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
223 static int get_max_pos (state_t);
224 static int get_template (state_t, int);
226 static rtx get_next_important_insn (rtx, rtx);
227 static void bundling (FILE *, int, rtx, rtx);
229 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
230 HOST_WIDE_INT, tree);
231 static void ia64_file_start (void);
233 static void ia64_select_rtx_section (enum machine_mode, rtx,
234 unsigned HOST_WIDE_INT);
235 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
237 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
239 static void ia64_rwreloc_unique_section (tree, int)
241 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
242 unsigned HOST_WIDE_INT)
244 static unsigned int ia64_section_type_flags (tree, const char *, int);
245 static void ia64_hpux_add_extern_decl (tree decl)
247 static void ia64_hpux_file_end (void)
249 static void ia64_init_libfuncs (void)
251 static void ia64_hpux_init_libfuncs (void)
253 static void ia64_sysv4_init_libfuncs (void)
255 static void ia64_vms_init_libfuncs (void)
258 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
259 static void ia64_encode_section_info (tree, rtx, int);
260 static rtx ia64_struct_value_rtx (tree, int);
261 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
262 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
263 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
264 static bool ia64_cannot_force_const_mem (rtx);
266 /* Table of valid machine attributes. */
267 static const struct attribute_spec ia64_attribute_table[] =
269 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
270 { "syscall_linkage", 0, 0, false, true, true, NULL },
271 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
272 { NULL, 0, 0, false, false, false, NULL }
275 /* Initialize the GCC target structure. */
276 #undef TARGET_ATTRIBUTE_TABLE
277 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
279 #undef TARGET_INIT_BUILTINS
280 #define TARGET_INIT_BUILTINS ia64_init_builtins
282 #undef TARGET_EXPAND_BUILTIN
283 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
285 #undef TARGET_ASM_BYTE_OP
286 #define TARGET_ASM_BYTE_OP "\tdata1\t"
287 #undef TARGET_ASM_ALIGNED_HI_OP
288 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
289 #undef TARGET_ASM_ALIGNED_SI_OP
290 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
291 #undef TARGET_ASM_ALIGNED_DI_OP
292 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
293 #undef TARGET_ASM_UNALIGNED_HI_OP
294 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
295 #undef TARGET_ASM_UNALIGNED_SI_OP
296 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
297 #undef TARGET_ASM_UNALIGNED_DI_OP
298 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
299 #undef TARGET_ASM_INTEGER
300 #define TARGET_ASM_INTEGER ia64_assemble_integer
302 #undef TARGET_ASM_FUNCTION_PROLOGUE
303 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
304 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
305 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
306 #undef TARGET_ASM_FUNCTION_EPILOGUE
307 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
309 #undef TARGET_IN_SMALL_DATA_P
310 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
312 #undef TARGET_SCHED_ADJUST_COST
313 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
314 #undef TARGET_SCHED_ISSUE_RATE
315 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
316 #undef TARGET_SCHED_VARIABLE_ISSUE
317 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
318 #undef TARGET_SCHED_INIT
319 #define TARGET_SCHED_INIT ia64_sched_init
320 #undef TARGET_SCHED_FINISH
321 #define TARGET_SCHED_FINISH ia64_sched_finish
322 #undef TARGET_SCHED_REORDER
323 #define TARGET_SCHED_REORDER ia64_sched_reorder
324 #undef TARGET_SCHED_REORDER2
325 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
327 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
328 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
330 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
331 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
333 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
334 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
335 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
336 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
338 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
339 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
340 ia64_first_cycle_multipass_dfa_lookahead_guard
342 #undef TARGET_SCHED_DFA_NEW_CYCLE
343 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
345 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
346 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
347 #undef TARGET_PASS_BY_REFERENCE
348 #define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference
349 #undef TARGET_ARG_PARTIAL_BYTES
350 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
352 #undef TARGET_ASM_OUTPUT_MI_THUNK
353 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
354 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
355 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
357 #undef TARGET_ASM_FILE_START
358 #define TARGET_ASM_FILE_START ia64_file_start
360 #undef TARGET_RTX_COSTS
361 #define TARGET_RTX_COSTS ia64_rtx_costs
362 #undef TARGET_ADDRESS_COST
363 #define TARGET_ADDRESS_COST hook_int_rtx_0
365 #undef TARGET_MACHINE_DEPENDENT_REORG
366 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
368 #undef TARGET_ENCODE_SECTION_INFO
369 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
371 #undef TARGET_SECTION_TYPE_FLAGS
372 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
375 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
376 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
379 /* ??? ABI doesn't allow us to define this. */
381 #undef TARGET_PROMOTE_FUNCTION_ARGS
382 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
385 /* ??? ABI doesn't allow us to define this. */
387 #undef TARGET_PROMOTE_FUNCTION_RETURN
388 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
391 /* ??? Investigate. */
393 #undef TARGET_PROMOTE_PROTOTYPES
394 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
397 #undef TARGET_STRUCT_VALUE_RTX
398 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
399 #undef TARGET_RETURN_IN_MEMORY
400 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
401 #undef TARGET_SETUP_INCOMING_VARARGS
402 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
403 #undef TARGET_STRICT_ARGUMENT_NAMING
404 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
405 #undef TARGET_MUST_PASS_IN_STACK
406 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
408 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
409 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
411 #undef TARGET_UNWIND_EMIT
412 #define TARGET_UNWIND_EMIT process_for_unwind_directive
414 #undef TARGET_SCALAR_MODE_SUPPORTED_P
415 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
416 #undef TARGET_VECTOR_MODE_SUPPORTED_P
417 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
419 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
420 in an order different from the specified program order. */
421 #undef TARGET_RELAXED_ORDERING
422 #define TARGET_RELAXED_ORDERING true
424 #undef TARGET_DEFAULT_TARGET_FLAGS
425 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
426 #undef TARGET_HANDLE_OPTION
427 #define TARGET_HANDLE_OPTION ia64_handle_option
429 #undef TARGET_CANNOT_FORCE_CONST_MEM
430 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
432 struct gcc_target targetm = TARGET_INITIALIZER;
436 ADDR_AREA_NORMAL, /* normal address area */
437 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
441 static GTY(()) tree small_ident1;
442 static GTY(()) tree small_ident2;
447 if (small_ident1 == 0)
449 small_ident1 = get_identifier ("small");
450 small_ident2 = get_identifier ("__small__");
454 /* Retrieve the address area that has been chosen for the given decl. */
456 static ia64_addr_area
457 ia64_get_addr_area (tree decl)
461 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
467 id = TREE_VALUE (TREE_VALUE (model_attr));
468 if (id == small_ident1 || id == small_ident2)
469 return ADDR_AREA_SMALL;
471 return ADDR_AREA_NORMAL;
475 ia64_handle_model_attribute (tree *node, tree name, tree args,
476 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
478 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
480 tree arg, decl = *node;
483 arg = TREE_VALUE (args);
484 if (arg == small_ident1 || arg == small_ident2)
486 addr_area = ADDR_AREA_SMALL;
490 warning (OPT_Wattributes, "invalid argument of %qs attribute",
491 IDENTIFIER_POINTER (name));
492 *no_add_attrs = true;
495 switch (TREE_CODE (decl))
498 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
500 && !TREE_STATIC (decl))
502 error ("%Jan address area attribute cannot be specified for "
503 "local variables", decl, decl);
504 *no_add_attrs = true;
506 area = ia64_get_addr_area (decl);
507 if (area != ADDR_AREA_NORMAL && addr_area != area)
509 error ("%Jaddress area of '%s' conflicts with previous "
510 "declaration", decl, decl);
511 *no_add_attrs = true;
516 error ("%Jaddress area attribute cannot be specified for functions",
518 *no_add_attrs = true;
522 warning (OPT_Wattributes, "%qs attribute ignored",
523 IDENTIFIER_POINTER (name));
524 *no_add_attrs = true;
532 ia64_encode_addr_area (tree decl, rtx symbol)
536 flags = SYMBOL_REF_FLAGS (symbol);
537 switch (ia64_get_addr_area (decl))
539 case ADDR_AREA_NORMAL: break;
540 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
541 default: gcc_unreachable ();
543 SYMBOL_REF_FLAGS (symbol) = flags;
547 ia64_encode_section_info (tree decl, rtx rtl, int first)
549 default_encode_section_info (decl, rtl, first);
551 /* Careful not to prod global register variables. */
552 if (TREE_CODE (decl) == VAR_DECL
553 && GET_CODE (DECL_RTL (decl)) == MEM
554 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
555 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
556 ia64_encode_addr_area (decl, XEXP (rtl, 0));
559 /* Implement CONST_OK_FOR_LETTER_P. */
562 ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
567 return CONST_OK_FOR_I (value);
569 return CONST_OK_FOR_J (value);
571 return CONST_OK_FOR_K (value);
573 return CONST_OK_FOR_L (value);
575 return CONST_OK_FOR_M (value);
577 return CONST_OK_FOR_N (value);
579 return CONST_OK_FOR_O (value);
581 return CONST_OK_FOR_P (value);
587 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
590 ia64_const_double_ok_for_letter_p (rtx value, char c)
595 return CONST_DOUBLE_OK_FOR_G (value);
601 /* Implement EXTRA_CONSTRAINT. */
604 ia64_extra_constraint (rtx value, char c)
609 /* Non-volatile memory for FP_REG loads/stores. */
610 return memory_operand (value, VOIDmode) && !MEM_VOLATILE_P (value);
613 /* 1..4 for shladd arguments. */
614 return (GET_CODE (value) == CONST_INT
615 && INTVAL (value) >= 1 && INTVAL (value) <= 4);
618 /* Non-post-inc memory for asms and other unsavory creatures. */
619 return (GET_CODE (value) == MEM
620 && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
621 && (reload_in_progress || memory_operand (value, VOIDmode)));
624 /* Symbol ref to small-address-area. */
625 return small_addr_symbolic_operand (value, VOIDmode);
629 return value == CONST0_RTX (GET_MODE (value));
632 /* An integer vector, such that conversion to an integer yields a
633 value appropriate for an integer 'J' constraint. */
634 if (GET_CODE (value) == CONST_VECTOR
635 && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
637 value = simplify_subreg (DImode, value, GET_MODE (value), 0);
638 return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
643 /* A V2SF vector containing elements that satisfy 'G'. */
645 (GET_CODE (value) == CONST_VECTOR
646 && GET_MODE (value) == V2SFmode
647 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
648 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
655 /* Return 1 if the operands of a move are ok. */
658 ia64_move_ok (rtx dst, rtx src)
660 /* If we're under init_recog_no_volatile, we'll not be able to use
661 memory_operand. So check the code directly and don't worry about
662 the validity of the underlying address, which should have been
663 checked elsewhere anyway. */
664 if (GET_CODE (dst) != MEM)
666 if (GET_CODE (src) == MEM)
668 if (register_operand (src, VOIDmode))
671 /* Otherwise, this must be a constant, and it must be either 0, 0.0, or 1.0. */
672 if (INTEGRAL_MODE_P (GET_MODE (dst)))
673 return src == const0_rtx;
675 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
679 addp4_optimize_ok (rtx op1, rtx op2)
681 return (basereg_operand (op1, GET_MODE (op1)) !=
682 basereg_operand (op2, GET_MODE (op2)));
685 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
686 Return the length of the field, or <= 0 on failure. */
689 ia64_depz_field_mask (rtx rop, rtx rshift)
691 unsigned HOST_WIDE_INT op = INTVAL (rop);
692 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
694 /* Get rid of the zero bits we're shifting in. */
697 /* We must now have a solid block of 1's at bit 0. */
698 return exact_log2 (op + 1);
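/* Worked example: for ROP = 0xff00 and RSHIFT = 8, shifting OP right by 8 leaves 0xff,
   and exact_log2 (0xff + 1) returns a field length of 8.  A mask with a hole, e.g.
   ROP = 0xf0f00, leaves 0xf0f; 0xf10 is not a power of two, so exact_log2 returns -1
   and the caller treats it as a failure. */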
701 /* Return the TLS model to use for ADDR. */
703 static enum tls_model
704 tls_symbolic_operand_type (rtx addr)
706 enum tls_model tls_kind = 0;
708 if (GET_CODE (addr) == CONST)
710 if (GET_CODE (XEXP (addr, 0)) == PLUS
711 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
712 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
714 else if (GET_CODE (addr) == SYMBOL_REF)
715 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
720 /* Return true if X is a constant that is valid for some immediate
721 field in an instruction. */
724 ia64_legitimate_constant_p (rtx x)
726 switch (GET_CODE (x))
733 if (GET_MODE (x) == VOIDmode)
735 return CONST_DOUBLE_OK_FOR_G (x);
739 return tls_symbolic_operand_type (x) == 0;
746 /* Don't allow TLS addresses to get spilled to memory. */
749 ia64_cannot_force_const_mem (rtx x)
751 return tls_symbolic_operand_type (x) != 0;
754 /* Expand a symbolic constant load. */
757 ia64_expand_load_address (rtx dest, rtx src)
759 gcc_assert (GET_CODE (dest) == REG);
761 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
762 having to pointer-extend the value afterward. Other forms of address
763 computation below are also more natural to compute as 64-bit quantities.
764 If we've been given an SImode destination register, change it. */
765 if (GET_MODE (dest) != Pmode)
766 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest), 0);
770 if (small_addr_symbolic_operand (src, VOIDmode))
774 emit_insn (gen_load_gprel64 (dest, src));
775 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
776 emit_insn (gen_load_fptr (dest, src));
777 else if (sdata_symbolic_operand (src, VOIDmode))
778 emit_insn (gen_load_gprel (dest, src));
781 HOST_WIDE_INT addend = 0;
784 /* We did split constant offsets in ia64_expand_move, and we did try
785 to keep them split in move_operand, but we also allowed reload to
786 rematerialize arbitrary constants rather than spill the value to
787 the stack and reload it. So we have to be prepared here to split them again. */
789 if (GET_CODE (src) == CONST)
791 HOST_WIDE_INT hi, lo;
793 hi = INTVAL (XEXP (XEXP (src, 0), 1));
794 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
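/* ((x & 0x3fff) ^ 0x2000) - 0x2000 sign-extends the low 14 bits of HI, so LO lands in
   [-0x2000, 0x1fff] (a signed 14-bit value) while the rest stays folded into the symbolic
   address; e.g. an offset of 0x3000 splits into HI = 0x4000 and LO = -0x1000.  The LO
   part is added back in with the GEN_INT (addend) add further down. */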
800 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
804 tmp = gen_rtx_HIGH (Pmode, src);
805 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
806 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
808 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
809 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
813 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
814 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
821 static GTY(()) rtx gen_tls_tga;
823 gen_tls_get_addr (void)
826 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
830 static GTY(()) rtx thread_pointer_rtx;
832 gen_thread_pointer (void)
834 if (!thread_pointer_rtx)
835 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
836 return thread_pointer_rtx;
840 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
841 HOST_WIDE_INT addend)
843 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
844 rtx orig_op0 = op0, orig_op1 = op1;
845 HOST_WIDE_INT addend_lo, addend_hi;
847 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
848 addend_hi = addend - addend_lo;
852 case TLS_MODEL_GLOBAL_DYNAMIC:
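/* Global dynamic: load the symbol's module id (dtpmod) and dtp-relative offset (dtprel)
   and hand both to __tls_get_addr, which returns the variable's address in the
   current thread. */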
855 tga_op1 = gen_reg_rtx (Pmode);
856 emit_insn (gen_load_dtpmod (tga_op1, op1));
858 tga_op2 = gen_reg_rtx (Pmode);
859 emit_insn (gen_load_dtprel (tga_op2, op1));
861 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
862 LCT_CONST, Pmode, 2, tga_op1,
863 Pmode, tga_op2, Pmode);
865 insns = get_insns ();
868 if (GET_MODE (op0) != Pmode)
870 emit_libcall_block (insns, op0, tga_ret, op1);
873 case TLS_MODEL_LOCAL_DYNAMIC:
874 /* ??? This isn't the completely proper way to do local-dynamic.
875 If the call to __tls_get_addr is used only by a single symbol,
876 then we should (somehow) move the dtprel to the second arg
877 to avoid the extra add. */
880 tga_op1 = gen_reg_rtx (Pmode);
881 emit_insn (gen_load_dtpmod (tga_op1, op1));
883 tga_op2 = const0_rtx;
885 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
886 LCT_CONST, Pmode, 2, tga_op1,
887 Pmode, tga_op2, Pmode);
889 insns = get_insns ();
892 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
894 tmp = gen_reg_rtx (Pmode);
895 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
897 if (!register_operand (op0, Pmode))
898 op0 = gen_reg_rtx (Pmode);
901 emit_insn (gen_load_dtprel (op0, op1));
902 emit_insn (gen_adddi3 (op0, tmp, op0));
905 emit_insn (gen_add_dtprel (op0, op1, tmp));
908 case TLS_MODEL_INITIAL_EXEC:
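/* Initial exec: load the thread-pointer-relative (tprel) offset of the symbol and
   add it to the thread pointer, r13. */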
909 op1 = plus_constant (op1, addend_hi);
912 tmp = gen_reg_rtx (Pmode);
913 emit_insn (gen_load_tprel (tmp, op1));
915 if (!register_operand (op0, Pmode))
916 op0 = gen_reg_rtx (Pmode);
917 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
920 case TLS_MODEL_LOCAL_EXEC:
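/* Local exec: the tprel offset is a link-time constant, so it is simply added to the
   thread pointer, either via an explicit load of the offset or with the combined
   add_tprel pattern. */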
921 if (!register_operand (op0, Pmode))
922 op0 = gen_reg_rtx (Pmode);
928 emit_insn (gen_load_tprel (op0, op1));
929 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
932 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
940 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
941 orig_op0, 1, OPTAB_DIRECT);
944 if (GET_MODE (orig_op0) == Pmode)
946 return gen_lowpart (GET_MODE (orig_op0), op0);
950 ia64_expand_move (rtx op0, rtx op1)
952 enum machine_mode mode = GET_MODE (op0);
954 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
955 op1 = force_reg (mode, op1);
957 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
959 HOST_WIDE_INT addend = 0;
960 enum tls_model tls_kind;
963 if (GET_CODE (op1) == CONST
964 && GET_CODE (XEXP (op1, 0)) == PLUS
965 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
967 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
968 sym = XEXP (XEXP (op1, 0), 0);
971 tls_kind = tls_symbolic_operand_type (sym);
973 return ia64_expand_tls_address (tls_kind, op0, sym, addend);
975 if (any_offset_symbol_operand (sym, mode))
977 else if (aligned_offset_symbol_operand (sym, mode))
979 HOST_WIDE_INT addend_lo, addend_hi;
981 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
982 addend_hi = addend - addend_lo;
986 op1 = plus_constant (sym, addend_hi);
995 if (reload_completed)
997 /* We really should have taken care of this offset earlier. */
998 gcc_assert (addend == 0);
999 if (ia64_expand_load_address (op0, op1))
1005 rtx subtarget = no_new_pseudos ? op0 : gen_reg_rtx (mode);
1007 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1009 op1 = expand_simple_binop (mode, PLUS, subtarget,
1010 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1019 /* Split a move from OP1 to OP0 conditional on COND. */
1022 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1024 rtx insn, first = get_last_insn ();
1026 emit_move_insn (op0, op1);
1028 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1030 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1034 /* Split a post-reload TImode or TFmode reference into two DImode
1035 components. This is made extra difficult by the fact that we do
1036 not get any scratch registers to work with, because reload cannot
1037 be prevented from giving us a scratch that overlaps the register
1038 pair involved. So instead, when addressing memory, we tweak the
1039 pointer register up and back down with POST_INCs. Or up and not
1040 back down when we can get away with it.
1042 REVERSED is true when the loads must be done in reversed order
1043 (high word first) for correctness. DEAD is true when the pointer
1044 dies with the second insn we generate and therefore the second
1045 address must not carry a postmodify.
1047 May return an insn which is to be emitted after the moves. */
1050 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1054 switch (GET_CODE (in))
1057 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1058 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1063 /* Cannot occur reversed. */
1064 gcc_assert (!reversed);
1066 if (GET_MODE (in) != TFmode)
1067 split_double (in, &out[0], &out[1]);
1069 /* split_double does not understand how to split a TFmode
1070 quantity into a pair of DImode constants. */
1073 unsigned HOST_WIDE_INT p[2];
1074 long l[4]; /* TFmode is 128 bits */
1076 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1077 real_to_target (l, &r, TFmode);
1079 if (FLOAT_WORDS_BIG_ENDIAN)
1081 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1082 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1086 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1087 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1089 out[0] = GEN_INT (p[0]);
1090 out[1] = GEN_INT (p[1]);
1096 rtx base = XEXP (in, 0);
1099 switch (GET_CODE (base))
1104 out[0] = adjust_automodify_address
1105 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1106 out[1] = adjust_automodify_address
1107 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1111 /* Reversal requires a pre-increment, which can only
1112 be done as a separate insn. */
1113 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1114 out[0] = adjust_automodify_address
1115 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1116 out[1] = adjust_address (in, DImode, 0);
1121 gcc_assert (!reversed && !dead);
1123 /* Just do the increment in two steps. */
1124 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1125 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1129 gcc_assert (!reversed && !dead);
1131 /* Add 8, subtract 24. */
1132 base = XEXP (base, 0);
1133 out[0] = adjust_automodify_address
1134 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1135 out[1] = adjust_automodify_address
1137 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1142 gcc_assert (!reversed && !dead);
1144 /* Extract and adjust the modification. This case is
1145 trickier than the others, because we might have an
1146 index register, or we might have a combined offset that
1147 doesn't fit a signed 9-bit displacement field. We can
1148 assume the incoming expression is already legitimate. */
1149 offset = XEXP (base, 1);
1150 base = XEXP (base, 0);
1152 out[0] = adjust_automodify_address
1153 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1155 if (GET_CODE (XEXP (offset, 1)) == REG)
1157 /* Can't adjust the postmodify to match. Emit the
1158 original, then a separate addition insn. */
1159 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1160 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1164 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1165 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1167 /* Again the postmodify cannot be made to match,
1168 but in this case it's more efficient to get rid
1169 of the postmodify entirely and fix up with an add insn. */
1171 out[1] = adjust_automodify_address (in, DImode, base, 8);
1173 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1177 /* Combined offset still fits in the displacement field.
1178 (We cannot overflow it at the high end.) */
1179 out[1] = adjust_automodify_address
1180 (in, DImode, gen_rtx_POST_MODIFY
1181 (Pmode, base, gen_rtx_PLUS
1183 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1202 /* Split a TImode or TFmode move instruction after reload.
1203 This is used by *movtf_internal and *movti_internal. */
1205 ia64_split_tmode_move (rtx operands[])
1207 rtx in[2], out[2], insn;
1210 bool reversed = false;
1212 /* It is possible for reload to decide to overwrite a pointer with
1213 the value it points to. In that case we have to do the loads in
1214 the appropriate order so that the pointer is not destroyed too
1215 early. Also we must not generate a postmodify for that second
1216 load, or rws_access_regno will die. */
1217 if (GET_CODE (operands[1]) == MEM
1218 && reg_overlap_mentioned_p (operands[0], operands[1]))
1220 rtx base = XEXP (operands[1], 0);
1221 while (GET_CODE (base) != REG)
1222 base = XEXP (base, 0);
1224 if (REGNO (base) == REGNO (operands[0]))
1228 /* Another reason to do the moves in reversed order is if the first
1229 element of the target register pair is also the second element of
1230 the source register pair. */
1231 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1232 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1235 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1236 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
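/* A post-increment, post-decrement or post-modify address hidden inside a MEM must be
   advertised with a REG_INC note so that later passes know the base register is changed
   as a side effect of the move. */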
1238 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1239 if (GET_CODE (EXP) == MEM \
1240 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1241 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1242 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1243 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1244 XEXP (XEXP (EXP, 0), 0), \
1247 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1248 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1249 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1251 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1252 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1253 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1256 emit_insn (fixup[0]);
1258 emit_insn (fixup[1]);
1260 #undef MAYBE_ADD_REG_INC_NOTE
1263 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1264 through memory plus an extra GR scratch register. Except that you can
1265 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1266 SECONDARY_RELOAD_CLASS, but not both.
1268 We got into problems in the first place by allowing a construct like
1269 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1270 This solution attempts to prevent this situation from occurring. When
1271 we see something like the above, we spill the inner register to memory. */
1274 spill_xfmode_operand (rtx in, int force)
1276 if (GET_CODE (in) == SUBREG
1277 && GET_MODE (SUBREG_REG (in)) == TImode
1278 && GET_CODE (SUBREG_REG (in)) == REG)
1280 rtx memt = assign_stack_temp (TImode, 16, 0);
1281 emit_move_insn (memt, SUBREG_REG (in));
1282 return adjust_address (memt, XFmode, 0);
1284 else if (force && GET_CODE (in) == REG)
1286 rtx memx = assign_stack_temp (XFmode, 16, 0);
1287 emit_move_insn (memx, in);
1294 /* Emit comparison instruction if necessary, returning the expression
1295 that holds the compare result in the proper mode. */
1297 static GTY(()) rtx cmptf_libfunc;
1300 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1302 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1305 /* If we have a BImode input, then we already have a compare result, and
1306 do not need to emit another comparison. */
1307 if (GET_MODE (op0) == BImode)
1309 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1312 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1313 magic number as its third argument, which indicates what to do.
1314 The return value is an integer to be compared against zero. */
1315 else if (GET_MODE (op0) == TFmode)
1318 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1324 enum rtx_code ncode;
1327 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1330 /* 1 = equal, 0 = not equal. Equality operators do
1331 not raise FP_INVALID when given an SNaN operand. */
1332 case EQ: magic = QCMP_EQ; ncode = NE; break;
1333 case NE: magic = QCMP_EQ; ncode = EQ; break;
1334 /* isunordered() from C99. */
1335 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1336 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1337 /* Relational operators raise FP_INVALID when given
1339 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1340 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1341 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1342 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1343 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1344 Expanders for buneq etc. would have to be added to ia64.md
1345 for this to be useful. */
1346 default: gcc_unreachable ();
1351 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1352 op0, TFmode, op1, TFmode,
1353 GEN_INT (magic), DImode);
1354 cmp = gen_reg_rtx (BImode);
1355 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1356 gen_rtx_fmt_ee (ncode, BImode,
1359 insns = get_insns ();
1362 emit_libcall_block (insns, cmp, cmp,
1363 gen_rtx_fmt_ee (code, BImode, op0, op1));
1368 cmp = gen_reg_rtx (BImode);
1369 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1370 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1374 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1377 /* Generate an integral vector comparison. */
1380 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1381 rtx dest, rtx op0, rtx op1)
1383 bool negate = false;
1418 rtx w0h, w0l, w1h, w1l, ch, cl;
1419 enum machine_mode wmode;
1420 rtx (*unpack_l) (rtx, rtx, rtx);
1421 rtx (*unpack_h) (rtx, rtx, rtx);
1422 rtx (*pack) (rtx, rtx, rtx);
1424 /* We don't have native unsigned comparisons, but we can generate
1425 them better than generic code can. */
1427 gcc_assert (mode != V2SImode);
1432 pack = gen_pack2_sss;
1433 unpack_l = gen_unpack1_l;
1434 unpack_h = gen_unpack1_h;
1439 pack = gen_pack4_sss;
1440 unpack_l = gen_unpack2_l;
1441 unpack_h = gen_unpack2_h;
1448 /* Unpack into wider vectors, zero extending the elements. */
1450 w0l = gen_reg_rtx (wmode);
1451 w0h = gen_reg_rtx (wmode);
1452 w1l = gen_reg_rtx (wmode);
1453 w1h = gen_reg_rtx (wmode);
1454 emit_insn (unpack_l (gen_lowpart (mode, w0l), op0, CONST0_RTX (mode)));
1455 emit_insn (unpack_h (gen_lowpart (mode, w0h), op0, CONST0_RTX (mode)));
1456 emit_insn (unpack_l (gen_lowpart (mode, w1l), op1, CONST0_RTX (mode)));
1457 emit_insn (unpack_h (gen_lowpart (mode, w1h), op1, CONST0_RTX (mode)));
1459 /* Compare in the wider mode. */
1461 cl = gen_reg_rtx (wmode);
1462 ch = gen_reg_rtx (wmode);
1463 code = signed_condition (code);
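/* After zero extension the elements are non-negative in the wider mode, so the signed
   form of the comparison yields the unsigned result. */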
1464 ia64_expand_vecint_compare (code, wmode, cl, w0l, w1l);
1465 negate = ia64_expand_vecint_compare (code, wmode, ch, w0h, w1h);
1467 /* Repack into a single narrower vector. */
1469 emit_insn (pack (dest, cl, ch));
1477 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1478 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1484 ia64_expand_vcondu_v2si (enum rtx_code code, rtx operands[])
1486 rtx dl, dh, bl, bh, op1l, op1h, op2l, op2h, op4l, op4h, op5l, op5h, x;
1488 /* In this case, we extract the two SImode quantities and generate
1489 normal comparisons for each of them. */
1491 op1l = gen_lowpart (SImode, operands[1]);
1492 op2l = gen_lowpart (SImode, operands[2]);
1493 op4l = gen_lowpart (SImode, operands[4]);
1494 op5l = gen_lowpart (SImode, operands[5]);
1496 op1h = gen_reg_rtx (SImode);
1497 op2h = gen_reg_rtx (SImode);
1498 op4h = gen_reg_rtx (SImode);
1499 op5h = gen_reg_rtx (SImode);
1501 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op1h),
1502 gen_lowpart (DImode, operands[1]), GEN_INT (32)));
1503 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op2h),
1504 gen_lowpart (DImode, operands[2]), GEN_INT (32)));
1505 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op4h),
1506 gen_lowpart (DImode, operands[4]), GEN_INT (32)));
1507 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op5h),
1508 gen_lowpart (DImode, operands[5]), GEN_INT (32)));
1510 bl = gen_reg_rtx (BImode);
1511 x = gen_rtx_fmt_ee (code, BImode, op4l, op5l);
1512 emit_insn (gen_rtx_SET (VOIDmode, bl, x));
1514 bh = gen_reg_rtx (BImode);
1515 x = gen_rtx_fmt_ee (code, BImode, op4h, op5h);
1516 emit_insn (gen_rtx_SET (VOIDmode, bh, x));
1518 /* With the results of the comparisons, emit conditional moves. */
1520 dl = gen_reg_rtx (SImode);
1521 x = gen_rtx_IF_THEN_ELSE (SImode, bl, op1l, op2l);
1522 emit_insn (gen_rtx_SET (VOIDmode, dl, x));
1524 dh = gen_reg_rtx (SImode);
1525 x = gen_rtx_IF_THEN_ELSE (SImode, bh, op1h, op2h);
1526 emit_insn (gen_rtx_SET (VOIDmode, dh, x));
1528 /* Merge the two partial results back into a vector. */
1530 x = gen_rtx_VEC_CONCAT (V2SImode, dl, dh);
1531 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1534 /* Emit an integral vector conditional move. */
1537 ia64_expand_vecint_cmov (rtx operands[])
1539 enum machine_mode mode = GET_MODE (operands[0]);
1540 enum rtx_code code = GET_CODE (operands[3]);
1544 /* Since we don't have unsigned V2SImode comparisons, it's more efficient
1545 to special-case them entirely. */
1546 if (mode == V2SImode
1547 && (code == GTU || code == GEU || code == LEU || code == LTU))
1549 ia64_expand_vcondu_v2si (code, operands);
1553 cmp = gen_reg_rtx (mode);
1554 negate = ia64_expand_vecint_compare (code, mode, cmp,
1555 operands[4], operands[5]);
1557 ot = operands[1+negate];
1558 of = operands[2-negate];
1560 if (ot == CONST0_RTX (mode))
1562 if (of == CONST0_RTX (mode))
1564 emit_move_insn (operands[0], ot);
1568 x = gen_rtx_NOT (mode, cmp);
1569 x = gen_rtx_AND (mode, x, of);
1570 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1572 else if (of == CONST0_RTX (mode))
1574 x = gen_rtx_AND (mode, cmp, ot);
1575 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
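/* General case: the comparison produced an all-ones / all-zeros element mask, so select
   with dest = (cmp & on_true) | (~cmp & on_false). */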
1581 t = gen_reg_rtx (mode);
1582 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1583 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1585 f = gen_reg_rtx (mode);
1586 x = gen_rtx_NOT (mode, cmp);
1587 x = gen_rtx_AND (mode, x, operands[2-negate]);
1588 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1590 x = gen_rtx_IOR (mode, t, f);
1591 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1595 /* Emit an integral vector min or max operation. Return true if all done. */
1598 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1603 /* These four combinations are supported directly. */
1604 if (mode == V8QImode && (code == UMIN || code == UMAX))
1606 if (mode == V4HImode && (code == SMIN || code == SMAX))
1609 /* Everything else implemented via vector comparisons. */
1610 xops[0] = operands[0];
1611 xops[4] = xops[1] = operands[1];
1612 xops[5] = xops[2] = operands[2];
1631 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
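/* Express MIN/MAX as dest = (op1 cmp op2) ? op1 : op2 and expand it as a vector
   conditional move; e.g. SMIN selects op1 when op1 < op2. */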
1633 ia64_expand_vecint_cmov (xops);
1637 /* Emit the appropriate sequence for a call. */
1640 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1645 addr = XEXP (addr, 0);
1646 addr = convert_memory_address (DImode, addr);
1647 b0 = gen_rtx_REG (DImode, R_BR (0));
1649 /* ??? Should do this for functions known to bind local too. */
1650 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1653 insn = gen_sibcall_nogp (addr);
1655 insn = gen_call_nogp (addr, b0);
1657 insn = gen_call_value_nogp (retval, addr, b0);
1658 insn = emit_call_insn (insn);
1663 insn = gen_sibcall_gp (addr);
1665 insn = gen_call_gp (addr, b0);
1667 insn = gen_call_value_gp (retval, addr, b0);
1668 insn = emit_call_insn (insn);
1670 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1674 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
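/* Reload the GP register (r1) after a call that may have clobbered it, either from the
   general register it was saved in or from its slot in the memory spill area. */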
1678 ia64_reload_gp (void)
1682 if (current_frame_info.reg_save_gp)
1683 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1686 HOST_WIDE_INT offset;
1688 offset = (current_frame_info.spill_cfa_off
1689 + current_frame_info.spill_size);
1690 if (frame_pointer_needed)
1692 tmp = hard_frame_pointer_rtx;
1697 tmp = stack_pointer_rtx;
1698 offset = current_frame_info.total_size - offset;
1701 if (CONST_OK_FOR_I (offset))
1702 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1703 tmp, GEN_INT (offset)));
1706 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1707 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1708 pic_offset_table_rtx, tmp));
1711 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1714 emit_move_insn (pic_offset_table_rtx, tmp);
1718 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1719 rtx scratch_b, int noreturn_p, int sibcall_p)
1722 bool is_desc = false;
1724 /* If we find we're calling through a register, then we're actually
1725 calling through a descriptor, so load up the values. */
1726 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1731 /* ??? We are currently constrained to *not* use peep2, because
1732 we can legitimately change the global lifetime of the GP
1733 (in the form of killing where previously live). This is
1734 because a call through a descriptor doesn't use the previous
1735 value of the GP, while a direct call does, and we do not
1736 commit to either form until the split here.
1738 That said, this means that we lack precise life info for
1739 whether ADDR is dead after this call. This is not terribly
1740 important, since we can fix things up essentially for free
1741 with the POST_DEC below, but it's nice to not use it when we
1742 can immediately tell it's not necessary. */
1743 addr_dead_p = ((noreturn_p || sibcall_p
1744 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1746 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1748 /* Load the code address into scratch_b. */
1749 tmp = gen_rtx_POST_INC (Pmode, addr);
1750 tmp = gen_rtx_MEM (Pmode, tmp);
1751 emit_move_insn (scratch_r, tmp);
1752 emit_move_insn (scratch_b, scratch_r);
1754 /* Load the GP address. If ADDR is not dead here, then we must
1755 revert the change made above via the POST_INCREMENT. */
1757 tmp = gen_rtx_POST_DEC (Pmode, addr);
1760 tmp = gen_rtx_MEM (Pmode, tmp);
1761 emit_move_insn (pic_offset_table_rtx, tmp);
1768 insn = gen_sibcall_nogp (addr);
1770 insn = gen_call_value_nogp (retval, addr, retaddr);
1772 insn = gen_call_nogp (addr, retaddr);
1773 emit_call_insn (insn);
1775 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1779 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
1781 This differs from the generic code in that we know about the zero-extending
1782 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
1783 also know that ld.acq+cmpxchg.rel equals a full barrier.
1785 The loop we want to generate looks like
1790 new_reg = cmp_reg op val;
1791 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
1792 if (cmp_reg != old_reg)
1795 Note that we only do the plain load from memory once. Subsequent
1796 iterations use the value loaded by the compare-and-swap pattern. */
1799 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
1800 rtx old_dst, rtx new_dst)
1802 enum machine_mode mode = GET_MODE (mem);
1803 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
1804 enum insn_code icode;
1806 /* Special case for using fetchadd. */
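/* fetchadd atomically adds a small immediate to memory and returns the old value; the
   hardware only accepts increments of -16, -8, -4, -1, 1, 4, 8 and 16, which is what
   fetchadd_operand checks for. */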
1807 if ((mode == SImode || mode == DImode) && fetchadd_operand (val, mode))
1810 old_dst = gen_reg_rtx (mode);
1812 emit_insn (gen_memory_barrier ());
1815 icode = CODE_FOR_fetchadd_acq_si;
1817 icode = CODE_FOR_fetchadd_acq_di;
1818 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
1822 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
1824 if (new_reg != new_dst)
1825 emit_move_insn (new_dst, new_reg);
1830 /* Because of the volatile mem read, we get an ld.acq, which is the
1831 front half of the full barrier. The end half is the cmpxchg.rel. */
1832 gcc_assert (MEM_VOLATILE_P (mem));
1834 old_reg = gen_reg_rtx (DImode);
1835 cmp_reg = gen_reg_rtx (DImode);
1836 label = gen_label_rtx ();
1840 val = simplify_gen_subreg (DImode, val, mode, 0);
1841 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
1844 emit_move_insn (cmp_reg, mem);
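/* cmpxchg compares the memory word against ar.ccv, so the expected old value must be
   copied into ar.ccv before each compare-and-swap. */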
1848 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
1849 emit_move_insn (old_reg, cmp_reg);
1850 emit_move_insn (ar_ccv, cmp_reg);
1853 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
1858 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
1861 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
1862 true, OPTAB_DIRECT);
1865 new_reg = gen_lowpart (mode, new_reg);
1867 emit_move_insn (new_dst, new_reg);
1871 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
1872 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
1873 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
1874 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
1879 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
1881 emit_cmp_and_jump_insns (cmp_reg, old_reg, EQ, NULL, DImode, true, label);
1884 /* Begin the assembly file. */
1887 ia64_file_start (void)
1889 /* Variable tracking should be run after all optimizations which change order
1890 of insns. It also needs a valid CFG. This can't be done in
1891 ia64_override_options, because flag_var_tracking is finalized after the options are processed. */
1893 ia64_flag_var_tracking = flag_var_tracking;
1894 flag_var_tracking = 0;
1896 default_file_start ();
1897 emit_safe_across_calls ();
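/* Emit a .pred.safe_across_calls directive listing the predicate registers that are not
   call-used, so the assembler can assume they are preserved across calls. */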
1901 emit_safe_across_calls (void)
1903 unsigned int rs, re;
1910 while (rs < 64 && call_used_regs[PR_REG (rs)])
1914 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1918 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1922 fputc (',', asm_out_file);
1924 fprintf (asm_out_file, "p%u", rs);
1926 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1930 fputc ('\n', asm_out_file);
1933 /* Helper function for ia64_compute_frame_size: find an appropriate general
1934 register to spill some special register to. SPECIAL_SPILL_MASK contains
1935 bits in GR0 to GR31 that have already been allocated by this routine.
1936 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1939 find_gr_spill (int try_locals)
1943 /* If this is a leaf function, first try an otherwise unused
1944 call-clobbered register. */
1945 if (current_function_is_leaf)
1947 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1948 if (! regs_ever_live[regno]
1949 && call_used_regs[regno]
1950 && ! fixed_regs[regno]
1951 && ! global_regs[regno]
1952 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1954 current_frame_info.gr_used_mask |= 1 << regno;
1961 regno = current_frame_info.n_local_regs;
1962 /* If there is a frame pointer, then we can't use loc79, because
1963 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1964 reg_name switching code in ia64_expand_prologue. */
1965 if (regno < (80 - frame_pointer_needed))
1967 current_frame_info.n_local_regs = regno + 1;
1968 return LOC_REG (0) + regno;
1972 /* Failed to find a general register to spill to. Must use stack. */
1976 /* In order to make for nice schedules, we try to allocate every temporary
1977 to a different register. We must of course stay away from call-saved,
1978 fixed, and global registers. We must also stay away from registers
1979 allocated in current_frame_info.gr_used_mask, since those include regs
1980 used all through the prologue.
1982 Any register allocated here must be used immediately. The idea is to
1983 aid scheduling, not to solve data flow problems. */
1985 static int last_scratch_gr_reg;
1988 next_scratch_gr_reg (void)
1992 for (i = 0; i < 32; ++i)
1994 regno = (last_scratch_gr_reg + i + 1) & 31;
1995 if (call_used_regs[regno]
1996 && ! fixed_regs[regno]
1997 && ! global_regs[regno]
1998 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2000 last_scratch_gr_reg = regno;
2005 /* There must be _something_ available. */
2009 /* Helper function for ia64_compute_frame_size, called through
2010 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2013 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2015 unsigned int regno = REGNO (reg);
2018 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2019 for (i = 0; i < n; ++i)
2020 current_frame_info.gr_used_mask |= 1 << (regno + i);
2024 /* Returns the number of bytes offset between the frame pointer and the stack
2025 pointer for the current function. SIZE is the number of bytes of space
2026 needed for local variables. */
2029 ia64_compute_frame_size (HOST_WIDE_INT size)
2031 HOST_WIDE_INT total_size;
2032 HOST_WIDE_INT spill_size = 0;
2033 HOST_WIDE_INT extra_spill_size = 0;
2034 HOST_WIDE_INT pretend_args_size;
2037 int spilled_gr_p = 0;
2038 int spilled_fr_p = 0;
2042 if (current_frame_info.initialized)
2045 memset (&current_frame_info, 0, sizeof current_frame_info);
2046 CLEAR_HARD_REG_SET (mask);
2048 /* Don't allocate scratches to the return register. */
2049 diddle_return_value (mark_reg_gr_used_mask, NULL);
2051 /* Don't allocate scratches to the EH scratch registers. */
2052 if (cfun->machine->ia64_eh_epilogue_sp)
2053 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2054 if (cfun->machine->ia64_eh_epilogue_bsp)
2055 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2057 /* Find the size of the register stack frame. We have only 80 local
2058 registers, because we reserve 8 for the inputs and 8 for the outputs. */
2061 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2062 since we'll be adjusting that down later. */
2063 regno = LOC_REG (78) + ! frame_pointer_needed;
2064 for (; regno >= LOC_REG (0); regno--)
2065 if (regs_ever_live[regno])
2067 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2069 /* For functions marked with the syscall_linkage attribute, we must mark
2070 all eight input registers as in use, so that locals aren't visible to the caller. */
2073 if (cfun->machine->n_varargs > 0
2074 || lookup_attribute ("syscall_linkage",
2075 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2076 current_frame_info.n_input_regs = 8;
2079 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2080 if (regs_ever_live[regno])
2082 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2085 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2086 if (regs_ever_live[regno])
2088 i = regno - OUT_REG (0) + 1;
2090 #ifndef PROFILE_HOOK
2091 /* When -p profiling, we need one output register for the mcount argument.
2092 Likewise for -a profiling for the bb_init_func argument. For -ax
2093 profiling, we need two output registers for the two bb_init_trace_func arguments. */
2095 if (current_function_profile)
2098 current_frame_info.n_output_regs = i;
2100 /* ??? No rotating register support yet. */
2101 current_frame_info.n_rotate_regs = 0;
2103 /* Discover which registers need spilling, and how much room that
2104 will take. Begin with floating point and general registers,
2105 which will always wind up on the stack. */
2107 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2108 if (regs_ever_live[regno] && ! call_used_regs[regno])
2110 SET_HARD_REG_BIT (mask, regno);
2116 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2117 if (regs_ever_live[regno] && ! call_used_regs[regno])
2119 SET_HARD_REG_BIT (mask, regno);
2125 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2126 if (regs_ever_live[regno] && ! call_used_regs[regno])
2128 SET_HARD_REG_BIT (mask, regno);
2133 /* Now come all special registers that might get saved in other
2134 general registers. */
2136 if (frame_pointer_needed)
2138 current_frame_info.reg_fp = find_gr_spill (1);
2139 /* If we did not get a register, then we take LOC79. This is guaranteed
2140 to be free, even if regs_ever_live is already set, because this is
2141 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2142 as we don't count loc79 above. */
2143 if (current_frame_info.reg_fp == 0)
2145 current_frame_info.reg_fp = LOC_REG (79);
2146 current_frame_info.n_local_regs++;
2150 if (! current_function_is_leaf)
2152 /* Emit a save of BR0 if we call other functions. Do this even
2153 if this function doesn't return, as EH depends on this to be
2154 able to unwind the stack. */
2155 SET_HARD_REG_BIT (mask, BR_REG (0));
2157 current_frame_info.reg_save_b0 = find_gr_spill (1);
2158 if (current_frame_info.reg_save_b0 == 0)
2164 /* Similarly for ar.pfs. */
2165 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2166 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2167 if (current_frame_info.reg_save_ar_pfs == 0)
2169 extra_spill_size += 8;
2173 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2174 registers are clobbered, so we fall back to the stack. */
2175 current_frame_info.reg_save_gp
2176 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2177 if (current_frame_info.reg_save_gp == 0)
2179 SET_HARD_REG_BIT (mask, GR_REG (1));
2186 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2188 SET_HARD_REG_BIT (mask, BR_REG (0));
2193 if (regs_ever_live[AR_PFS_REGNUM])
2195 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2196 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2197 if (current_frame_info.reg_save_ar_pfs == 0)
2199 extra_spill_size += 8;
2205 /* Unwind descriptor hackery: things are most efficient if we allocate
2206 consecutive GR save registers for RP, PFS, FP in that order. However,
2207 it is absolutely critical that FP get the only hard register that's
2208 guaranteed to be free, so we allocated it first. If all three did
2209 happen to be allocated hard regs, and are consecutive, rearrange them
2210 into the preferred order now. */
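/* Concretely: if find_gr_spill handed register N to FP, N+1 to B0 and
   N+2 to ar.pfs, the swap below moves the B0 save to N, the ar.pfs save
   to N+1 and FP to N+2, which is the RP, PFS, FP order described above.  */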
2211 if (current_frame_info.reg_fp != 0
2212 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2213 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2215 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2216 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2217 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2220 /* See if we need to store the predicate register block. */
2221 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2222 if (regs_ever_live[regno] && ! call_used_regs[regno])
2224 if (regno <= PR_REG (63))
2226 SET_HARD_REG_BIT (mask, PR_REG (0));
2227 current_frame_info.reg_save_pr = find_gr_spill (1);
2228 if (current_frame_info.reg_save_pr == 0)
2230 extra_spill_size += 8;
2234 /* ??? Mark them all as used so that register renaming and such
2235 are free to use them. */
2236 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2237 regs_ever_live[regno] = 1;
2240 /* If we're forced to use st8.spill, we're forced to save and restore
2241 ar.unat as well. The check for existing liveness allows inline asm
2242 to touch ar.unat. */
2243 if (spilled_gr_p || cfun->machine->n_varargs
2244 || regs_ever_live[AR_UNAT_REGNUM])
2246 regs_ever_live[AR_UNAT_REGNUM] = 1;
2247 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2248 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2249 if (current_frame_info.reg_save_ar_unat == 0)
2251 extra_spill_size += 8;
2256 if (regs_ever_live[AR_LC_REGNUM])
2258 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2259 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2260 if (current_frame_info.reg_save_ar_lc == 0)
2262 extra_spill_size += 8;
2267 /* If we have an odd number of words of pretend arguments written to
2268 the stack, then the FR save area will be unaligned. We round the
2269 size of this area up to keep things 16 byte aligned. */
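/* E.g. 8 bytes of pretend arguments are rounded up to 16 below, so the
   16 byte XFmode FR spill slots that follow stay aligned.  */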
2271 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2273 pretend_args_size = current_function_pretend_args_size;
2275 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2276 + current_function_outgoing_args_size);
2277 total_size = IA64_STACK_ALIGN (total_size);
2279 /* We always use the 16-byte scratch area provided by the caller, but
2280 if we are a leaf function, there's no one to which we need to provide
2282 if (current_function_is_leaf)
2283 total_size = MAX (0, total_size - 16);
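/* A rough worked example: 40 bytes of locals plus 16 bytes of spills give
   IA64_STACK_ALIGN (56) == 64; a leaf function then drops the 16 byte
   scratch area and ends up with total_size == 48.  */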
2285 current_frame_info.total_size = total_size;
2286 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2287 current_frame_info.spill_size = spill_size;
2288 current_frame_info.extra_spill_size = extra_spill_size;
2289 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2290 current_frame_info.n_spilled = n_spilled;
2291 current_frame_info.initialized = reload_completed;
2294 /* Compute the initial difference between the specified pair of registers. */
2297 ia64_initial_elimination_offset (int from, int to)
2299 HOST_WIDE_INT offset;
2301 ia64_compute_frame_size (get_frame_size ());
2304 case FRAME_POINTER_REGNUM:
2307 case HARD_FRAME_POINTER_REGNUM:
2308 if (current_function_is_leaf)
2309 offset = -current_frame_info.total_size;
2311 offset = -(current_frame_info.total_size
2312 - current_function_outgoing_args_size - 16);
2315 case STACK_POINTER_REGNUM:
2316 if (current_function_is_leaf)
2319 offset = 16 + current_function_outgoing_args_size;
2327 case ARG_POINTER_REGNUM:
2328 /* Arguments start above the 16 byte save area, unless the function is
2329 stdarg, in which case we store through the 16 byte save area. */
2332 case HARD_FRAME_POINTER_REGNUM:
2333 offset = 16 - current_function_pretend_args_size;
2336 case STACK_POINTER_REGNUM:
2337 offset = (current_frame_info.total_size
2338 + 16 - current_function_pretend_args_size);
2353 /* If there are more than a trivial number of register spills, we use
2354 two interleaved iterators so that we can get two memory references
2357 In order to simplify things in the prologue and epilogue expanders,
2358 we use helper functions to fix up the memory references after the
2359 fact with the appropriate offsets to a POST_MODIFY memory mode.
2360 The following data structure tracks the state of the two iterators
2361 while insns are being emitted. */
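/* For example, spilling four 8 byte registers at CFA offsets 32, 24, 16
   and 8 alternates the iterators: iterator 0 touches offsets 32 and 16,
   iterator 1 touches 24 and 8, so each reference after the first on a
   given iterator becomes a 16 byte POST_MODIFY instead of a separate add.  */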
2363 struct spill_fill_data
2365 rtx init_after; /* point at which to emit initializations */
2366 rtx init_reg[2]; /* initial base register */
2367 rtx iter_reg[2]; /* the iterator registers */
2368 rtx *prev_addr[2]; /* address of last memory use */
2369 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2370 HOST_WIDE_INT prev_off[2]; /* last offset */
2371 int n_iter; /* number of iterators in use */
2372 int next_iter; /* next iterator to use */
2373 unsigned int save_gr_used_mask;
2376 static struct spill_fill_data spill_fill_data;
2379 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2383 spill_fill_data.init_after = get_last_insn ();
2384 spill_fill_data.init_reg[0] = init_reg;
2385 spill_fill_data.init_reg[1] = init_reg;
2386 spill_fill_data.prev_addr[0] = NULL;
2387 spill_fill_data.prev_addr[1] = NULL;
2388 spill_fill_data.prev_insn[0] = NULL;
2389 spill_fill_data.prev_insn[1] = NULL;
2390 spill_fill_data.prev_off[0] = cfa_off;
2391 spill_fill_data.prev_off[1] = cfa_off;
2392 spill_fill_data.next_iter = 0;
2393 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2395 spill_fill_data.n_iter = 1 + (n_spills > 2);
2396 for (i = 0; i < spill_fill_data.n_iter; ++i)
2398 int regno = next_scratch_gr_reg ();
2399 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2400 current_frame_info.gr_used_mask |= 1 << regno;
2405 finish_spill_pointers (void)
2407 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2411 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2413 int iter = spill_fill_data.next_iter;
2414 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2415 rtx disp_rtx = GEN_INT (disp);
2418 if (spill_fill_data.prev_addr[iter])
2420 if (CONST_OK_FOR_N (disp))
2422 *spill_fill_data.prev_addr[iter]
2423 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2424 gen_rtx_PLUS (DImode,
2425 spill_fill_data.iter_reg[iter],
2427 REG_NOTES (spill_fill_data.prev_insn[iter])
2428 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2429 REG_NOTES (spill_fill_data.prev_insn[iter]));
2433 /* ??? Could use register post_modify for loads. */
2434 if (! CONST_OK_FOR_I (disp))
2436 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2437 emit_move_insn (tmp, disp_rtx);
2440 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2441 spill_fill_data.iter_reg[iter], disp_rtx));
2444 /* Micro-optimization: if we've created a frame pointer, it's at
2445 CFA 0, which may allow the real iterator to be initialized lower,
2446 slightly increasing parallelism. Also, if there are few saves
2447 it may eliminate the iterator entirely. */
2449 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2450 && frame_pointer_needed)
2452 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2453 set_mem_alias_set (mem, get_varargs_alias_set ());
2461 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2462 spill_fill_data.init_reg[iter]);
2467 if (! CONST_OK_FOR_I (disp))
2469 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2470 emit_move_insn (tmp, disp_rtx);
2474 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2475 spill_fill_data.init_reg[iter],
2482 /* Careful for being the first insn in a sequence. */
2483 if (spill_fill_data.init_after)
2484 insn = emit_insn_after (seq, spill_fill_data.init_after);
2487 rtx first = get_insns ();
2489 insn = emit_insn_before (seq, first);
2491 insn = emit_insn (seq);
2493 spill_fill_data.init_after = insn;
2495 /* If DISP is 0, we may or may not have a further adjustment
2496 afterward. If we do, then the load/store insn may be modified
2497 to be a post-modify. If we don't, then this copy may be
2498 eliminated by copyprop_hardreg_forward, which makes this
2499 insn garbage, which runs afoul of the sanity check in
2500 propagate_one_insn. So mark this insn as legal to delete. */
2502 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2506 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2508 /* ??? Not all of the spills are for varargs, but some of them are.
2509 The rest of the spills belong in an alias set of their own. But
2510 it doesn't actually hurt to include them here. */
2511 set_mem_alias_set (mem, get_varargs_alias_set ());
2513 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2514 spill_fill_data.prev_off[iter] = cfa_off;
2516 if (++iter >= spill_fill_data.n_iter)
2518 spill_fill_data.next_iter = iter;
2524 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2527 int iter = spill_fill_data.next_iter;
2530 mem = spill_restore_mem (reg, cfa_off);
2531 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2532 spill_fill_data.prev_insn[iter] = insn;
2539 RTX_FRAME_RELATED_P (insn) = 1;
2541 /* Don't even pretend that the unwind code can intuit its way
2542 through a pair of interleaved post_modify iterators. Just
2543 provide the correct answer. */
2545 if (frame_pointer_needed)
2547 base = hard_frame_pointer_rtx;
2552 base = stack_pointer_rtx;
2553 off = current_frame_info.total_size - cfa_off;
2557 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2558 gen_rtx_SET (VOIDmode,
2559 gen_rtx_MEM (GET_MODE (reg),
2560 plus_constant (base, off)),
2567 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2569 int iter = spill_fill_data.next_iter;
2572 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2573 GEN_INT (cfa_off)));
2574 spill_fill_data.prev_insn[iter] = insn;
2577 /* Wrapper functions that discard the CONST_INT spill offset. These
2578 exist so that we can give gr_spill/gr_fill the offset they need and
2579 use a consistent function interface. */
2582 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2584 return gen_movdi (dest, src);
2588 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2590 return gen_fr_spill (dest, src);
2594 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2596 return gen_fr_restore (dest, src);
2599 /* Called after register allocation to add any instructions needed for the
2600 prologue. Using a prologue insn is favored compared to putting all of the
2601 instructions in output_function_prologue(), since it allows the scheduler
2602 to intermix instructions with the saves of the caller saved registers. In
2603 some cases, it might be necessary to emit a barrier instruction as the last
2604 insn to prevent such scheduling.
2606 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2607 so that the debug info generation code can handle them properly.
2609 The register save area is laid out like so:
2611 [ varargs spill area ]
2612 [ fr register spill area ]
2613 [ br register spill area ]
2614 [ ar register spill area ]
2615 [ pr register spill area ]
2616 [ gr register spill area ] */
2618 /* ??? Get inefficient code when the frame size is larger than can fit in an
2619 adds instruction. */
2622 ia64_expand_prologue (void)
2624 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2625 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2628 ia64_compute_frame_size (get_frame_size ());
2629 last_scratch_gr_reg = 15;
2631 /* If there is no epilogue, then we don't need some prologue insns.
2632 We need to avoid emitting the dead prologue insns, because flow
2633 will complain about them. */
2639 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2640 if ((e->flags & EDGE_FAKE) == 0
2641 && (e->flags & EDGE_FALLTHRU) != 0)
2643 epilogue_p = (e != NULL);
2648 /* Set the local, input, and output register names. We need to do this
2649 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2650 half. If we use in/loc/out register names, then we get assembler errors
2651 in crtn.S because there is no alloc insn or regstk directive in there. */
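/* For example, with 2 input and 3 local registers the loops below make
   in1 print as "r33" and loc2 as "r36", matching the stacked registers
   that the alloc instruction will assign.  */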
2652 if (! TARGET_REG_NAMES)
2654 int inputs = current_frame_info.n_input_regs;
2655 int locals = current_frame_info.n_local_regs;
2656 int outputs = current_frame_info.n_output_regs;
2658 for (i = 0; i < inputs; i++)
2659 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2660 for (i = 0; i < locals; i++)
2661 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2662 for (i = 0; i < outputs; i++)
2663 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2666 /* Set the frame pointer register name. The regnum is logically loc79,
2667 but of course we'll not have allocated that many locals. Rather than
2668 worrying about renumbering the existing rtxs, we adjust the name. */
2669 /* ??? This code means that we can never use one local register when
2670 there is a frame pointer. loc79 gets wasted in this case, as it is
2671 renamed to a register that will never be used. See also the try_locals
2672 code in find_gr_spill. */
2673 if (current_frame_info.reg_fp)
2675 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2676 reg_names[HARD_FRAME_POINTER_REGNUM]
2677 = reg_names[current_frame_info.reg_fp];
2678 reg_names[current_frame_info.reg_fp] = tmp;
2681 /* We don't need an alloc instruction if we've used no outputs or locals. */
2682 if (current_frame_info.n_local_regs == 0
2683 && current_frame_info.n_output_regs == 0
2684 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2685 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2687 /* If there is no alloc, but there are input registers used, then we
2688 need a .regstk directive. */
2689 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2690 ar_pfs_save_reg = NULL_RTX;
2694 current_frame_info.need_regstk = 0;
2696 if (current_frame_info.reg_save_ar_pfs)
2697 regno = current_frame_info.reg_save_ar_pfs;
2699 regno = next_scratch_gr_reg ();
2700 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2702 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2703 GEN_INT (current_frame_info.n_input_regs),
2704 GEN_INT (current_frame_info.n_local_regs),
2705 GEN_INT (current_frame_info.n_output_regs),
2706 GEN_INT (current_frame_info.n_rotate_regs)));
2707 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2710 /* Set up frame pointer, stack pointer, and spill iterators. */
2712 n_varargs = cfun->machine->n_varargs;
2713 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2714 stack_pointer_rtx, 0);
2716 if (frame_pointer_needed)
2718 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2719 RTX_FRAME_RELATED_P (insn) = 1;
2722 if (current_frame_info.total_size != 0)
2724 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2727 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2728 offset = frame_size_rtx;
2731 regno = next_scratch_gr_reg ();
2732 offset = gen_rtx_REG (DImode, regno);
2733 emit_move_insn (offset, frame_size_rtx);
2736 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2737 stack_pointer_rtx, offset));
2739 if (! frame_pointer_needed)
2741 RTX_FRAME_RELATED_P (insn) = 1;
2742 if (GET_CODE (offset) != CONST_INT)
2745 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2746 gen_rtx_SET (VOIDmode,
2748 gen_rtx_PLUS (DImode,
2755 /* ??? At this point we must generate a magic insn that appears to
2756 modify the stack pointer, the frame pointer, and all spill
2757 iterators. This would allow the most scheduling freedom. For
2758 now, just hard stop. */
2759 emit_insn (gen_blockage ());
2762 /* Must copy out ar.unat before doing any integer spills. */
2763 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2765 if (current_frame_info.reg_save_ar_unat)
2767 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2770 alt_regno = next_scratch_gr_reg ();
2771 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2772 current_frame_info.gr_used_mask |= 1 << alt_regno;
2775 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2776 insn = emit_move_insn (ar_unat_save_reg, reg);
2777 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2779 /* Even if we're not going to generate an epilogue, we still
2780 need to save the register so that EH works. */
2781 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2782 emit_insn (gen_prologue_use (ar_unat_save_reg));
2785 ar_unat_save_reg = NULL_RTX;
2787 /* Spill all varargs registers. Do this before spilling any GR registers,
2788 since we want the UNAT bits for the GR registers to override the UNAT
2789 bits from varargs, which we don't care about. */
2792 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2794 reg = gen_rtx_REG (DImode, regno);
2795 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2798 /* Locate the bottom of the register save area. */
2799 cfa_off = (current_frame_info.spill_cfa_off
2800 + current_frame_info.spill_size
2801 + current_frame_info.extra_spill_size);
2803 /* Save the predicate register block either in a register or in memory. */
2804 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2806 reg = gen_rtx_REG (DImode, PR_REG (0));
2807 if (current_frame_info.reg_save_pr != 0)
2809 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2810 insn = emit_move_insn (alt_reg, reg);
2812 /* ??? Denote pr spill/fill by a DImode move that modifies all
2813 64 hard registers. */
2814 RTX_FRAME_RELATED_P (insn) = 1;
2816 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2817 gen_rtx_SET (VOIDmode, alt_reg, reg),
2820 /* Even if we're not going to generate an epilogue, we still
2821 need to save the register so that EH works. */
2823 emit_insn (gen_prologue_use (alt_reg));
2827 alt_regno = next_scratch_gr_reg ();
2828 alt_reg = gen_rtx_REG (DImode, alt_regno);
2829 insn = emit_move_insn (alt_reg, reg);
2830 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2835 /* Handle AR regs in numerical order. All of them get special handling. */
2836 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2837 && current_frame_info.reg_save_ar_unat == 0)
2839 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2840 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2844 /* The alloc insn already copied ar.pfs into a general register. The
2845 only thing we have to do now is copy that register to a stack slot
2846 if we'd not allocated a local register for the job. */
2847 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2848 && current_frame_info.reg_save_ar_pfs == 0)
2850 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2851 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2855 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2857 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2858 if (current_frame_info.reg_save_ar_lc != 0)
2860 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2861 insn = emit_move_insn (alt_reg, reg);
2862 RTX_FRAME_RELATED_P (insn) = 1;
2864 /* Even if we're not going to generate an epilogue, we still
2865 need to save the register so that EH works. */
2867 emit_insn (gen_prologue_use (alt_reg));
2871 alt_regno = next_scratch_gr_reg ();
2872 alt_reg = gen_rtx_REG (DImode, alt_regno);
2873 emit_move_insn (alt_reg, reg);
2874 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2879 if (current_frame_info.reg_save_gp)
2881 insn = emit_move_insn (gen_rtx_REG (DImode,
2882 current_frame_info.reg_save_gp),
2883 pic_offset_table_rtx);
2884 /* We don't know for sure yet if this is actually needed, since
2885 we've not split the PIC call patterns. If all of the calls
2886 are indirect, and not followed by any uses of the gp, then
2887 this save is dead. Allow it to go away. */
2889 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2892 /* We should now be at the base of the gr/br/fr spill area. */
2893 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
2894 + current_frame_info.spill_size));
2896 /* Spill all general registers. */
2897 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2898 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2900 reg = gen_rtx_REG (DImode, regno);
2901 do_spill (gen_gr_spill, reg, cfa_off, reg);
2905 /* Handle BR0 specially -- it may be getting stored permanently in
2906 some GR register. */
2907 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2909 reg = gen_rtx_REG (DImode, BR_REG (0));
2910 if (current_frame_info.reg_save_b0 != 0)
2912 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2913 insn = emit_move_insn (alt_reg, reg);
2914 RTX_FRAME_RELATED_P (insn) = 1;
2916 /* Even if we're not going to generate an epilogue, we still
2917 need to save the register so that EH works. */
2919 emit_insn (gen_prologue_use (alt_reg));
2923 alt_regno = next_scratch_gr_reg ();
2924 alt_reg = gen_rtx_REG (DImode, alt_regno);
2925 emit_move_insn (alt_reg, reg);
2926 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2931 /* Spill the rest of the BR registers. */
2932 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2933 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2935 alt_regno = next_scratch_gr_reg ();
2936 alt_reg = gen_rtx_REG (DImode, alt_regno);
2937 reg = gen_rtx_REG (DImode, regno);
2938 emit_move_insn (alt_reg, reg);
2939 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2943 /* Align the frame and spill all FR registers. */
2944 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2945 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2947 gcc_assert (!(cfa_off & 15));
2948 reg = gen_rtx_REG (XFmode, regno);
2949 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2953 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
2955 finish_spill_pointers ();
2958 /* Called after register allocation to add any instructions needed for the
2959 epilogue. Using an epilogue insn is favored compared to putting all of the
2960 instructions in output_function_epilogue(), since it allows the scheduler
2961 to intermix instructions with the restores of the caller saved registers. In
2962 some cases, it might be necessary to emit a barrier instruction as the last
2963 insn to prevent such scheduling. */
2966 ia64_expand_epilogue (int sibcall_p)
2968 rtx insn, reg, alt_reg, ar_unat_save_reg;
2969 int regno, alt_regno, cfa_off;
2971 ia64_compute_frame_size (get_frame_size ());
2973 /* If there is a frame pointer, then we use it instead of the stack
2974 pointer, so that the stack pointer does not need to be valid when
2975 the epilogue starts. See EXIT_IGNORE_STACK. */
2976 if (frame_pointer_needed)
2977 setup_spill_pointers (current_frame_info.n_spilled,
2978 hard_frame_pointer_rtx, 0);
2980 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2981 current_frame_info.total_size);
2983 if (current_frame_info.total_size != 0)
2985 /* ??? At this point we must generate a magic insn that appears to
2986 modify the spill iterators and the frame pointer. This would
2987 allow the most scheduling freedom. For now, just hard stop. */
2988 emit_insn (gen_blockage ());
2991 /* Locate the bottom of the register save area. */
2992 cfa_off = (current_frame_info.spill_cfa_off
2993 + current_frame_info.spill_size
2994 + current_frame_info.extra_spill_size);
2996 /* Restore the predicate registers. */
2997 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2999 if (current_frame_info.reg_save_pr != 0)
3000 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
3003 alt_regno = next_scratch_gr_reg ();
3004 alt_reg = gen_rtx_REG (DImode, alt_regno);
3005 do_restore (gen_movdi_x, alt_reg, cfa_off);
3008 reg = gen_rtx_REG (DImode, PR_REG (0));
3009 emit_move_insn (reg, alt_reg);
3012 /* Restore the application registers. */
3014 /* Load the saved unat from the stack, but do not restore it until
3015 after the GRs have been restored. */
3016 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3018 if (current_frame_info.reg_save_ar_unat != 0)
3020 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3023 alt_regno = next_scratch_gr_reg ();
3024 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3025 current_frame_info.gr_used_mask |= 1 << alt_regno;
3026 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3031 ar_unat_save_reg = NULL_RTX;
3033 if (current_frame_info.reg_save_ar_pfs != 0)
3035 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3036 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3037 emit_move_insn (reg, alt_reg);
3039 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3041 alt_regno = next_scratch_gr_reg ();
3042 alt_reg = gen_rtx_REG (DImode, alt_regno);
3043 do_restore (gen_movdi_x, alt_reg, cfa_off);
3045 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3046 emit_move_insn (reg, alt_reg);
3049 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3051 if (current_frame_info.reg_save_ar_lc != 0)
3052 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3055 alt_regno = next_scratch_gr_reg ();
3056 alt_reg = gen_rtx_REG (DImode, alt_regno);
3057 do_restore (gen_movdi_x, alt_reg, cfa_off);
3060 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3061 emit_move_insn (reg, alt_reg);
3064 /* We should now be at the base of the gr/br/fr spill area. */
3065 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3066 + current_frame_info.spill_size));
3068 /* The GP may be stored on the stack in the prologue, but it's
3069 never restored in the epilogue. Skip the stack slot. */
3070 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3073 /* Restore all general registers. */
3074 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3075 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3077 reg = gen_rtx_REG (DImode, regno);
3078 do_restore (gen_gr_restore, reg, cfa_off);
3082 /* Restore the branch registers. Handle B0 specially, as it may
3083 have gotten stored in some GR register. */
3084 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3086 if (current_frame_info.reg_save_b0 != 0)
3087 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3090 alt_regno = next_scratch_gr_reg ();
3091 alt_reg = gen_rtx_REG (DImode, alt_regno);
3092 do_restore (gen_movdi_x, alt_reg, cfa_off);
3095 reg = gen_rtx_REG (DImode, BR_REG (0));
3096 emit_move_insn (reg, alt_reg);
3099 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3100 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3102 alt_regno = next_scratch_gr_reg ();
3103 alt_reg = gen_rtx_REG (DImode, alt_regno);
3104 do_restore (gen_movdi_x, alt_reg, cfa_off);
3106 reg = gen_rtx_REG (DImode, regno);
3107 emit_move_insn (reg, alt_reg);
3110 /* Restore floating point registers. */
3111 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3112 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3114 gcc_assert (!(cfa_off & 15));
3115 reg = gen_rtx_REG (XFmode, regno);
3116 do_restore (gen_fr_restore_x, reg, cfa_off);
3120 /* Restore ar.unat for real. */
3121 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3123 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3124 emit_move_insn (reg, ar_unat_save_reg);
3127 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3129 finish_spill_pointers ();
3131 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3133 /* ??? At this point we must generate a magic insn that appears to
3134 modify the spill iterators, the stack pointer, and the frame
3135 pointer. This would allow the most scheduling freedom. For now,
3137 emit_insn (gen_blockage ());
3140 if (cfun->machine->ia64_eh_epilogue_sp)
3141 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3142 else if (frame_pointer_needed)
3144 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3145 RTX_FRAME_RELATED_P (insn) = 1;
3147 else if (current_frame_info.total_size)
3149 rtx offset, frame_size_rtx;
3151 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3152 if (CONST_OK_FOR_I (current_frame_info.total_size))
3153 offset = frame_size_rtx;
3156 regno = next_scratch_gr_reg ();
3157 offset = gen_rtx_REG (DImode, regno);
3158 emit_move_insn (offset, frame_size_rtx);
3161 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3164 RTX_FRAME_RELATED_P (insn) = 1;
3165 if (GET_CODE (offset) != CONST_INT)
3168 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3169 gen_rtx_SET (VOIDmode,
3171 gen_rtx_PLUS (DImode,
3178 if (cfun->machine->ia64_eh_epilogue_bsp)
3179 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3182 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3185 int fp = GR_REG (2);
3186 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
3187 first available call clobbered register. If there was a frame_pointer
3188 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3189 so we have to make sure we're using the string "r2" when emitting
3190 the register name for the assembler. */
3191 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3192 fp = HARD_FRAME_POINTER_REGNUM;
3194 /* We must emit an alloc to force the input registers to become output
3195 registers. Otherwise, if the callee tries to pass its parameters
3196 through to another call without an intervening alloc, then these
3198 /* ??? We don't need to preserve all input registers. We only need to
3199 preserve those input registers used as arguments to the sibling call.
3200 It is unclear how to compute that number here. */
3201 if (current_frame_info.n_input_regs != 0)
3203 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3204 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3205 const0_rtx, const0_rtx,
3206 n_inputs, const0_rtx));
3207 RTX_FRAME_RELATED_P (insn) = 1;
3212 /* Return 1 if br.ret can do all the work required to return from a
3216 ia64_direct_return (void)
3218 if (reload_completed && ! frame_pointer_needed)
3220 ia64_compute_frame_size (get_frame_size ());
3222 return (current_frame_info.total_size == 0
3223 && current_frame_info.n_spilled == 0
3224 && current_frame_info.reg_save_b0 == 0
3225 && current_frame_info.reg_save_pr == 0
3226 && current_frame_info.reg_save_ar_pfs == 0
3227 && current_frame_info.reg_save_ar_unat == 0
3228 && current_frame_info.reg_save_ar_lc == 0);
3233 /* Return the magic cookie that we use to hold the return address
3234 during early compilation. */
3237 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3241 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3244 /* Split this value after reload, now that we know where the return
3245 address is saved. */
3248 ia64_split_return_addr_rtx (rtx dest)
3252 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3254 if (current_frame_info.reg_save_b0 != 0)
3255 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3261 /* Compute offset from CFA for BR0. */
3262 /* ??? Must be kept in sync with ia64_expand_prologue. */
3263 off = (current_frame_info.spill_cfa_off
3264 + current_frame_info.spill_size);
3265 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3266 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3269 /* Convert CFA offset to a register based offset. */
3270 if (frame_pointer_needed)
3271 src = hard_frame_pointer_rtx;
3274 src = stack_pointer_rtx;
3275 off += current_frame_info.total_size;
3278 /* Load address into scratch register. */
3279 if (CONST_OK_FOR_I (off))
3280 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3283 emit_move_insn (dest, GEN_INT (off));
3284 emit_insn (gen_adddi3 (dest, src, dest));
3287 src = gen_rtx_MEM (Pmode, dest);
3291 src = gen_rtx_REG (DImode, BR_REG (0));
3293 emit_move_insn (dest, src);
3297 ia64_hard_regno_rename_ok (int from, int to)
3299 /* Don't clobber any of the registers we reserved for the prologue. */
3300 if (to == current_frame_info.reg_fp
3301 || to == current_frame_info.reg_save_b0
3302 || to == current_frame_info.reg_save_pr
3303 || to == current_frame_info.reg_save_ar_pfs
3304 || to == current_frame_info.reg_save_ar_unat
3305 || to == current_frame_info.reg_save_ar_lc)
3308 if (from == current_frame_info.reg_fp
3309 || from == current_frame_info.reg_save_b0
3310 || from == current_frame_info.reg_save_pr
3311 || from == current_frame_info.reg_save_ar_pfs
3312 || from == current_frame_info.reg_save_ar_unat
3313 || from == current_frame_info.reg_save_ar_lc)
3316 /* Don't use output registers outside the register frame. */
3317 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3320 /* Retain even/oddness on predicate register pairs. */
3321 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3322 return (from & 1) == (to & 1);
3327 /* Target hook for assembling integer objects. Handle word-sized
3328 aligned objects and detect the cases when @fptr is needed. */
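/* E.g. an aligned 64-bit pointer to function foo is emitted as
   "data8 @fptr(foo)", so the word holds the address of an official
   function descriptor rather than the raw code address.  */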
3331 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3333 if (size == POINTER_SIZE / BITS_PER_UNIT
3334 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3335 && GET_CODE (x) == SYMBOL_REF
3336 && SYMBOL_REF_FUNCTION_P (x))
3338 static const char * const directive[2][2] = {
3339 /* 64-bit pointer */ /* 32-bit pointer */
3340 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3341 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3343 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3344 output_addr_const (asm_out_file, x);
3345 fputs (")\n", asm_out_file);
3348 return default_assemble_integer (x, size, aligned_p);
3351 /* Emit the function prologue. */
3354 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3356 int mask, grsave, grsave_prev;
3358 if (current_frame_info.need_regstk)
3359 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3360 current_frame_info.n_input_regs,
3361 current_frame_info.n_local_regs,
3362 current_frame_info.n_output_regs,
3363 current_frame_info.n_rotate_regs);
3365 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3368 /* Emit the .prologue directive. */
3371 grsave = grsave_prev = 0;
3372 if (current_frame_info.reg_save_b0 != 0)
3375 grsave = grsave_prev = current_frame_info.reg_save_b0;
3377 if (current_frame_info.reg_save_ar_pfs != 0
3378 && (grsave_prev == 0
3379 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3382 if (grsave_prev == 0)
3383 grsave = current_frame_info.reg_save_ar_pfs;
3384 grsave_prev = current_frame_info.reg_save_ar_pfs;
3386 if (current_frame_info.reg_fp != 0
3387 && (grsave_prev == 0
3388 || current_frame_info.reg_fp == grsave_prev + 1))
3391 if (grsave_prev == 0)
3392 grsave = HARD_FRAME_POINTER_REGNUM;
3393 grsave_prev = current_frame_info.reg_fp;
3395 if (current_frame_info.reg_save_pr != 0
3396 && (grsave_prev == 0
3397 || current_frame_info.reg_save_pr == grsave_prev + 1))
3400 if (grsave_prev == 0)
3401 grsave = current_frame_info.reg_save_pr;
3404 if (mask && TARGET_GNU_AS)
3405 fprintf (file, "\t.prologue %d, %d\n", mask,
3406 ia64_dbx_register_number (grsave));
3408 fputs ("\t.prologue\n", file);
3410 /* Emit a .spill directive, if necessary, to relocate the base of
3411 the register spill area. */
3412 if (current_frame_info.spill_cfa_off != -16)
3413 fprintf (file, "\t.spill %ld\n",
3414 (long) (current_frame_info.spill_cfa_off
3415 + current_frame_info.spill_size));
3418 /* Emit the .body directive at the scheduled end of the prologue. */
3421 ia64_output_function_end_prologue (FILE *file)
3423 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3426 fputs ("\t.body\n", file);
3429 /* Emit the function epilogue. */
3432 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3433 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3437 if (current_frame_info.reg_fp)
3439 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3440 reg_names[HARD_FRAME_POINTER_REGNUM]
3441 = reg_names[current_frame_info.reg_fp];
3442 reg_names[current_frame_info.reg_fp] = tmp;
3444 if (! TARGET_REG_NAMES)
3446 for (i = 0; i < current_frame_info.n_input_regs; i++)
3447 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3448 for (i = 0; i < current_frame_info.n_local_regs; i++)
3449 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3450 for (i = 0; i < current_frame_info.n_output_regs; i++)
3451 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3454 current_frame_info.initialized = 0;
3458 ia64_dbx_register_number (int regno)
3460 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3461 from its home at loc79 to something inside the register frame. We
3462 must perform the same renumbering here for the debug info. */
3463 if (current_frame_info.reg_fp)
3465 if (regno == HARD_FRAME_POINTER_REGNUM)
3466 regno = current_frame_info.reg_fp;
3467 else if (regno == current_frame_info.reg_fp)
3468 regno = HARD_FRAME_POINTER_REGNUM;
3471 if (IN_REGNO_P (regno))
3472 return 32 + regno - IN_REG (0);
3473 else if (LOC_REGNO_P (regno))
3474 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3475 else if (OUT_REGNO_P (regno))
3476 return (32 + current_frame_info.n_input_regs
3477 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3483 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3485 rtx addr_reg, eight = GEN_INT (8);
3487 /* The Intel assembler requires that the global __ia64_trampoline symbol
3488 be declared explicitly */
3491 static bool declared_ia64_trampoline = false;
3493 if (!declared_ia64_trampoline)
3495 declared_ia64_trampoline = true;
3496 (*targetm.asm_out.globalize_label) (asm_out_file,
3497 "__ia64_trampoline");
3501 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3502 addr = convert_memory_address (Pmode, addr);
3503 fnaddr = convert_memory_address (Pmode, fnaddr);
3504 static_chain = convert_memory_address (Pmode, static_chain);
3506 /* Load up our iterator. */
3507 addr_reg = gen_reg_rtx (Pmode);
3508 emit_move_insn (addr_reg, addr);
3510 /* The first two words are the fake descriptor:
3511 __ia64_trampoline, ADDR+16. */
3512 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3513 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3514 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3516 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3517 copy_to_reg (plus_constant (addr, 16)));
3518 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3520 /* The third word is the target descriptor. */
3521 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3522 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3524 /* The fourth word is the static chain. */
3525 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
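/* The trampoline at ADDR therefore ends up holding, word by word:
   __ia64_trampoline, ADDR+16, the target descriptor, and the static
   chain.  */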
3528 /* Do any needed setup for a variadic function. CUM has not been updated
3529 for the last named argument which has type TYPE and mode MODE.
3531 We generate the actual spill instructions during prologue generation. */
3534 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3535 tree type, int * pretend_size,
3536 int second_time ATTRIBUTE_UNUSED)
3538 CUMULATIVE_ARGS next_cum = *cum;
3540 /* Skip the current argument. */
3541 ia64_function_arg_advance (&next_cum, mode, type, 1);
3543 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3545 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3546 *pretend_size = n * UNITS_PER_WORD;
3547 cfun->machine->n_varargs = n;
3551 /* Check whether TYPE is a homogeneous floating point aggregate. If
3552 it is, return the mode of the floating point type that appears
3553 in all leaves. If it is not, return VOIDmode.
3555 An aggregate is a homogeneous floating point aggregate if all
3556 fields/elements in it have the same floating point type (e.g.,
3557 SFmode). 128-bit quad-precision floats are excluded.
3559 Variable sized aggregates should never arrive here, since we should
3560 have already decided to pass them by reference. Top-level zero-sized
3561 aggregates are excluded because our parallels crash the middle-end. */
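/* For example, struct { float x, y, z; } is an SFmode HFA, while
   struct { float x; double y; } is not (mixed element types), and a
   TFmode or TCmode quad-precision member disqualifies the aggregate.  */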
3563 static enum machine_mode
3564 hfa_element_mode (tree type, bool nested)
3566 enum machine_mode element_mode = VOIDmode;
3567 enum machine_mode mode;
3568 enum tree_code code = TREE_CODE (type);
3569 int know_element_mode = 0;
3572 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3577 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3578 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3579 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3580 case LANG_TYPE: case FUNCTION_TYPE:
3583 /* Fortran complex types are supposed to be HFAs, so we need to handle
3584 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3587 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3588 && TYPE_MODE (type) != TCmode)
3589 return GET_MODE_INNER (TYPE_MODE (type));
3594 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3595 mode if this is contained within an aggregate. */
3596 if (nested && TYPE_MODE (type) != TFmode)
3597 return TYPE_MODE (type);
3602 return hfa_element_mode (TREE_TYPE (type), 1);
3606 case QUAL_UNION_TYPE:
3607 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3609 if (TREE_CODE (t) != FIELD_DECL)
3612 mode = hfa_element_mode (TREE_TYPE (t), 1);
3613 if (know_element_mode)
3615 if (mode != element_mode)
3618 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3622 know_element_mode = 1;
3623 element_mode = mode;
3626 return element_mode;
3629 /* If we reach here, we probably have some front-end specific type
3630 that the backend doesn't know about. This can happen via the
3631 aggregate_value_p call in init_function_start. All we can do is
3632 ignore unknown tree types. */
3639 /* Return the number of words required to hold a quantity of TYPE and MODE
3640 when passed as an argument. */
3642 ia64_function_arg_words (tree type, enum machine_mode mode)
3646 if (mode == BLKmode)
3647 words = int_size_in_bytes (type);
3649 words = GET_MODE_SIZE (mode);
3651 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3654 /* Return the number of registers that should be skipped so the current
3655 argument (described by TYPE and WORDS) will be properly aligned.
3657 Integer and float arguments larger than 8 bytes start at the next
3658 even boundary. Aggregates larger than 8 bytes start at the next
3659 even boundary if the aggregate has 16 byte alignment. Note that
3660 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3661 but are still to be aligned in registers.
3663 ??? The ABI does not specify how to handle aggregates with
3664 alignment from 9 to 15 bytes, or greater than 16. We handle them
3665 all as if they had 16 byte alignment. Such aggregates can occur
3666 only if gcc extensions are used. */
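/* E.g. a 16 byte aligned aggregate arriving when cum->words is odd gets
   an offset of 1, skipping one slot so that it starts on an even,
   16 byte aligned argument slot.  */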
3668 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3670 if ((cum->words & 1) == 0)
3674 && TREE_CODE (type) != INTEGER_TYPE
3675 && TREE_CODE (type) != REAL_TYPE)
3676 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3681 /* Return rtx for register where argument is passed, or zero if it is passed
3683 /* ??? 128-bit quad-precision floats are always passed in general
3687 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3688 int named, int incoming)
3690 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3691 int words = ia64_function_arg_words (type, mode);
3692 int offset = ia64_function_arg_offset (cum, type, words);
3693 enum machine_mode hfa_mode = VOIDmode;
3695 /* If all argument slots are used, then it must go on the stack. */
3696 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3699 /* Check for and handle homogeneous FP aggregates. */
3701 hfa_mode = hfa_element_mode (type, 0);
3703 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3704 and unprototyped hfas are passed specially. */
3705 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3709 int fp_regs = cum->fp_regs;
3710 int int_regs = cum->words + offset;
3711 int hfa_size = GET_MODE_SIZE (hfa_mode);
3715 /* If prototyped, pass it in FR regs then GR regs.
3716 If not prototyped, pass it in both FR and GR regs.
3718 If this is an SFmode aggregate, then it is possible to run out of
3719 FR regs while GR regs are still left. In that case, we pass the
3720 remaining part in the GR regs. */
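/* A concrete case, assuming no FR argument registers are in use yet: a
   struct of ten floats fills all eight FR argument registers with its
   first eight elements, and the remaining two floats are passed in a
   general register by the second loop below.  */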
3722 /* Fill the FP regs. We do this always. We stop if we reach the end
3723 of the argument, the last FP register, or the last argument slot. */
3725 byte_size = ((mode == BLKmode)
3726 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3727 args_byte_size = int_regs * UNITS_PER_WORD;
3729 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3730 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3732 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3733 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3737 args_byte_size += hfa_size;
3741 /* If no prototype, then the whole thing must go in GR regs. */
3742 if (! cum->prototype)
3744 /* If this is an SFmode aggregate, then we might have some left over
3745 that needs to go in GR regs. */
3746 else if (byte_size != offset)
3747 int_regs += offset / UNITS_PER_WORD;
3749 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3751 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3753 enum machine_mode gr_mode = DImode;
3754 unsigned int gr_size;
3756 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3757 then this goes in a GR reg left adjusted/little endian, right
3758 adjusted/big endian. */
3759 /* ??? Currently this is handled wrong, because 4-byte hunks are
3760 always right adjusted/little endian. */
3763 /* If we have an even 4 byte hunk because the aggregate is a
3764 multiple of 4 bytes in size, then this goes in a GR reg right
3765 adjusted/little endian. */
3766 else if (byte_size - offset == 4)
3769 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3770 gen_rtx_REG (gr_mode, (basereg
3774 gr_size = GET_MODE_SIZE (gr_mode);
3776 if (gr_size == UNITS_PER_WORD
3777 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3779 else if (gr_size > UNITS_PER_WORD)
3780 int_regs += gr_size / UNITS_PER_WORD;
3782 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3785 /* Integral and aggregates go in general registers. If we have run out of
3786 FR registers, then FP values must also go in general registers. This can
3787 happen when we have an SFmode HFA.
3788 else if (mode == TFmode || mode == TCmode
3789 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3791 int byte_size = ((mode == BLKmode)
3792 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3793 if (BYTES_BIG_ENDIAN
3794 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3795 && byte_size < UNITS_PER_WORD
3798 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3799 gen_rtx_REG (DImode,
3800 (basereg + cum->words
3803 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3806 return gen_rtx_REG (mode, basereg + cum->words + offset);
3810 /* If there is a prototype, then FP values go in a FR register when
3811 named, and in a GR register when unnamed. */
3812 else if (cum->prototype)
3815 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3816 /* In big-endian mode, an anonymous SFmode value must be represented
3817 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3818 the value into the high half of the general register. */
3819 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3820 return gen_rtx_PARALLEL (mode,
3822 gen_rtx_EXPR_LIST (VOIDmode,
3823 gen_rtx_REG (DImode, basereg + cum->words + offset),
3825 /* Similarly, an anonymous XFmode value must be split into two
3826 registers and padded appropriately. */
3827 else if (BYTES_BIG_ENDIAN && mode == XFmode)
3830 loc[0] = gen_rtx_EXPR_LIST (VOIDmode,
3831 gen_rtx_REG (DImode, basereg + cum->words + offset),
3833 loc[1] = gen_rtx_EXPR_LIST (VOIDmode,
3834 gen_rtx_REG (DImode, basereg + cum->words + offset + 1),
3835 GEN_INT (UNITS_PER_WORD));
3836 return gen_rtx_PARALLEL (mode, gen_rtvec_v (2, loc));
3839 return gen_rtx_REG (mode, basereg + cum->words + offset);
3841 /* If there is no prototype, then FP values go in both FR and GR
3845 /* See comment above. */
3846 enum machine_mode inner_mode =
3847 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3849 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3850 gen_rtx_REG (mode, (FR_ARG_FIRST
3853 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3854 gen_rtx_REG (inner_mode,
3855 (basereg + cum->words
3859 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3863 /* Return number of bytes, at the beginning of the argument, that must be
3864 put in registers. 0 if the argument is entirely in registers or entirely
3868 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3869 tree type, bool named ATTRIBUTE_UNUSED)
3871 int words = ia64_function_arg_words (type, mode);
3872 int offset = ia64_function_arg_offset (cum, type, words);
3874 /* If all argument slots are used, then it must go on the stack. */
3875 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3878 /* It doesn't matter whether the argument goes in FR or GR regs. If
3879 it fits within the 8 argument slots, then it goes entirely in
3880 registers. If it extends past the last argument slot, then the rest
3881 goes on the stack. */
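/* E.g. a six word argument whose first slot is slot 5 has three words in
   registers and three on the stack, so 24 bytes are reported here
   (MAX_ARGUMENT_SLOTS being 8 and UNITS_PER_WORD 8).  */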
3883 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3886 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
3889 /* Update CUM to point after this argument. This is patterned after
3890 ia64_function_arg. */
3893 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3894 tree type, int named)
3896 int words = ia64_function_arg_words (type, mode);
3897 int offset = ia64_function_arg_offset (cum, type, words);
3898 enum machine_mode hfa_mode = VOIDmode;
3900 /* If all arg slots are already full, then there is nothing to do. */
3901 if (cum->words >= MAX_ARGUMENT_SLOTS)
3904 cum->words += words + offset;
3906 /* Check for and handle homogeneous FP aggregates. */
3908 hfa_mode = hfa_element_mode (type, 0);
3910 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3911 and unprototyped hfas are passed specially. */
3912 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3914 int fp_regs = cum->fp_regs;
3915 /* This is the original value of cum->words + offset. */
3916 int int_regs = cum->words - words;
3917 int hfa_size = GET_MODE_SIZE (hfa_mode);
3921 /* If prototyped, pass it in FR regs then GR regs.
3922 If not prototyped, pass it in both FR and GR regs.
3924 If this is an SFmode aggregate, then it is possible to run out of
3925 FR regs while GR regs are still left. In that case, we pass the
3926 remaining part in the GR regs. */
3928 /* Fill the FP regs. We do this always. We stop if we reach the end
3929 of the argument, the last FP register, or the last argument slot. */
3931 byte_size = ((mode == BLKmode)
3932 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3933 args_byte_size = int_regs * UNITS_PER_WORD;
3935 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3936 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3939 args_byte_size += hfa_size;
3943 cum->fp_regs = fp_regs;
3946 /* Integral and aggregates go in general registers. So do TFmode FP values.
3947 If we have run out of FR registers, then other FP values must also go in
3948 general registers. This can happen when we have an SFmode HFA.
3949 else if (mode == TFmode || mode == TCmode
3950 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3951 cum->int_regs = cum->words;
3953 /* If there is a prototype, then FP values go in a FR register when
3954 named, and in a GR register when unnamed. */
3955 else if (cum->prototype)
3958 cum->int_regs = cum->words;
3960 /* ??? Complex types should not reach here. */
3961 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3963 /* If there is no prototype, then FP values go in both FR and GR
3967 /* ??? Complex types should not reach here. */
3968 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3969 cum->int_regs = cum->words;
3973 /* Arguments with alignment larger than 8 bytes start at the next even
3974 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
3975 even though their normal alignment is 8 bytes. See ia64_function_arg. */
3978 ia64_function_arg_boundary (enum machine_mode mode, tree type)
3981 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
3982 return PARM_BOUNDARY * 2;
3986 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
3987 return PARM_BOUNDARY * 2;
3989 return PARM_BOUNDARY;
3992 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
3993 return PARM_BOUNDARY * 2;
3995 return PARM_BOUNDARY;
3998 /* Variable sized types are passed by reference. */
3999 /* ??? At present this is a GCC extension to the IA-64 ABI. */
4002 ia64_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4003 enum machine_mode mode ATTRIBUTE_UNUSED,
4004 tree type, bool named ATTRIBUTE_UNUSED)
4006 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
4009 /* True if it is OK to do sibling call optimization for the specified
4010 call expression EXP. DECL will be the called function, or NULL if
4011 this is an indirect call. */
4013 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4015 /* We can't perform a sibcall if the current function has the syscall_linkage
4017 if (lookup_attribute ("syscall_linkage",
4018 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4021 /* We must always return with our current GP. This means we can
4022 only sibcall to functions defined in the current module. */
4023 return decl && (*targetm.binds_local_p) (decl);
4027 /* Implement va_arg. */
4030 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
4032 /* Variable sized types are passed by reference. */
4033 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4035 tree ptrtype = build_pointer_type (type);
4036 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4037 return build_va_arg_indirect_ref (addr);
4040 /* Aggregate arguments with alignment larger than 8 bytes start at
4041 the next even boundary. Integer and floating point arguments
4042 do so if they are larger than 8 bytes, whether or not they are
4043 also aligned larger than 8 bytes. */
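/* The round-up below computes valist = (valist + 15) & -16 with the
   usual 8 byte UNITS_PER_WORD, i.e. the next 16 byte boundary.  */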
4044 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4045 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4047 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
4048 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
4049 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
4050 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
4051 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
4052 gimplify_and_add (t, pre_p);
4055 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
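/* Illustrative sketch of the alignment code above, assuming the usual
   UNITS_PER_WORD of 8: for an argument that must start on a 16-byte
   boundary the gimplified sequence is roughly

     valist = (valist + 15) & -16;

   after which the normal std_gimplify_va_arg_expr handling applies.  */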
4058 /* Return 1 if the function return value is returned in memory.  Return 0 if it is in a register.  */
4062 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
4064 enum machine_mode mode;
4065 enum machine_mode hfa_mode;
4066 HOST_WIDE_INT byte_size;
4068 mode = TYPE_MODE (valtype);
4069 byte_size = GET_MODE_SIZE (mode);
4070 if (mode == BLKmode)
4072 byte_size = int_size_in_bytes (valtype);
4077 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4079 hfa_mode = hfa_element_mode (valtype, 0);
4080 if (hfa_mode != VOIDmode)
4082 int hfa_size = GET_MODE_SIZE (hfa_mode);
4084 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4089 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4095 /* Return rtx for register that holds the function return value. */
4098 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4100 enum machine_mode mode;
4101 enum machine_mode hfa_mode;
4103 mode = TYPE_MODE (valtype);
4104 hfa_mode = hfa_element_mode (valtype, 0);
4106 if (hfa_mode != VOIDmode)
4114 hfa_size = GET_MODE_SIZE (hfa_mode);
4115 byte_size = ((mode == BLKmode)
4116 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4118 for (i = 0; offset < byte_size; i++)
4120 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4121 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4125 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4127 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4128 return gen_rtx_REG (mode, FR_ARG_FIRST);
4131 bool need_parallel = false;
4133 /* In big-endian mode, we need to manage the layout of aggregates
4134 in the registers so that we get the bits properly aligned in
4135 the highpart of the registers. */
4136 if (BYTES_BIG_ENDIAN
4137 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4138 need_parallel = true;
4140 /* Something like struct S { long double x; char a[0] } is not an
4141 HFA structure, and therefore doesn't go in fp registers. But
4142 the middle-end will give it XFmode anyway, and XFmode values
4143 don't normally fit in integer registers. So we need to smuggle
4144 the value inside a parallel. */
4145 else if (mode == XFmode || mode == XCmode)
4146 need_parallel = true;
4156 bytesize = int_size_in_bytes (valtype);
4157 /* An empty PARALLEL is invalid here, but the return value
4158 doesn't matter for empty structs. */
4160 return gen_rtx_REG (mode, GR_RET_FIRST);
4161 for (i = 0; offset < bytesize; i++)
4163 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4164 gen_rtx_REG (DImode,
4167 offset += UNITS_PER_WORD;
4169 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4172 return gen_rtx_REG (mode, GR_RET_FIRST);
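/* Illustrative example (not from the original sources): for an HFA return
   type such as struct { float x, y, z, w; }, hfa_mode is SFmode and the
   loop above builds a PARALLEL of four SFmode registers starting at
   FR_ARG_FIRST (f8), one per element offset, instead of using the general
   return registers.  */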
4176 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4177 We need to emit DTP-relative relocations. */
4180 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4182 gcc_assert (size == 8);
4183 fputs ("\tdata8.ua\t@dtprel(", file);
4184 output_addr_const (file, x);
4188 /* Print a memory address as an operand to reference that memory location. */
4190 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4191 also call this from ia64_print_operand for memory addresses. */
4194 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4195 rtx address ATTRIBUTE_UNUSED)
4199 /* Print an operand to an assembler instruction.
4200 C Swap and print a comparison operator.
4201 D Print an FP comparison operator.
4202 E Print 32 - constant, for SImode shifts as extract.
4203 e Print 64 - constant, for DImode rotates.
4204 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4205 a floating point register emitted normally.
4206 I Invert a predicate register by adding 1.
4207 J Select the proper predicate register for a condition.
4208 j Select the inverse predicate register for a condition.
4209 O Append .acq for volatile load.
4210 P Postincrement of a MEM.
4211 Q Append .rel for volatile store.
4212 S Shift amount for shladd instruction.
4213 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4214 for Intel assembler.
4215 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4216 for Intel assembler.
4217 r Print register name, or constant 0 as r0. HP compatibility for Linux kernel.
4219 v Print vector constant value as an 8-byte integer value. */
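/* A few illustrative uses of the codes above (for exposition only): on a
   volatile post-increment DImode load operand, "%O0" appends ".acq" to
   the mnemonic and "%P0" prints the post-increment amount ", 8"; "%r1"
   prints "r0" when operand 1 is the constant zero.  */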
4222 ia64_print_operand (FILE * file, rtx x, int code)
4229 /* Handled below. */
4234 enum rtx_code c = swap_condition (GET_CODE (x));
4235 fputs (GET_RTX_NAME (c), file);
4240 switch (GET_CODE (x))
4252 str = GET_RTX_NAME (GET_CODE (x));
4259 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4263 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4267 if (x == CONST0_RTX (GET_MODE (x)))
4268 str = reg_names [FR_REG (0)];
4269 else if (x == CONST1_RTX (GET_MODE (x)))
4270 str = reg_names [FR_REG (1)];
4273 gcc_assert (GET_CODE (x) == REG);
4274 str = reg_names [REGNO (x)];
4280 fputs (reg_names [REGNO (x) + 1], file);
4286 unsigned int regno = REGNO (XEXP (x, 0));
4287 if (GET_CODE (x) == EQ)
4291 fputs (reg_names [regno], file);
4296 if (MEM_VOLATILE_P (x))
4297 fputs(".acq", file);
4302 HOST_WIDE_INT value;
4304 switch (GET_CODE (XEXP (x, 0)))
4310 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4311 if (GET_CODE (x) == CONST_INT)
4315 gcc_assert (GET_CODE (x) == REG);
4316 fprintf (file, ", %s", reg_names[REGNO (x)]);
4322 value = GET_MODE_SIZE (GET_MODE (x));
4326 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4330 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4335 if (MEM_VOLATILE_P (x))
4336 fputs(".rel", file);
4340 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4344 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4346 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4352 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4354 const char *prefix = "0x";
4355 if (INTVAL (x) & 0x80000000)
4357 fprintf (file, "0xffffffff");
4360 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4366 /* If this operand is the constant zero, write it as register zero.
4367 Any register, zero, or CONST_INT value is OK here. */
4368 if (GET_CODE (x) == REG)
4369 fputs (reg_names[REGNO (x)], file);
4370 else if (x == CONST0_RTX (GET_MODE (x)))
4372 else if (GET_CODE (x) == CONST_INT)
4373 output_addr_const (file, x);
4375 output_operand_lossage ("invalid %%r value");
4379 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4380 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4387 /* For conditional branches, returns or calls, substitute
4388 sptk, dptk, dpnt, or spnt for %s. */
4389 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4392 int pred_val = INTVAL (XEXP (x, 0));
4394 /* Guess top and bottom 10% statically predicted. */
4395 if (pred_val < REG_BR_PROB_BASE / 50)
4397 else if (pred_val < REG_BR_PROB_BASE / 2)
4399 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4404 else if (GET_CODE (current_output_insn) == CALL_INSN)
4409 fputs (which, file);
4414 x = current_insn_predicate;
4417 unsigned int regno = REGNO (XEXP (x, 0));
4418 if (GET_CODE (x) == EQ)
4420 fprintf (file, "(%s) ", reg_names [regno]);
4425 output_operand_lossage ("ia64_print_operand: unknown code");
4429 switch (GET_CODE (x))
4431 /* This happens for the spill/restore instructions. */
4436 /* ... fall through ... */
4439 fputs (reg_names [REGNO (x)], file);
4444 rtx addr = XEXP (x, 0);
4445 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4446 addr = XEXP (addr, 0);
4447 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4452 output_addr_const (file, x);
4459 /* Compute a (partial) cost for rtx X. Return true if the complete
4460 cost has been computed, and false if subexpressions should be
4461 scanned. In either case, *TOTAL contains the cost result. */
4462 /* ??? This is incomplete. */
4465 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4473 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4476 if (CONST_OK_FOR_I (INTVAL (x)))
4478 else if (CONST_OK_FOR_J (INTVAL (x)))
4481 *total = COSTS_N_INSNS (1);
4484 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4487 *total = COSTS_N_INSNS (1);
4492 *total = COSTS_N_INSNS (1);
4498 *total = COSTS_N_INSNS (3);
4502 /* For multiplies wider than HImode, we have to go to the FPU,
4503 which normally involves copies. Plus there's the latency
4504 of the multiply itself, and the latency of the instructions to
4505 transfer integer regs to FP regs. */
4506 /* ??? Check for FP mode. */
4507 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4508 *total = COSTS_N_INSNS (10);
4510 *total = COSTS_N_INSNS (2);
4518 *total = COSTS_N_INSNS (1);
4525 /* We make divide expensive, so that divide-by-constant will be
4526 optimized to a multiply. */
4527 *total = COSTS_N_INSNS (60);
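/* Illustrative consequence (added note): because a divide is costed at 60
   insns, an expression such as x / 10 is expanded as a multiplication by
   a magic constant followed by shifts, which the cost model considers far
   cheaper than a run-time division.  */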
4535 /* Calculate the cost of moving data from a register in class FROM to
4536 one in class TO, using MODE. */
4539 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4542 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4543 if (to == ADDL_REGS)
4545 if (from == ADDL_REGS)
4548 /* All costs are symmetric, so reduce cases by putting the
4549 lower number class as the destination. */
4552 enum reg_class tmp = to;
4553 to = from, from = tmp;
4556 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4557 so that we get secondary memory reloads. Between FR_REGS,
4558 we have to make this at least as expensive as MEMORY_MOVE_COST
4559 to avoid spectacularly poor register class preferencing. */
4562 if (to != GR_REGS || from != GR_REGS)
4563 return MEMORY_MOVE_COST (mode, to, 0);
4571 /* Moving between PR registers takes two insns. */
4572 if (from == PR_REGS)
4574 /* Moving between PR and anything but GR is impossible. */
4575 if (from != GR_REGS)
4576 return MEMORY_MOVE_COST (mode, to, 0);
4580 /* Moving between BR and anything but GR is impossible. */
4581 if (from != GR_REGS && from != GR_AND_BR_REGS)
4582 return MEMORY_MOVE_COST (mode, to, 0);
4587 /* Moving between AR and anything but GR is impossible. */
4588 if (from != GR_REGS)
4589 return MEMORY_MOVE_COST (mode, to, 0);
4594 case GR_AND_FR_REGS:
4595 case GR_AND_BR_REGS:
4606 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4607 to use when copying X into that class. */
4610 ia64_preferred_reload_class (rtx x, enum reg_class class)
4615 /* Don't allow volatile mem reloads into floating point registers.
4616 This is defined to force reload to choose the r/m case instead
4617 of the f/f case when reloading (set (reg fX) (mem/v)). */
4618 if (MEM_P (x) && MEM_VOLATILE_P (x))
4621 /* Force all unrecognized constants into the constant pool. */
4639 /* This function returns the register class required for a secondary
4640 register when copying between one of the registers in CLASS, and X,
4641 using MODE. A return value of NO_REGS means that no secondary register is required.  */
4645 ia64_secondary_reload_class (enum reg_class class,
4646 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4650 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4651 regno = true_regnum (x);
4658 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4659 interaction. We end up with two pseudos with overlapping lifetimes
4660 both of which are equiv to the same constant, and both which need
4661 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4662 changes depending on the path length, which means the qty_first_reg
4663 check in make_regs_eqv can give different answers at different times.
4664 At some point I'll probably need a reload_indi pattern to handle this.
4667 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4668 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4669 non-general registers for good measure. */
4670 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4673 /* This is needed if a pseudo used as a call_operand gets spilled to a stack slot.  */
4675 if (GET_CODE (x) == MEM)
4680 /* Need to go through general registers to get to other class regs. */
4681 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4684 /* This can happen when a paradoxical subreg is an operand to the muldi3 pattern.  */
4686 /* ??? This shouldn't be necessary after instruction scheduling is
4687 enabled, because paradoxical subregs are not accepted by
4688 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4689 stop the paradoxical subreg stupidity in the *_operand functions in recog.c.  */
4691 if (GET_CODE (x) == MEM
4692 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4693 || GET_MODE (x) == QImode))
4696 /* This can happen because of the ior/and/etc patterns that accept FP
4697 registers as operands. If the third operand is a constant, then it
4698 needs to be reloaded into a FP register. */
4699 if (GET_CODE (x) == CONST_INT)
4702 /* This can happen because of register elimination in a muldi3 insn.
4703 E.g. `26107 * (unsigned long)&u'. */
4704 if (GET_CODE (x) == PLUS)
4709 /* ??? This happens if we cse/gcse a BImode value across a call,
4710 and the function has a nonlocal goto. This is because global
4711 does not allocate call crossing pseudos to hard registers when
4712 current_function_has_nonlocal_goto is true. This is relatively
4713 common for C++ programs that use exceptions. To reproduce,
4714 return NO_REGS and compile libstdc++. */
4715 if (GET_CODE (x) == MEM)
4718 /* This can happen when we take a BImode subreg of a DImode value,
4719 and that DImode value winds up in some non-GR register. */
4720 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4732 /* Emit text to declare externally defined variables and functions, because
4733 the Intel assembler does not support undefined externals. */
4736 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4738 int save_referenced;
4740 /* GNU as does not need anything here, but the HP linker does need
4741 something for external functions. */
4745 || TREE_CODE (decl) != FUNCTION_DECL
4746 || strstr (name, "__builtin_") == name))
4749 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4750 the linker when we do this, so we need to be careful not to do this for
4751 builtin functions which have no library equivalent. Unfortunately, we
4752 can't tell here whether or not a function will actually be called by
4753 expand_expr, so we pull in library functions even if we may not need them.  */
4755 if (! strcmp (name, "__builtin_next_arg")
4756 || ! strcmp (name, "alloca")
4757 || ! strcmp (name, "__builtin_constant_p")
4758 || ! strcmp (name, "__builtin_args_info"))
4762 ia64_hpux_add_extern_decl (decl);
4765 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and restore it.  */
4767 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4768 if (TREE_CODE (decl) == FUNCTION_DECL)
4769 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4770 (*targetm.asm_out.globalize_label) (file, name);
4771 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4775 /* Parse the -mfixed-range= option string. */
4778 fix_range (const char *const_str)
4781 char *str, *dash, *comma;
4783 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4784 REG2 are either register names or register numbers. The effect
4785 of this option is to mark the registers in the range from REG1 to
4786 REG2 as ``fixed'' so they won't be used by the compiler. This is
4787 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
4789 i = strlen (const_str);
4790 str = (char *) alloca (i + 1);
4791 memcpy (str, const_str, i + 1);
4795 dash = strchr (str, '-');
4798 warning (0, "value of -mfixed-range must have form REG1-REG2");
4803 comma = strchr (dash + 1, ',');
4807 first = decode_reg_name (str);
4810 warning (0, "unknown register name: %s", str);
4814 last = decode_reg_name (dash + 1);
4817 warning (0, "unknown register name: %s", dash + 1);
4825 warning (0, "%s-%s is an empty range", str, dash + 1);
4829 for (i = first; i <= last; ++i)
4830 fixed_regs[i] = call_used_regs[i] = 1;
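/* Usage sketch (illustrative): an option such as

     -mfixed-range=f32-f127

   marks f32 through f127 as fixed (and call-used), and several ranges may
   be given separated by commas, e.g. -mfixed-range=f32-f63,f96-f127.  */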
4840 /* Implement TARGET_HANDLE_OPTION. */
4843 ia64_handle_option (size_t code, const char *arg, int value)
4847 case OPT_mfixed_range_:
4851 case OPT_mtls_size_:
4852 if (value != 14 && value != 22 && value != 64)
4853 error ("bad value %<%s%> for -mtls-size= switch", arg);
4860 const char *name; /* processor name or nickname. */
4861 enum processor_type processor;
4863 const processor_alias_table[] =
4865 {"itanium", PROCESSOR_ITANIUM},
4866 {"itanium1", PROCESSOR_ITANIUM},
4867 {"merced", PROCESSOR_ITANIUM},
4868 {"itanium2", PROCESSOR_ITANIUM2},
4869 {"mckinley", PROCESSOR_ITANIUM2},
4871 int const pta_size = ARRAY_SIZE (processor_alias_table);
4874 for (i = 0; i < pta_size; i++)
4875 if (!strcmp (arg, processor_alias_table[i].name))
4877 ia64_tune = processor_alias_table[i].processor;
4881 error ("bad value %<%s%> for -mtune= switch", arg);
4890 /* Implement OVERRIDE_OPTIONS. */
4893 ia64_override_options (void)
4895 if (TARGET_AUTO_PIC)
4896 target_flags |= MASK_CONST_GP;
4898 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
4900 warning (0, "not yet implemented: latency-optimized inline square root");
4901 TARGET_INLINE_SQRT = INL_MAX_THR;
4904 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4905 flag_schedule_insns_after_reload = 0;
4907 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4909 init_machine_status = ia64_init_machine_status;
4912 static struct machine_function *
4913 ia64_init_machine_status (void)
4915 return ggc_alloc_cleared (sizeof (struct machine_function));
4918 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4919 static enum attr_type ia64_safe_type (rtx);
4921 static enum attr_itanium_class
4922 ia64_safe_itanium_class (rtx insn)
4924 if (recog_memoized (insn) >= 0)
4925 return get_attr_itanium_class (insn);
4927 return ITANIUM_CLASS_UNKNOWN;
4930 static enum attr_type
4931 ia64_safe_type (rtx insn)
4933 if (recog_memoized (insn) >= 0)
4934 return get_attr_type (insn);
4936 return TYPE_UNKNOWN;
4939 /* The following collection of routines emit instruction group stop bits as
4940 necessary to avoid dependencies. */
4942 /* Need to track some additional registers as far as serialization is
4943 concerned so we can properly handle br.call and br.ret. We could
4944 make these registers visible to gcc, but since these registers are
4945 never explicitly used in gcc generated code, it seems wasteful to
4946 do so (plus it would make the call and return patterns needlessly complex).  */
4948 #define REG_RP (BR_REG (0))
4949 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4950 /* This is used for volatile asms which may require a stop bit immediately
4951 before and after them. */
4952 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4953 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4954 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4956 /* For each register, we keep track of how it has been written in the
4957 current instruction group.
4959 If a register is written unconditionally (no qualifying predicate),
4960 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4962 If a register is written if its qualifying predicate P is true, we
4963 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4964 may be written again by the complement of P (P^1) and when this happens,
4965 WRITE_COUNT gets set to 2.
4967 The result of this is that whenever an insn attempts to write a register
4968 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4970 If a predicate register is written by a floating-point insn, we set
4971 WRITTEN_BY_FP to true.
4973 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4974 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
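/* Illustrative example (assembly shown for exposition only): after

     cmp.eq p6, p7 = r8, r9
     (p6) mov r10 = 1
     (p7) mov r10 = 2

   the two writes to r10 are guarded by complementary predicates, so
   WRITE_COUNT for r10 goes from 1 to 2 without a stop bit being needed;
   a further write to r10 in the same group would then require one.  */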
4976 struct reg_write_state
4978 unsigned int write_count : 2;
4979 unsigned int first_pred : 16;
4980 unsigned int written_by_fp : 1;
4981 unsigned int written_by_and : 1;
4982 unsigned int written_by_or : 1;
4985 /* Cumulative info for the current instruction group. */
4986 struct reg_write_state rws_sum[NUM_REGS];
4987 /* Info for the current instruction. This gets copied to rws_sum after a
4988 stop bit is emitted. */
4989 struct reg_write_state rws_insn[NUM_REGS];
4991 /* Indicates whether this is the first instruction after a stop bit,
4992 in which case we don't need another stop bit. Without this,
4993 ia64_variable_issue will die when scheduling an alloc. */
4994 static int first_instruction;
4996 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4997 RTL for one instruction. */
5000 unsigned int is_write : 1; /* Is register being written? */
5001 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5002 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5003 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5004 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5005 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5008 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
5009 static int rws_access_regno (int, struct reg_flags, int);
5010 static int rws_access_reg (rtx, struct reg_flags, int);
5011 static void update_set_flags (rtx, struct reg_flags *);
5012 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5013 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5014 static void init_insn_group_barriers (void);
5015 static int group_barrier_needed (rtx);
5016 static int safe_group_barrier_needed (rtx);
5018 /* Update *RWS for REGNO, which is being written by the current instruction,
5019 with predicate PRED, and associated register flags in FLAGS. */
5022 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
5025 rws[regno].write_count++;
5027 rws[regno].write_count = 2;
5028 rws[regno].written_by_fp |= flags.is_fp;
5029 /* ??? Not tracking and/or across differing predicates. */
5030 rws[regno].written_by_and = flags.is_and;
5031 rws[regno].written_by_or = flags.is_or;
5032 rws[regno].first_pred = pred;
5035 /* Handle an access to register REGNO of type FLAGS using predicate register
5036 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
5037 a dependency with an earlier instruction in the same group. */
5040 rws_access_regno (int regno, struct reg_flags flags, int pred)
5042 int need_barrier = 0;
5044 gcc_assert (regno < NUM_REGS);
5046 if (! PR_REGNO_P (regno))
5047 flags.is_and = flags.is_or = 0;
5053 /* One insn writes same reg multiple times? */
5054 gcc_assert (!rws_insn[regno].write_count);
5056 /* Update info for current instruction. */
5057 rws_update (rws_insn, regno, flags, pred);
5058 write_count = rws_sum[regno].write_count;
5060 switch (write_count)
5063 /* The register has not been written yet. */
5064 rws_update (rws_sum, regno, flags, pred);
5068 /* The register has been written via a predicate. If this is
5069 not a complementary predicate, then we need a barrier. */
5070 /* ??? This assumes that P and P+1 are always complementary
5071 predicates for P even. */
5072 if (flags.is_and && rws_sum[regno].written_by_and)
5074 else if (flags.is_or && rws_sum[regno].written_by_or)
5076 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5078 rws_update (rws_sum, regno, flags, pred);
5082 /* The register has been unconditionally written already. We need a barrier.  */
5084 if (flags.is_and && rws_sum[regno].written_by_and)
5086 else if (flags.is_or && rws_sum[regno].written_by_or)
5090 rws_sum[regno].written_by_and = flags.is_and;
5091 rws_sum[regno].written_by_or = flags.is_or;
5100 if (flags.is_branch)
5102 /* Branches have several RAW exceptions that allow us to avoid barriers.  */
5105 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5106 /* RAW dependencies on branch regs are permissible as long
5107 as the writer is a non-branch instruction. Since we
5108 never generate code that uses a branch register written
5109 by a branch instruction, handling this case is easy.  */
5113 if (REGNO_REG_CLASS (regno) == PR_REGS
5114 && ! rws_sum[regno].written_by_fp)
5115 /* The predicates of a branch are available within the
5116 same insn group as long as the predicate was written by
5117 something other than a floating-point instruction. */
5121 if (flags.is_and && rws_sum[regno].written_by_and)
5123 if (flags.is_or && rws_sum[regno].written_by_or)
5126 switch (rws_sum[regno].write_count)
5129 /* The register has not been written yet. */
5133 /* The register has been written via a predicate. If this is
5134 not a complementary predicate, then we need a barrier. */
5135 /* ??? This assumes that P and P+1 are always complementary
5136 predicates for P even. */
5137 if ((rws_sum[regno].first_pred ^ 1) != pred)
5142 /* The register has been unconditionally written already. We need a barrier.  */
5152 return need_barrier;
5156 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5158 int regno = REGNO (reg);
5159 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5162 return rws_access_regno (regno, flags, pred);
5165 int need_barrier = 0;
5167 need_barrier |= rws_access_regno (regno + n, flags, pred);
5168 return need_barrier;
5172 /* Examine X, which is a SET rtx, and update the flags stored in
5173 *PFLAGS accordingly.  */
5176 update_set_flags (rtx x, struct reg_flags *pflags)
5178 rtx src = SET_SRC (x);
5180 switch (GET_CODE (src))
5186 /* There are three cases here:
5187 (1) The destination is (pc), in which case this is a branch,
5188 nothing here applies.
5189 (2) The destination is ar.lc, in which case this is a
5190 doloop_end_internal,
5191 (3) The destination is an fp register, in which case this is
5192 an fselect instruction.
5193 In all cases, nothing we do in this function applies. */
5197 if (COMPARISON_P (src)
5198 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5199 /* Set pflags->is_fp to 1 so that we know we're dealing
5200 with a floating point comparison when processing the
5201 destination of the SET. */
5204 /* Discover if this is a parallel comparison. We only handle
5205 and.orcm and or.andcm at present, since we must retain a
5206 strict inverse on the predicate pair. */
5207 else if (GET_CODE (src) == AND)
5209 else if (GET_CODE (src) == IOR)
5216 /* Subroutine of rtx_needs_barrier; this function determines whether the
5217 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5218 are as in rtx_needs_barrier.  */
5222 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5224 int need_barrier = 0;
5226 rtx src = SET_SRC (x);
5228 if (GET_CODE (src) == CALL)
5229 /* We don't need to worry about the result registers that
5230 get written by subroutine call. */
5231 return rtx_needs_barrier (src, flags, pred);
5232 else if (SET_DEST (x) == pc_rtx)
5234 /* X is a conditional branch. */
5235 /* ??? This seems redundant, as the caller sets this bit for all JUMP_INSNs.  */
5237 flags.is_branch = 1;
5238 return rtx_needs_barrier (src, flags, pred);
5241 need_barrier = rtx_needs_barrier (src, flags, pred);
5244 if (GET_CODE (dst) == ZERO_EXTRACT)
5246 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5247 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5248 dst = XEXP (dst, 0);
5250 return need_barrier;
5253 /* Handle an access to rtx X of type FLAGS using predicate register
5254 PRED. Return 1 if this access creates a dependency with an earlier
5255 instruction in the same group. */
5258 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5261 int is_complemented = 0;
5262 int need_barrier = 0;
5263 const char *format_ptr;
5264 struct reg_flags new_flags;
5272 switch (GET_CODE (x))
5275 update_set_flags (x, &new_flags);
5276 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5277 if (GET_CODE (SET_SRC (x)) != CALL)
5279 new_flags.is_write = 1;
5280 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5285 new_flags.is_write = 0;
5286 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5288 /* Avoid multiple register writes, in case this is a pattern with
5289 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5290 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5292 new_flags.is_write = 1;
5293 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5294 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5295 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5300 /* X is a predicated instruction. */
5302 cond = COND_EXEC_TEST (x);
5304 need_barrier = rtx_needs_barrier (cond, flags, 0);
5306 if (GET_CODE (cond) == EQ)
5307 is_complemented = 1;
5308 cond = XEXP (cond, 0);
5309 gcc_assert (GET_CODE (cond) == REG
5310 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5311 pred = REGNO (cond);
5312 if (is_complemented)
5315 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5316 return need_barrier;
5320 /* Clobber & use are for earlier compiler-phases only. */
5325 /* We always emit stop bits for traditional asms. We emit stop bits
5326 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5327 if (GET_CODE (x) != ASM_OPERANDS
5328 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5330 /* Avoid writing the register multiple times if we have multiple
5331 asm outputs. This avoids a failure in rws_access_reg. */
5332 if (! rws_insn[REG_VOLATILE].write_count)
5334 new_flags.is_write = 1;
5335 rws_access_regno (REG_VOLATILE, new_flags, pred);
5340 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5341 We cannot just fall through here since then we would be confused
5342 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
5343 traditional asms unlike their normal usage. */
5345 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5346 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5351 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5353 rtx pat = XVECEXP (x, 0, i);
5354 switch (GET_CODE (pat))
5357 update_set_flags (pat, &new_flags);
5358 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5364 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5375 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5377 rtx pat = XVECEXP (x, 0, i);
5378 if (GET_CODE (pat) == SET)
5380 if (GET_CODE (SET_SRC (pat)) != CALL)
5382 new_flags.is_write = 1;
5383 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5387 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5388 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5393 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5396 if (REGNO (x) == AR_UNAT_REGNUM)
5398 for (i = 0; i < 64; ++i)
5399 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5402 need_barrier = rws_access_reg (x, flags, pred);
5406 /* Find the regs used in memory address computation. */
5407 new_flags.is_write = 0;
5408 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5411 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5412 case SYMBOL_REF: case LABEL_REF: case CONST:
5415 /* Operators with side-effects. */
5416 case POST_INC: case POST_DEC:
5417 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5419 new_flags.is_write = 0;
5420 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5421 new_flags.is_write = 1;
5422 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5426 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5428 new_flags.is_write = 0;
5429 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5430 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5431 new_flags.is_write = 1;
5432 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5435 /* Handle common unary and binary ops for efficiency. */
5436 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5437 case MOD: case UDIV: case UMOD: case AND: case IOR:
5438 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5439 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5440 case NE: case EQ: case GE: case GT: case LE:
5441 case LT: case GEU: case GTU: case LEU: case LTU:
5442 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5443 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5446 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5447 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5448 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5449 case SQRT: case FFS: case POPCOUNT:
5450 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5454 /* VEC_SELECT's second argument is a PARALLEL with integers that
5455 describe the elements selected. On ia64, those integers are
5456 always constants. Avoid walking the PARALLEL so that we don't
5457 get confused with "normal" parallels and then die. */
5458 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5462 switch (XINT (x, 1))
5464 case UNSPEC_LTOFF_DTPMOD:
5465 case UNSPEC_LTOFF_DTPREL:
5467 case UNSPEC_LTOFF_TPREL:
5469 case UNSPEC_PRED_REL_MUTEX:
5470 case UNSPEC_PIC_CALL:
5472 case UNSPEC_FETCHADD_ACQ:
5473 case UNSPEC_BSP_VALUE:
5474 case UNSPEC_FLUSHRS:
5475 case UNSPEC_BUNDLE_SELECTOR:
5478 case UNSPEC_GR_SPILL:
5479 case UNSPEC_GR_RESTORE:
5481 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5482 HOST_WIDE_INT bit = (offset >> 3) & 63;
5484 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5485 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5486 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5491 case UNSPEC_FR_SPILL:
5492 case UNSPEC_FR_RESTORE:
5493 case UNSPEC_GETF_EXP:
5494 case UNSPEC_SETF_EXP:
5496 case UNSPEC_FR_SQRT_RECIP_APPROX:
5497 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5500 case UNSPEC_FR_RECIP_APPROX:
5502 case UNSPEC_COPYSIGN:
5503 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5504 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5507 case UNSPEC_CMPXCHG_ACQ:
5508 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5509 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5517 case UNSPEC_VOLATILE:
5518 switch (XINT (x, 1))
5521 /* Alloc must always be the first instruction of a group.
5522 We force this by always returning true. */
5523 /* ??? We might get better scheduling if we explicitly check for
5524 input/local/output register dependencies, and modify the
5525 scheduler so that alloc is always reordered to the start of
5526 the current group. We could then eliminate all of the
5527 first_instruction code. */
5528 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5530 new_flags.is_write = 1;
5531 rws_access_regno (REG_AR_CFM, new_flags, pred);
5534 case UNSPECV_SET_BSP:
5538 case UNSPECV_BLOCKAGE:
5539 case UNSPECV_INSN_GROUP_BARRIER:
5541 case UNSPECV_PSAC_ALL:
5542 case UNSPECV_PSAC_NORMAL:
5551 new_flags.is_write = 0;
5552 need_barrier = rws_access_regno (REG_RP, flags, pred);
5553 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5555 new_flags.is_write = 1;
5556 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5557 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5561 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5562 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5563 switch (format_ptr[i])
5565 case '0': /* unused field */
5566 case 'i': /* integer */
5567 case 'n': /* note */
5568 case 'w': /* wide integer */
5569 case 's': /* pointer to string */
5570 case 'S': /* optional pointer to string */
5574 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5579 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5580 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5589 return need_barrier;
5592 /* Clear out the state for group_barrier_needed at the start of a
5593 sequence of insns. */
5596 init_insn_group_barriers (void)
5598 memset (rws_sum, 0, sizeof (rws_sum));
5599 first_instruction = 1;
5602 /* Given the current state, determine whether a group barrier (a stop bit) is
5603 necessary before INSN. Return nonzero if so. This modifies the state to
5604 include the effects of INSN as a side-effect. */
5607 group_barrier_needed (rtx insn)
5610 int need_barrier = 0;
5611 struct reg_flags flags;
5613 memset (&flags, 0, sizeof (flags));
5614 switch (GET_CODE (insn))
5620 /* A barrier doesn't imply an instruction group boundary. */
5624 memset (rws_insn, 0, sizeof (rws_insn));
5628 flags.is_branch = 1;
5629 flags.is_sibcall = SIBLING_CALL_P (insn);
5630 memset (rws_insn, 0, sizeof (rws_insn));
5632 /* Don't bundle a call following another call. */
5633 if ((pat = prev_active_insn (insn))
5634 && GET_CODE (pat) == CALL_INSN)
5640 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5644 flags.is_branch = 1;
5646 /* Don't bundle a jump following a call. */
5647 if ((pat = prev_active_insn (insn))
5648 && GET_CODE (pat) == CALL_INSN)
5656 if (GET_CODE (PATTERN (insn)) == USE
5657 || GET_CODE (PATTERN (insn)) == CLOBBER)
5658 /* Don't care about USE and CLOBBER "insns"---those are used to
5659 indicate to the optimizer that it shouldn't get rid of
5660 certain operations. */
5663 pat = PATTERN (insn);
5665 /* Ug. Hack hacks hacked elsewhere. */
5666 switch (recog_memoized (insn))
5668 /* We play dependency tricks with the epilogue in order
5669 to get proper schedules. Undo this for dv analysis. */
5670 case CODE_FOR_epilogue_deallocate_stack:
5671 case CODE_FOR_prologue_allocate_stack:
5672 pat = XVECEXP (pat, 0, 0);
5675 /* The pattern we use for br.cloop confuses the code above.
5676 The second element of the vector is representative. */
5677 case CODE_FOR_doloop_end_internal:
5678 pat = XVECEXP (pat, 0, 1);
5681 /* Doesn't generate code. */
5682 case CODE_FOR_pred_rel_mutex:
5683 case CODE_FOR_prologue_use:
5690 memset (rws_insn, 0, sizeof (rws_insn));
5691 need_barrier = rtx_needs_barrier (pat, flags, 0);
5693 /* Check to see if the previous instruction was a volatile asm.  */
5696 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5703 if (first_instruction && INSN_P (insn)
5704 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5705 && GET_CODE (PATTERN (insn)) != USE
5706 && GET_CODE (PATTERN (insn)) != CLOBBER)
5709 first_instruction = 0;
5712 return need_barrier;
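/* Illustrative example (assembly shown for exposition only): in

     add r14 = r32, r33
     add r15 = r14, r34

   the second insn reads r14 in the same instruction group that wrote it,
   so group_barrier_needed returns nonzero for it and a stop bit (";;")
   must be emitted between the two instructions.  */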
5715 /* Like group_barrier_needed, but do not clobber the current state. */
5718 safe_group_barrier_needed (rtx insn)
5720 struct reg_write_state rws_saved[NUM_REGS];
5721 int saved_first_instruction;
5724 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5725 saved_first_instruction = first_instruction;
5727 t = group_barrier_needed (insn);
5729 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5730 first_instruction = saved_first_instruction;
5735 /* Scan the current function and insert stop bits as necessary to
5736 eliminate dependencies. This function assumes that a final
5737 instruction scheduling pass has been run which has already
5738 inserted most of the necessary stop bits. This function only
5739 inserts new ones at basic block boundaries, since these are
5740 invisible to the scheduler. */
5743 emit_insn_group_barriers (FILE *dump)
5747 int insns_since_last_label = 0;
5749 init_insn_group_barriers ();
5751 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5753 if (GET_CODE (insn) == CODE_LABEL)
5755 if (insns_since_last_label)
5757 insns_since_last_label = 0;
5759 else if (GET_CODE (insn) == NOTE
5760 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5762 if (insns_since_last_label)
5764 insns_since_last_label = 0;
5766 else if (GET_CODE (insn) == INSN
5767 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5768 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5770 init_insn_group_barriers ();
5773 else if (INSN_P (insn))
5775 insns_since_last_label = 1;
5777 if (group_barrier_needed (insn))
5782 fprintf (dump, "Emitting stop before label %d\n",
5783 INSN_UID (last_label));
5784 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5787 init_insn_group_barriers ();
5795 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5796 This function has to emit all necessary group barriers. */
5799 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5803 init_insn_group_barriers ();
5805 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5807 if (GET_CODE (insn) == BARRIER)
5809 rtx last = prev_active_insn (insn);
5813 if (GET_CODE (last) == JUMP_INSN
5814 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5815 last = prev_active_insn (last);
5816 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5817 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5819 init_insn_group_barriers ();
5821 else if (INSN_P (insn))
5823 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5824 init_insn_group_barriers ();
5825 else if (group_barrier_needed (insn))
5827 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5828 init_insn_group_barriers ();
5829 group_barrier_needed (insn);
5837 /* Instruction scheduling support. */
5839 #define NR_BUNDLES 10
5841 /* A list of names of all available bundles. */
5843 static const char *bundle_name [NR_BUNDLES] =
5849 #if NR_BUNDLES == 10
5859 /* Nonzero if we should insert stop bits into the schedule. */
5861 int ia64_final_schedule = 0;
5863 /* Codes of the corresponding queried units: */
5865 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5866 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5868 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5869 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5871 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5873 /* The following variable value is an insn group barrier. */
5875 static rtx dfa_stop_insn;
5877 /* The following variable value is the last issued insn. */
5879 static rtx last_scheduled_insn;
5881 /* The following variable value is size of the DFA state. */
5883 static size_t dfa_state_size;
5885 /* The following variable value is a pointer to a DFA state used as
5886 a temporary variable. */
5888 static state_t temp_dfa_state = NULL;
5890 /* The following variable value is the DFA state after issuing the last insn.  */
5893 static state_t prev_cycle_state = NULL;
5895 /* The following array element values are TRUE if the corresponding
5896 insn requires stop bits to be added before it. */
5898 static char *stops_p;
5900 /* The following variable is used to set up the array mentioned above. */
5902 static int stop_before_p = 0;
5904 /* The following variable value is the length of the arrays `clocks' and `add_cycles'.  */
5907 static int clocks_length;
5909 /* The following array element values are cycles on which the
5910 corresponding insn will be issued. The array is used only for Itanium1.  */
5915 /* The following array element values are numbers of cycles that should be
5916 added to improve insn scheduling for MM_insns for Itanium1. */
5918 static int *add_cycles;
5920 static rtx ia64_single_set (rtx);
5921 static void ia64_emit_insn_before (rtx, rtx);
5923 /* Map a bundle number to its pseudo-op. */
5926 get_bundle_name (int b)
5928 return bundle_name[b];
5932 /* Return the maximum number of instructions a cpu can issue. */
5935 ia64_issue_rate (void)
5940 /* Helper function - like single_set, but look inside COND_EXEC. */
5943 ia64_single_set (rtx insn)
5945 rtx x = PATTERN (insn), ret;
5946 if (GET_CODE (x) == COND_EXEC)
5947 x = COND_EXEC_CODE (x);
5948 if (GET_CODE (x) == SET)
5951 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
5952 Although they are not classical single set, the second set is there just
5953 to protect it from moving past FP-relative stack accesses. */
5954 switch (recog_memoized (insn))
5956 case CODE_FOR_prologue_allocate_stack:
5957 case CODE_FOR_epilogue_deallocate_stack:
5958 ret = XVECEXP (x, 0, 0);
5962 ret = single_set_2 (insn, x);
5969 /* Adjust the cost of a scheduling dependency. Return the new cost of
5970 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5973 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5975 enum attr_itanium_class dep_class;
5976 enum attr_itanium_class insn_class;
5978 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5981 insn_class = ia64_safe_itanium_class (insn);
5982 dep_class = ia64_safe_itanium_class (dep_insn);
5983 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5984 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5990 /* Like emit_insn_before, but skip cycle_display notes.
5991 ??? When cycle display notes are implemented, update this. */
5994 ia64_emit_insn_before (rtx insn, rtx before)
5996 emit_insn_before (insn, before);
5999 /* The following function marks insns that produce addresses for load
6000 and store insns. Such insns will be placed into M slots because it
6001 decreases the latency time for Itanium1 (see function
6002 `ia64_produce_address_p' and the DFA descriptions). */
6005 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6007 rtx insn, link, next, next_tail;
6009 /* Before reload, which_alternative is not set, which means that
6010 ia64_safe_itanium_class will produce wrong results for (at least)
6011 move instructions. */
6012 if (!reload_completed)
6015 next_tail = NEXT_INSN (tail);
6016 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6019 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6021 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6023 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
6025 if (REG_NOTE_KIND (link) != REG_DEP_TRUE)
6027 next = XEXP (link, 0);
6028 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
6029 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
6030 && ia64_st_address_bypass_p (insn, next))
6032 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
6033 || ia64_safe_itanium_class (next)
6034 == ITANIUM_CLASS_FLD)
6035 && ia64_ld_address_bypass_p (insn, next))
6038 insn->call = link != 0;
6042 /* We're beginning a new block. Initialize data structures as necessary. */
6045 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6046 int sched_verbose ATTRIBUTE_UNUSED,
6047 int max_ready ATTRIBUTE_UNUSED)
6049 #ifdef ENABLE_CHECKING
6052 if (reload_completed)
6053 for (insn = NEXT_INSN (current_sched_info->prev_head);
6054 insn != current_sched_info->next_tail;
6055 insn = NEXT_INSN (insn))
6056 gcc_assert (!SCHED_GROUP_P (insn));
6058 last_scheduled_insn = NULL_RTX;
6059 init_insn_group_barriers ();
6062 /* We are about to begin issuing insns for this clock cycle.
6063 Override the default sort algorithm to better slot instructions. */
6066 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6067 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6071 int n_ready = *pn_ready;
6072 rtx *e_ready = ready + n_ready;
6076 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6078 if (reorder_type == 0)
6080 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6082 for (insnp = ready; insnp < e_ready; insnp++)
6083 if (insnp < e_ready)
6086 enum attr_type t = ia64_safe_type (insn);
6087 if (t == TYPE_UNKNOWN)
6089 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6090 || asm_noperands (PATTERN (insn)) >= 0)
6092 rtx lowest = ready[n_asms];
6093 ready[n_asms] = insn;
6099 rtx highest = ready[n_ready - 1];
6100 ready[n_ready - 1] = insn;
6107 if (n_asms < n_ready)
6109 /* Some normal insns to process. Skip the asms. */
6113 else if (n_ready > 0)
6117 if (ia64_final_schedule)
6120 int nr_need_stop = 0;
6122 for (insnp = ready; insnp < e_ready; insnp++)
6123 if (safe_group_barrier_needed (*insnp))
6126 if (reorder_type == 1 && n_ready == nr_need_stop)
6128 if (reorder_type == 0)
6131 /* Move down everything that needs a stop bit, preserving the relative order.  */
6133 while (insnp-- > ready + deleted)
6134 while (insnp >= ready + deleted)
6137 if (! safe_group_barrier_needed (insn))
6139 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6150 /* We are about to begin issuing insns for this clock cycle. Override
6151 the default sort algorithm to better slot instructions. */
6154 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6157 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6158 pn_ready, clock_var, 0);
6161 /* Like ia64_sched_reorder, but called after issuing each insn.
6162 Override the default sort algorithm to better slot instructions. */
6165 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6166 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6167 int *pn_ready, int clock_var)
6169 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6170 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6171 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6175 /* We are about to issue INSN. Return the number of insns left on the
6176 ready queue that can be issued this cycle. */
6179 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6180 int sched_verbose ATTRIBUTE_UNUSED,
6181 rtx insn ATTRIBUTE_UNUSED,
6182 int can_issue_more ATTRIBUTE_UNUSED)
6184 last_scheduled_insn = insn;
6185 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6186 if (reload_completed)
6188 int needed = group_barrier_needed (insn);
6190 gcc_assert (!needed);
6191 if (GET_CODE (insn) == CALL_INSN)
6192 init_insn_group_barriers ();
6193 stops_p [INSN_UID (insn)] = stop_before_p;
6199 /* We are choosing an insn from the ready queue. Return nonzero if INSN can be chosen.  */
6203 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6205 gcc_assert (insn && INSN_P (insn));
6206 return (!reload_completed
6207 || !safe_group_barrier_needed (insn));
6210 /* The following variable value is a pseudo-insn used by the DFA insn
6211 scheduler to change the DFA state when the simulated clock is increased.  */
6214 static rtx dfa_pre_cycle_insn;
6216 /* We are about to begin issuing INSN. Return nonzero if we cannot
6217 issue it on the given cycle CLOCK and return zero if we should not sort
6218 the ready queue on the next clock start. */
6221 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6222 int clock, int *sort_p)
6224 int setup_clocks_p = FALSE;
6226 gcc_assert (insn && INSN_P (insn));
6227 if ((reload_completed && safe_group_barrier_needed (insn))
6228 || (last_scheduled_insn
6229 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6230 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6231 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6233 init_insn_group_barriers ();
6234 if (verbose && dump)
6235 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6236 last_clock == clock ? " + cycle advance" : "");
6238 if (last_clock == clock)
6240 state_transition (curr_state, dfa_stop_insn);
6241 if (TARGET_EARLY_STOP_BITS)
6242 *sort_p = (last_scheduled_insn == NULL_RTX
6243 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6248 else if (reload_completed)
6249 setup_clocks_p = TRUE;
6250 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6251 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6252 state_reset (curr_state);
6255 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6256 state_transition (curr_state, dfa_stop_insn);
6257 state_transition (curr_state, dfa_pre_cycle_insn);
6258 state_transition (curr_state, NULL);
6261 else if (reload_completed)
6262 setup_clocks_p = TRUE;
6263 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6264 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6265 && asm_noperands (PATTERN (insn)) < 0)
6267 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6269 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6274 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6275 if (REG_NOTE_KIND (link) == 0)
6277 enum attr_itanium_class dep_class;
6278 rtx dep_insn = XEXP (link, 0);
6280 dep_class = ia64_safe_itanium_class (dep_insn);
6281 if ((dep_class == ITANIUM_CLASS_MMMUL
6282 || dep_class == ITANIUM_CLASS_MMSHF)
6283 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6285 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6286 d = last_clock - clocks [INSN_UID (dep_insn)];
6289 add_cycles [INSN_UID (insn)] = 3 - d;
6297 /* The following page contains abstract data `bundle states' which are
6298 used for bundling insns (inserting nops and template generation). */
6300 /* The following describes state of insn bundling. */
6304 /* Unique bundle state number to identify them in the debugging output.  */
6307 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6308 /* number of nops before and after the insn */
6309 short before_nops_num, after_nops_num;
6310 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st insn) */
6312 int cost; /* cost of the state in cycles */
6313 int accumulated_insns_num; /* number of all previous insns including
6314 nops. L is considered as 2 insns */
6315 int branch_deviation; /* deviation of previous branches from 3rd slots */
6316 struct bundle_state *next; /* next state with the same insn_num */
6317 struct bundle_state *originator; /* originator (previous insn state) */
6318 /* All bundle states are in the following chain. */
6319 struct bundle_state *allocated_states_chain;
6320 /* The DFA State after issuing the insn and the nops. */
6324 /* The following maps an insn number to the corresponding bundle state. */
6326 static struct bundle_state **index_to_bundle_states;
6328 /* The unique number of next bundle state. */
6330 static int bundle_states_num;
6332 /* All allocated bundle states are in the following chain. */
6334 static struct bundle_state *allocated_bundle_states_chain;
6336 /* All allocated but not used bundle states are in the following
6339 static struct bundle_state *free_bundle_state_chain;
6342 /* The following function returns a free bundle state. */
6344 static struct bundle_state *
6345 get_free_bundle_state (void)
6347 struct bundle_state *result;
6349 if (free_bundle_state_chain != NULL)
6351 result = free_bundle_state_chain;
6352 free_bundle_state_chain = result->next;
6356 result = xmalloc (sizeof (struct bundle_state));
6357 result->dfa_state = xmalloc (dfa_state_size);
6358 result->allocated_states_chain = allocated_bundle_states_chain;
6359 allocated_bundle_states_chain = result;
6361 result->unique_num = bundle_states_num++;
6366 /* The following function frees given bundle state. */
6369 free_bundle_state (struct bundle_state *state)
6371 state->next = free_bundle_state_chain;
6372 free_bundle_state_chain = state;
6375 /* Start work with abstract data `bundle states'. */
6378 initiate_bundle_states (void)
6380 bundle_states_num = 0;
6381 free_bundle_state_chain = NULL;
6382 allocated_bundle_states_chain = NULL;
6385 /* Finish work with abstract data `bundle states'. */
6388 finish_bundle_states (void)
6390 struct bundle_state *curr_state, *next_state;
6392 for (curr_state = allocated_bundle_states_chain;
6394 curr_state = next_state)
6396 next_state = curr_state->allocated_states_chain;
6397 free (curr_state->dfa_state);
6402 /* Hash table of the bundle states. The key is dfa_state and insn_num
6403 of the bundle states. */
6405 static htab_t bundle_state_table;
6407 /* The function returns hash of BUNDLE_STATE. */
6410 bundle_state_hash (const void *bundle_state)
6412 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6415 for (result = i = 0; i < dfa_state_size; i++)
6416 result += (((unsigned char *) state->dfa_state) [i]
6417 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6418 return result + state->insn_num;
6421 /* The function returns nonzero if the bundle state keys are equal. */
6424 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6426 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6427 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6429 return (state1->insn_num == state2->insn_num
6430 && memcmp (state1->dfa_state, state2->dfa_state,
6431 dfa_state_size) == 0);
6434 /* The function inserts the BUNDLE_STATE into the hash table. The
6435 function returns nonzero if the bundle has been inserted into the
6436 table. The table contains the best bundle state with given key. */
6439 insert_bundle_state (struct bundle_state *bundle_state)
6443 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6444 if (*entry_ptr == NULL)
6446 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6447 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6448 *entry_ptr = (void *) bundle_state;
6451 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6452 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6453 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6454 > bundle_state->accumulated_insns_num
6455 || (((struct bundle_state *)
6456 *entry_ptr)->accumulated_insns_num
6457 == bundle_state->accumulated_insns_num
6458 && ((struct bundle_state *)
6459 *entry_ptr)->branch_deviation
6460 > bundle_state->branch_deviation))))
6463 struct bundle_state temp;
6465 temp = *(struct bundle_state *) *entry_ptr;
6466 *(struct bundle_state *) *entry_ptr = *bundle_state;
6467 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6468 *bundle_state = temp;
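/* An illustrative sketch: the replacement test above is a lexicographic
   comparison on (cost, accumulated_insns_num, branch_deviation).  Written as a
   standalone predicate it would look roughly like the following; the helper
   name bundle_state_better_p is hypothetical and not used elsewhere.  */
#if 0
static int
bundle_state_better_p (const struct bundle_state *new_state,
                       const struct bundle_state *old_state)
{
  /* Smaller cost wins; ties are broken by fewer accumulated insns,
     then by smaller branch deviation.  */
  if (new_state->cost != old_state->cost)
    return new_state->cost < old_state->cost;
  if (new_state->accumulated_insns_num != old_state->accumulated_insns_num)
    return new_state->accumulated_insns_num < old_state->accumulated_insns_num;
  return new_state->branch_deviation < old_state->branch_deviation;
}
#endif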
6473 /* Start work with the hash table. */
6476 initiate_bundle_state_table (void)
6478 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6482 /* Finish work with the hash table. */
6485 finish_bundle_state_table (void)
6487 htab_delete (bundle_state_table);
6492 /* The following variable is an insn `nop' used to check bundle states
6493 with different numbers of inserted nops. */
6495 static rtx ia64_nop;
6497 /* The following function tries to issue NOPS_NUM nops for the current
6498 state without advancing the processor cycle. If it fails, the
6499 function returns FALSE and frees the current state. */
6502 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6506 for (i = 0; i < nops_num; i++)
6507 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6509 free_bundle_state (curr_state);
6515 /* The following function tries to issue INSN for the current
6516 state without advancing the processor cycle. If it fails, the
6517 function returns FALSE and frees the current state. */
6520 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6522 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6524 free_bundle_state (curr_state);
6530 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6531 starting from ORIGINATOR without advancing the processor cycle. If
6532 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
6533 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6534 If successful, the function creates a new bundle state and
6535 inserts it into the hash table and into `index_to_bundle_states'. */
6538 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6539 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6541 struct bundle_state *curr_state;
6543 curr_state = get_free_bundle_state ();
6544 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6545 curr_state->insn = insn;
6546 curr_state->insn_num = originator->insn_num + 1;
6547 curr_state->cost = originator->cost;
6548 curr_state->originator = originator;
6549 curr_state->before_nops_num = before_nops_num;
6550 curr_state->after_nops_num = 0;
6551 curr_state->accumulated_insns_num
6552 = originator->accumulated_insns_num + before_nops_num;
6553 curr_state->branch_deviation = originator->branch_deviation;
6555 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6557 gcc_assert (GET_MODE (insn) != TImode);
6558 if (!try_issue_nops (curr_state, before_nops_num))
6560 if (!try_issue_insn (curr_state, insn))
6562 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6563 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6564 && curr_state->accumulated_insns_num % 3 != 0)
6566 free_bundle_state (curr_state);
6570 else if (GET_MODE (insn) != TImode)
6572 if (!try_issue_nops (curr_state, before_nops_num))
6574 if (!try_issue_insn (curr_state, insn))
6576 curr_state->accumulated_insns_num++;
6577 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
6578 && asm_noperands (PATTERN (insn)) < 0);
6580 if (ia64_safe_type (insn) == TYPE_L)
6581 curr_state->accumulated_insns_num++;
6585 /* If this is an insn that must be first in a group, then don't allow
6586 nops to be emitted before it. Currently, alloc is the only such
6587 supported instruction. */
6588 /* ??? The bundling automatons should handle this for us, but they do
6589 not yet have support for the first_insn attribute. */
6590 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
6592 free_bundle_state (curr_state);
6596 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6597 state_transition (curr_state->dfa_state, NULL);
6599 if (!try_issue_nops (curr_state, before_nops_num))
6601 if (!try_issue_insn (curr_state, insn))
6603 curr_state->accumulated_insns_num++;
6604 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6605 || asm_noperands (PATTERN (insn)) >= 0)
6607 /* Finish bundle containing asm insn. */
6608 curr_state->after_nops_num
6609 = 3 - curr_state->accumulated_insns_num % 3;
6610 curr_state->accumulated_insns_num
6611 += 3 - curr_state->accumulated_insns_num % 3;
6613 else if (ia64_safe_type (insn) == TYPE_L)
6614 curr_state->accumulated_insns_num++;
6616 if (ia64_safe_type (insn) == TYPE_B)
6617 curr_state->branch_deviation
6618 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6619 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6621 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6624 struct bundle_state *curr_state1;
6625 struct bundle_state *allocated_states_chain;
6627 curr_state1 = get_free_bundle_state ();
6628 dfa_state = curr_state1->dfa_state;
6629 allocated_states_chain = curr_state1->allocated_states_chain;
6630 *curr_state1 = *curr_state;
6631 curr_state1->dfa_state = dfa_state;
6632 curr_state1->allocated_states_chain = allocated_states_chain;
6633 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6635 curr_state = curr_state1;
6637 if (!try_issue_nops (curr_state,
6638 3 - curr_state->accumulated_insns_num % 3))
6640 curr_state->after_nops_num
6641 = 3 - curr_state->accumulated_insns_num % 3;
6642 curr_state->accumulated_insns_num
6643 += 3 - curr_state->accumulated_insns_num % 3;
6645 if (!insert_bundle_state (curr_state))
6646 free_bundle_state (curr_state);
6650 /* The following function returns the position in the two-bundle window for the given STATE. */
6654 get_max_pos (state_t state)
6656 if (cpu_unit_reservation_p (state, pos_6))
6658 else if (cpu_unit_reservation_p (state, pos_5))
6660 else if (cpu_unit_reservation_p (state, pos_4))
6662 else if (cpu_unit_reservation_p (state, pos_3))
6664 else if (cpu_unit_reservation_p (state, pos_2))
6666 else if (cpu_unit_reservation_p (state, pos_1))
6672 /* The function returns the code of a possible template for the given
6673 position and state. The function should be called only with
6674 position values of 3 or 6. We avoid generating F NOPs by putting
6675 templates containing F insns at the end of the template search,
6676 because an undocumented anomaly in McKinley-derived cores can
6677 cause stalls if an F-unit insn (including a NOP) is issued within a
6678 six-cycle window after reading certain application registers (such
6679 as ar.bsp). Furthermore, power considerations also argue against
6680 the use of F-unit instructions unless they are really needed. */
6683 get_template (state_t state, int pos)
6688 if (cpu_unit_reservation_p (state, _0mmi_))
6690 else if (cpu_unit_reservation_p (state, _0mii_))
6692 else if (cpu_unit_reservation_p (state, _0mmb_))
6694 else if (cpu_unit_reservation_p (state, _0mib_))
6696 else if (cpu_unit_reservation_p (state, _0mbb_))
6698 else if (cpu_unit_reservation_p (state, _0bbb_))
6700 else if (cpu_unit_reservation_p (state, _0mmf_))
6702 else if (cpu_unit_reservation_p (state, _0mfi_))
6704 else if (cpu_unit_reservation_p (state, _0mfb_))
6706 else if (cpu_unit_reservation_p (state, _0mlx_))
6711 if (cpu_unit_reservation_p (state, _1mmi_))
6713 else if (cpu_unit_reservation_p (state, _1mii_))
6715 else if (cpu_unit_reservation_p (state, _1mmb_))
6717 else if (cpu_unit_reservation_p (state, _1mib_))
6719 else if (cpu_unit_reservation_p (state, _1mbb_))
6721 else if (cpu_unit_reservation_p (state, _1bbb_))
6723 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6725 else if (cpu_unit_reservation_p (state, _1mfi_))
6727 else if (cpu_unit_reservation_p (state, _1mfb_))
6729 else if (cpu_unit_reservation_p (state, _1mlx_))
6738 /* The following function returns the next insn important for insn bundling,
6739 searching from INSN up to (but not including) TAIL. */
6742 get_next_important_insn (rtx insn, rtx tail)
6744 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6746 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6747 && GET_CODE (PATTERN (insn)) != USE
6748 && GET_CODE (PATTERN (insn)) != CLOBBER)
6753 /* The following function does insn bundling. Bundling means
6754 inserting templates and nop insns to fit insn groups into permitted
6755 templates. Instruction scheduling uses an NDFA (non-deterministic
6756 finite automaton) encoding information about the templates and the
6757 inserted nops. The nondeterminism of the automaton permits following
6758 all possible insn sequences very quickly.
6760 Unfortunately it is not possible to get information about inserted
6761 nop insns and used templates from the automaton states. The
6762 automaton only says that we can issue an insn, possibly inserting
6763 some nops before it and using some template. Therefore insn
6764 bundling in this function is implemented by using a DFA
6765 (deterministic finite automaton). We follow all possible insn
6766 sequences by inserting 0-2 nops (that is what the NDFA describes for
6767 insn scheduling) before/after each insn being bundled. We know the
6768 start of a simulated processor cycle from insn scheduling (an insn
6769 starting a new cycle has TImode).
6771 A simple implementation of insn bundling would create an enormous
6772 number of possible insn sequences satisfying the information about new
6773 cycle ticks taken from insn scheduling. To make the algorithm
6774 practical we use dynamic programming. Each decision (about
6775 inserting nops and implicitly about previous decisions) is described
6776 by the structure bundle_state (see above). If we generate the same
6777 bundle state (the key is the automaton state after issuing the insns and
6778 nops for it), we reuse the already generated one. As a consequence we
6779 reject some decisions which cannot improve the solution and
6780 reduce the memory used by the algorithm.
6782 When we reach the end of an EBB (extended basic block), we choose the
6783 best sequence and then, moving back through the EBB, insert templates for
6784 the best alternative. The templates are found by querying the
6785 automaton state for each insn in the chosen bundle states.
6787 So the algorithm makes two (forward and backward) passes through the
6788 EBB. There is an additional forward pass through the EBB for the Itanium 1
6789 processor. This pass inserts more nops to make the dependency between
6790 a producer insn and MMMUL/MMSHF at least 4 cycles long. */
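/* An illustrative sketch of the dynamic programming skeleton described
   above, with error handling, cycle-boundary handling and the Itanium 1
   special cases omitted; first_important_insn and next_important_insn are
   placeholders for calls to get_next_important_insn.  */
#if 0
  for (insn = first_important_insn; insn != NULL_RTX; insn = next_important_insn)
    {
      insn_num++;
      index_to_bundle_states [insn_num] = NULL;
      for (curr_state = index_to_bundle_states [insn_num - 1];
           curr_state != NULL;
           curr_state = curr_state->next)
        {
          /* Try each legal number of nops before INSN.  Every successful
             attempt creates a new bundle state; insert_bundle_state keeps
             only the best state per (insn_num, dfa_state) key, which is
             what makes the search tractable.  */
          issue_nops_and_insn (curr_state, 2, insn, bundle_end_p, only_bundle_end_p);
          issue_nops_and_insn (curr_state, 1, insn, bundle_end_p, only_bundle_end_p);
          issue_nops_and_insn (curr_state, 0, insn, bundle_end_p, only_bundle_end_p);
        }
    }
#endif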
6793 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6795 struct bundle_state *curr_state, *next_state, *best_state;
6796 rtx insn, next_insn;
6798 int i, bundle_end_p, only_bundle_end_p, asm_p;
6799 int pos = 0, max_pos, template0, template1;
6802 enum attr_type type;
6805 /* Count insns in the EBB. */
6806 for (insn = NEXT_INSN (prev_head_insn);
6807 insn && insn != tail;
6808 insn = NEXT_INSN (insn))
6814 dfa_clean_insn_cache ();
6815 initiate_bundle_state_table ();
6816 index_to_bundle_states = xmalloc ((insn_num + 2)
6817 * sizeof (struct bundle_state *));
6818 /* First (forward) pass -- generation of bundle states. */
6819 curr_state = get_free_bundle_state ();
6820 curr_state->insn = NULL;
6821 curr_state->before_nops_num = 0;
6822 curr_state->after_nops_num = 0;
6823 curr_state->insn_num = 0;
6824 curr_state->cost = 0;
6825 curr_state->accumulated_insns_num = 0;
6826 curr_state->branch_deviation = 0;
6827 curr_state->next = NULL;
6828 curr_state->originator = NULL;
6829 state_reset (curr_state->dfa_state);
6830 index_to_bundle_states [0] = curr_state;
6832 /* Shift the cycle mark if it is put on an insn which could be ignored. */
6833 for (insn = NEXT_INSN (prev_head_insn);
6835 insn = NEXT_INSN (insn))
6837 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6838 || GET_CODE (PATTERN (insn)) == USE
6839 || GET_CODE (PATTERN (insn)) == CLOBBER)
6840 && GET_MODE (insn) == TImode)
6842 PUT_MODE (insn, VOIDmode);
6843 for (next_insn = NEXT_INSN (insn);
6845 next_insn = NEXT_INSN (next_insn))
6846 if (INSN_P (next_insn)
6847 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6848 && GET_CODE (PATTERN (next_insn)) != USE
6849 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6851 PUT_MODE (next_insn, TImode);
6855 /* Forward pass: generation of bundle states. */
6856 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6860 gcc_assert (INSN_P (insn)
6861 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6862 && GET_CODE (PATTERN (insn)) != USE
6863 && GET_CODE (PATTERN (insn)) != CLOBBER);
6864 type = ia64_safe_type (insn);
6865 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6867 index_to_bundle_states [insn_num] = NULL;
6868 for (curr_state = index_to_bundle_states [insn_num - 1];
6870 curr_state = next_state)
6872 pos = curr_state->accumulated_insns_num % 3;
6873 next_state = curr_state->next;
6874 /* We must fill up the current bundle in order to start a
6875 subsequent asm insn in a new bundle. An asm insn is always
6876 placed in a separate bundle. */
6878 = (next_insn != NULL_RTX
6879 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6880 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6881 /* We may fill up the current bundle if it is the cycle end
6882 without a group barrier. */
6884 = (only_bundle_end_p || next_insn == NULL_RTX
6885 || (GET_MODE (next_insn) == TImode
6886 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6887 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6889 /* We need to insert 2 nops for cases like M_MII. To
6890 guarantee issuing all insns on the same cycle for
6891 Itanium 1, we need to issue 2 nops after the first M
6892 insn (MnnMII where n is a nop insn). */
6893 || ((type == TYPE_M || type == TYPE_A)
6894 && ia64_tune == PROCESSOR_ITANIUM
6895 && !bundle_end_p && pos == 1))
6896 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6898 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6900 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6903 gcc_assert (index_to_bundle_states [insn_num]);
6904 for (curr_state = index_to_bundle_states [insn_num];
6906 curr_state = curr_state->next)
6907 if (verbose >= 2 && dump)
6909 /* This structure is taken from generated code of the
6910 pipeline hazard recognizer (see file insn-attrtab.c).
6911 Please don't forget to change the structure if a new
6912 automaton is added to the .md file. */
6915 unsigned short one_automaton_state;
6916 unsigned short oneb_automaton_state;
6917 unsigned short two_automaton_state;
6918 unsigned short twob_automaton_state;
6923 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6924 curr_state->unique_num,
6925 (curr_state->originator == NULL
6926 ? -1 : curr_state->originator->unique_num),
6928 curr_state->before_nops_num, curr_state->after_nops_num,
6929 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6930 (ia64_tune == PROCESSOR_ITANIUM
6931 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6932 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6937 /* We should find a solution because the 2nd insn scheduling has found one. */
6939 gcc_assert (index_to_bundle_states [insn_num]);
6940 /* Find a state corresponding to the best insn sequence. */
6942 for (curr_state = index_to_bundle_states [insn_num];
6944 curr_state = curr_state->next)
6945 /* We are only looking at states whose last bundle is completely
6946 filled. Among those, we first prefer insn sequences with minimal cost,
6947 then those with fewer inserted nops, and finally those with branch insns
6948 placed in the 3rd slots. */
6949 if (curr_state->accumulated_insns_num % 3 == 0
6950 && (best_state == NULL || best_state->cost > curr_state->cost
6951 || (best_state->cost == curr_state->cost
6952 && (curr_state->accumulated_insns_num
6953 < best_state->accumulated_insns_num
6954 || (curr_state->accumulated_insns_num
6955 == best_state->accumulated_insns_num
6956 && curr_state->branch_deviation
6957 < best_state->branch_deviation)))))
6958 best_state = curr_state;
6959 /* Second (backward) pass: adding nops and templates. */
6960 insn_num = best_state->before_nops_num;
6961 template0 = template1 = -1;
6962 for (curr_state = best_state;
6963 curr_state->originator != NULL;
6964 curr_state = curr_state->originator)
6966 insn = curr_state->insn;
6967 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6968 || asm_noperands (PATTERN (insn)) >= 0);
6970 if (verbose >= 2 && dump)
6974 unsigned short one_automaton_state;
6975 unsigned short oneb_automaton_state;
6976 unsigned short two_automaton_state;
6977 unsigned short twob_automaton_state;
6982 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6983 curr_state->unique_num,
6984 (curr_state->originator == NULL
6985 ? -1 : curr_state->originator->unique_num),
6987 curr_state->before_nops_num, curr_state->after_nops_num,
6988 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6989 (ia64_tune == PROCESSOR_ITANIUM
6990 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6991 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6994 /* Find the position in the current bundle window. The window can
6995 contain at most two bundles. A two-bundle window means that
6996 the processor will make two bundle rotations. */
6997 max_pos = get_max_pos (curr_state->dfa_state);
6999 /* The following (negative template number) means that the
7000 processor did one bundle rotation. */
7001 || (max_pos == 3 && template0 < 0))
7003 /* We are at the end of the window -- find template(s) for its bundle(s). */
7007 template0 = get_template (curr_state->dfa_state, 3);
7010 template1 = get_template (curr_state->dfa_state, 3);
7011 template0 = get_template (curr_state->dfa_state, 6);
7014 if (max_pos > 3 && template1 < 0)
7015 /* It may happen when we have the stop inside a bundle. */
7017 gcc_assert (pos <= 3);
7018 template1 = get_template (curr_state->dfa_state, 3);
7022 /* Emit nops after the current insn. */
7023 for (i = 0; i < curr_state->after_nops_num; i++)
7026 emit_insn_after (nop, insn);
7028 gcc_assert (pos >= 0);
7031 /* We are at the start of a bundle: emit the template
7032 (it should be defined). */
7033 gcc_assert (template0 >= 0);
7034 b = gen_bundle_selector (GEN_INT (template0));
7035 ia64_emit_insn_before (b, nop);
7036 /* If we have a two-bundle window, we make one bundle
7037 rotation. Otherwise template0 will be undefined
7038 (a negative value). */
7039 template0 = template1;
7043 /* Move the position backward in the window. A group barrier has
7044 no slot. An asm insn takes a whole bundle. */
7045 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7046 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7047 && asm_noperands (PATTERN (insn)) < 0)
7049 /* Long insn takes 2 slots. */
7050 if (ia64_safe_type (insn) == TYPE_L)
7052 gcc_assert (pos >= 0);
7054 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7055 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7056 && asm_noperands (PATTERN (insn)) < 0)
7058 /* The current insn is at the bundle start: emit the template. */
7060 gcc_assert (template0 >= 0);
7061 b = gen_bundle_selector (GEN_INT (template0));
7062 ia64_emit_insn_before (b, insn);
7063 b = PREV_INSN (insn);
7065 /* See the comment above in the analogous place for emitting nops after the insn. */
7067 template0 = template1;
7070 /* Emit nops before the current insn. */
7071 for (i = 0; i < curr_state->before_nops_num; i++)
7074 ia64_emit_insn_before (nop, insn);
7075 nop = PREV_INSN (insn);
7078 gcc_assert (pos >= 0);
7081 /* See the comment above in the analogous place for emitting nops after the insn. */
7083 gcc_assert (template0 >= 0);
7084 b = gen_bundle_selector (GEN_INT (template0));
7085 ia64_emit_insn_before (b, insn);
7086 b = PREV_INSN (insn);
7088 template0 = template1;
7093 if (ia64_tune == PROCESSOR_ITANIUM)
7094 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
7095 Itanium 1 has a strange design: if the distance between an insn
7096 and a dependent MM-insn is less than 4 cycles, we get a stall of 6
7097 additional cycles. So we make the distance equal to 4 cycles if it is less. */
7099 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7103 gcc_assert (INSN_P (insn)
7104 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7105 && GET_CODE (PATTERN (insn)) != USE
7106 && GET_CODE (PATTERN (insn)) != CLOBBER);
7107 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7108 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
7109 /* We found an MM-insn which needs additional cycles. */
7115 /* Now we are searching for the template of the bundle in
7116 which the MM-insn is placed and for the position of the
7117 insn in the bundle (0, 1, 2). We also check
7118 whether there is a stop before the insn. */
7119 last = prev_active_insn (insn);
7120 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
7122 last = prev_active_insn (last);
7124 for (;; last = prev_active_insn (last))
7125 if (recog_memoized (last) == CODE_FOR_bundle_selector)
7127 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
7129 /* The insn is in an MLX bundle. Change the template
7130 to MFI because we will add nops before the
7131 insn. This simplifies the subsequent code a lot. */
7133 = gen_bundle_selector (const2_rtx); /* -> MFI */
7136 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
7137 && (ia64_safe_itanium_class (last)
7138 != ITANIUM_CLASS_IGNORE))
7140 /* Some correctness checks: the stop is not at the
7141 bundle start, there are no more than 3 insns in the bundle,
7142 and the MM-insn is not at the start of a bundle with template MLX. */
7144 gcc_assert ((!pred_stop_p || n)
7146 && (template0 != 9 || !n));
7147 /* Put nops after the insn in the bundle. */
7148 for (j = 3 - n; j > 0; j --)
7149 ia64_emit_insn_before (gen_nop (), insn);
7150 /* This takes into account that we will add N more nops
7151 before the insn later -- please see the code below. */
7152 add_cycles [INSN_UID (insn)]--;
7153 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
7154 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7157 add_cycles [INSN_UID (insn)]--;
7158 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7160 /* Insert "MII;" template. */
7161 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7163 ia64_emit_insn_before (gen_nop (), insn);
7164 ia64_emit_insn_before (gen_nop (), insn);
7167 /* To decrease code size, we use the "MI;I;" template. */
7169 ia64_emit_insn_before
7170 (gen_insn_group_barrier (GEN_INT (3)), insn);
7173 ia64_emit_insn_before (gen_nop (), insn);
7174 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7177 /* Put the MM-insn in the same slot of a bundle with the
7178 same template as the original one. */
7179 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
7181 /* To put the insn in the same slot, add the necessary number of nops. */
7183 for (j = n; j > 0; j --)
7184 ia64_emit_insn_before (gen_nop (), insn);
7185 /* Put the stop if the original bundle had it. */
7187 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7191 free (index_to_bundle_states);
7192 finish_bundle_state_table ();
7194 dfa_clean_insn_cache ();
7197 /* The following function is called at the end of scheduling BB or
7198 EBB. After reload, it inserts stop bits and does insn bundling. */
7201 ia64_sched_finish (FILE *dump, int sched_verbose)
7204 fprintf (dump, "// Finishing schedule.\n");
7205 if (!reload_completed)
7207 if (reload_completed)
7209 final_emit_insn_group_barriers (dump);
7210 bundling (dump, sched_verbose, current_sched_info->prev_head,
7211 current_sched_info->next_tail);
7212 if (sched_verbose && dump)
7213 fprintf (dump, "// finishing %d-%d\n",
7214 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7215 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7221 /* The following function inserts stop bits in scheduled BB or EBB. */
7224 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7227 int need_barrier_p = 0;
7228 rtx prev_insn = NULL_RTX;
7230 init_insn_group_barriers ();
7232 for (insn = NEXT_INSN (current_sched_info->prev_head);
7233 insn != current_sched_info->next_tail;
7234 insn = NEXT_INSN (insn))
7236 if (GET_CODE (insn) == BARRIER)
7238 rtx last = prev_active_insn (insn);
7242 if (GET_CODE (last) == JUMP_INSN
7243 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7244 last = prev_active_insn (last);
7245 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7246 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7248 init_insn_group_barriers ();
7250 prev_insn = NULL_RTX;
7252 else if (INSN_P (insn))
7254 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7256 init_insn_group_barriers ();
7258 prev_insn = NULL_RTX;
7260 else if (need_barrier_p || group_barrier_needed (insn))
7262 if (TARGET_EARLY_STOP_BITS)
7267 last != current_sched_info->prev_head;
7268 last = PREV_INSN (last))
7269 if (INSN_P (last) && GET_MODE (last) == TImode
7270 && stops_p [INSN_UID (last)])
7272 if (last == current_sched_info->prev_head)
7274 last = prev_active_insn (last);
7276 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7277 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7279 init_insn_group_barriers ();
7280 for (last = NEXT_INSN (last);
7282 last = NEXT_INSN (last))
7284 group_barrier_needed (last);
7288 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7290 init_insn_group_barriers ();
7292 group_barrier_needed (insn);
7293 prev_insn = NULL_RTX;
7295 else if (recog_memoized (insn) >= 0)
7297 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7298 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7299 || asm_noperands (PATTERN (insn)) >= 0);
7306 /* The following function returns the depth of the DFA multipass insn scheduling lookahead: 6 after reload and 4 before. */
7310 ia64_first_cycle_multipass_dfa_lookahead (void)
7312 return (reload_completed ? 6 : 4);
7315 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
7318 ia64_init_dfa_pre_cycle_insn (void)
7320 if (temp_dfa_state == NULL)
7322 dfa_state_size = state_size ();
7323 temp_dfa_state = xmalloc (dfa_state_size);
7324 prev_cycle_state = xmalloc (dfa_state_size);
7326 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7327 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7328 recog_memoized (dfa_pre_cycle_insn);
7329 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7330 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7331 recog_memoized (dfa_stop_insn);
7334 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7335 used by the DFA insn scheduler. */
7338 ia64_dfa_pre_cycle_insn (void)
7340 return dfa_pre_cycle_insn;
7343 /* The following function returns TRUE if PRODUCER (of type ilog or
7344 ld) produces an address for CONSUMER (of type st or stf). */
7347 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7351 gcc_assert (producer && consumer);
7352 dest = ia64_single_set (producer);
7354 reg = SET_DEST (dest);
7356 if (GET_CODE (reg) == SUBREG)
7357 reg = SUBREG_REG (reg);
7358 gcc_assert (GET_CODE (reg) == REG);
7360 dest = ia64_single_set (consumer);
7362 mem = SET_DEST (dest);
7363 gcc_assert (mem && GET_CODE (mem) == MEM);
7364 return reg_mentioned_p (reg, mem);
7367 /* The following function returns TRUE if PRODUCER (of type ilog or
7368 ld) produces an address for CONSUMER (of type ld or fld). */
7371 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7373 rtx dest, src, reg, mem;
7375 gcc_assert (producer && consumer);
7376 dest = ia64_single_set (producer);
7378 reg = SET_DEST (dest);
7380 if (GET_CODE (reg) == SUBREG)
7381 reg = SUBREG_REG (reg);
7382 gcc_assert (GET_CODE (reg) == REG);
7384 src = ia64_single_set (consumer);
7386 mem = SET_SRC (src);
7388 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7389 mem = XVECEXP (mem, 0, 0);
7390 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7391 mem = XEXP (mem, 0);
7393 /* Note that LO_SUM is used for GOT loads. */
7394 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
7396 return reg_mentioned_p (reg, mem);
7399 /* The following function returns TRUE if INSN produces an address for a
7400 load/store insn. We will place such insns into the M slot because it
7401 decreases their latency. */
7404 ia64_produce_address_p (rtx insn)
7410 /* Emit pseudo-ops for the assembler to describe predicate relations.
7411 At present this assumes that we only consider predicate pairs to
7412 be mutex, and that the assembler can deduce proper values from
7413 straight-line code. */
7416 emit_predicate_relation_info (void)
7420 FOR_EACH_BB_REVERSE (bb)
7423 rtx head = BB_HEAD (bb);
7425 /* We only need such notes at code labels. */
7426 if (GET_CODE (head) != CODE_LABEL)
7428 if (GET_CODE (NEXT_INSN (head)) == NOTE
7429 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7430 head = NEXT_INSN (head);
7432 /* Skip p0, which may be thought to be live due to (reg:DI p0)
7433 grabbing the entire block of predicate registers. */
7434 for (r = PR_REG (2); r < PR_REG (64); r += 2)
7435 if (REGNO_REG_SET_P (bb->il.rtl->global_live_at_start, r))
7437 rtx p = gen_rtx_REG (BImode, r);
7438 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7439 if (head == BB_END (bb))
7445 /* Look for conditional calls that do not return, and protect predicate
7446 relations around them. Otherwise the assembler will assume the call
7447 returns, and complain about uses of call-clobbered predicates after the call. */
7449 FOR_EACH_BB_REVERSE (bb)
7451 rtx insn = BB_HEAD (bb);
7455 if (GET_CODE (insn) == CALL_INSN
7456 && GET_CODE (PATTERN (insn)) == COND_EXEC
7457 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7459 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7460 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7461 if (BB_HEAD (bb) == insn)
7463 if (BB_END (bb) == insn)
7467 if (insn == BB_END (bb))
7469 insn = NEXT_INSN (insn);
7474 /* Perform machine dependent operations on the rtl chain INSNS. */
7479 /* We are freeing block_for_insn in the toplev to keep compatibility
7480 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7481 compute_bb_for_insn ();
7483 /* If optimizing, we'll have split before scheduling. */
7485 split_all_insns (0);
7487 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7488 non-optimizing bootstrap. */
7489 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7491 if (ia64_flag_schedule_insns2)
7493 timevar_push (TV_SCHED2);
7494 ia64_final_schedule = 1;
7496 initiate_bundle_states ();
7497 ia64_nop = make_insn_raw (gen_nop ());
7498 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7499 recog_memoized (ia64_nop);
7500 clocks_length = get_max_uid () + 1;
7501 stops_p = xcalloc (1, clocks_length);
7502 if (ia64_tune == PROCESSOR_ITANIUM)
7504 clocks = xcalloc (clocks_length, sizeof (int));
7505 add_cycles = xcalloc (clocks_length, sizeof (int));
7507 if (ia64_tune == PROCESSOR_ITANIUM2)
7509 pos_1 = get_cpu_unit_code ("2_1");
7510 pos_2 = get_cpu_unit_code ("2_2");
7511 pos_3 = get_cpu_unit_code ("2_3");
7512 pos_4 = get_cpu_unit_code ("2_4");
7513 pos_5 = get_cpu_unit_code ("2_5");
7514 pos_6 = get_cpu_unit_code ("2_6");
7515 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7516 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7517 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7518 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7519 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7520 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7521 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7522 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7523 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7524 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7525 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7526 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7527 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7528 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7529 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7530 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7531 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7532 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7533 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7534 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7538 pos_1 = get_cpu_unit_code ("1_1");
7539 pos_2 = get_cpu_unit_code ("1_2");
7540 pos_3 = get_cpu_unit_code ("1_3");
7541 pos_4 = get_cpu_unit_code ("1_4");
7542 pos_5 = get_cpu_unit_code ("1_5");
7543 pos_6 = get_cpu_unit_code ("1_6");
7544 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7545 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7546 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7547 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7548 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7549 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7550 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7551 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7552 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7553 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7554 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7555 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7556 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7557 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7558 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7559 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7560 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7561 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7562 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7563 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7565 schedule_ebbs (dump_file);
7566 finish_bundle_states ();
7567 if (ia64_tune == PROCESSOR_ITANIUM)
7573 emit_insn_group_barriers (dump_file);
7575 ia64_final_schedule = 0;
7576 timevar_pop (TV_SCHED2);
7579 emit_all_insn_group_barriers (dump_file);
7581 /* A call must not be the last instruction in a function, so that the
7582 return address is still within the function and unwinding works
7583 properly. Note that IA-64 differs from dwarf2 on this point. */
7584 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7589 insn = get_last_insn ();
7590 if (! INSN_P (insn))
7591 insn = prev_active_insn (insn);
7592 /* Skip over insns that expand to nothing. */
7593 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7595 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7596 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7598 insn = prev_active_insn (insn);
7600 if (GET_CODE (insn) == CALL_INSN)
7603 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7604 emit_insn (gen_break_f ());
7605 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7609 emit_predicate_relation_info ();
7611 if (ia64_flag_var_tracking)
7613 timevar_push (TV_VAR_TRACKING);
7614 variable_tracking_main ();
7615 timevar_pop (TV_VAR_TRACKING);
7619 /* Return true if REGNO is used by the epilogue. */
7622 ia64_epilogue_uses (int regno)
7627 /* With a call to a function in another module, we will write a new
7628 value to "gp". After returning from such a call, we need to make
7629 sure the function restores the original gp-value, even if the
7630 function itself does not use the gp anymore. */
7631 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7633 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7634 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7635 /* For functions defined with the syscall_linkage attribute, all
7636 input registers are marked as live at all function exits. This
7637 prevents the register allocator from using the input registers,
7638 which in turn makes it possible to restart a system call after
7639 an interrupt without having to save/restore the input registers.
7640 This also prevents kernel data from leaking to application code. */
7641 return lookup_attribute ("syscall_linkage",
7642 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7645 /* Conditional return patterns can't represent the use of `b0' as
7646 the return address, so we force the value live this way. */
7650 /* Likewise for ar.pfs, which is used by br.ret. */
7658 /* Return true if REGNO is used by the frame unwinder. */
7661 ia64_eh_uses (int regno)
7663 if (! reload_completed)
7666 if (current_frame_info.reg_save_b0
7667 && regno == current_frame_info.reg_save_b0)
7669 if (current_frame_info.reg_save_pr
7670 && regno == current_frame_info.reg_save_pr)
7672 if (current_frame_info.reg_save_ar_pfs
7673 && regno == current_frame_info.reg_save_ar_pfs)
7675 if (current_frame_info.reg_save_ar_unat
7676 && regno == current_frame_info.reg_save_ar_unat)
7678 if (current_frame_info.reg_save_ar_lc
7679 && regno == current_frame_info.reg_save_ar_lc)
7685 /* Return true if this goes in small data/bss. */
7687 /* ??? We could also support own long data here. Generating movl/add/ld8
7688 instead of addl,ld8/ld8. This makes the code bigger, but should make the
7689 code faster because there is one less load. This also includes incomplete
7690 types which can't go in sdata/sbss. */
7693 ia64_in_small_data_p (tree exp)
7695 if (TARGET_NO_SDATA)
7698 /* We want to merge strings, so we never consider them small data. */
7699 if (TREE_CODE (exp) == STRING_CST)
7702 /* Functions are never small data. */
7703 if (TREE_CODE (exp) == FUNCTION_DECL)
7706 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7708 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7710 if (strcmp (section, ".sdata") == 0
7711 || strncmp (section, ".sdata.", 7) == 0
7712 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
7713 || strcmp (section, ".sbss") == 0
7714 || strncmp (section, ".sbss.", 6) == 0
7715 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
7720 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7722 /* If this is an incomplete type with size 0, then we can't put it
7723 in sdata because it might be too big when completed. */
7724 if (size > 0 && size <= ia64_section_threshold)
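/* An illustrative example of what the predicate above accepts: a small
   object whose size is below ia64_section_threshold is placed in
   .sdata/.sbss, and an explicit small-data section attribute is honored as
   well.  The declarations are hypothetical user code.  */
#if 0
int tiny_counter;                                  /* goes to .sbss when below the threshold */
int pinned __attribute__ ((section (".sdata")));   /* explicitly named small-data section */
#endif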
7731 /* Output assembly directives for prologue regions. */
7733 /* True if the current basic block is the last block of the function. */
7735 static bool last_block;
7737 /* True if we need a copy_state command at the start of the next block. */
7739 static bool need_copy_state;
7741 /* The function emits unwind directives for the start of an epilogue. */
7744 process_epilogue (void)
7746 /* If this isn't the last block of the function, then we need to label the
7747 current state, and copy it back in at the start of the next block. */
7751 fprintf (asm_out_file, "\t.label_state %d\n",
7752 ++cfun->machine->state_num);
7753 need_copy_state = true;
7756 fprintf (asm_out_file, "\t.restore sp\n");
7759 /* This function processes a SET pattern looking for specific patterns
7760 which result in emitting an assembly directive required for unwinding. */
7763 process_set (FILE *asm_out_file, rtx pat)
7765 rtx src = SET_SRC (pat);
7766 rtx dest = SET_DEST (pat);
7767 int src_regno, dest_regno;
7769 /* Look for the ALLOC insn. */
7770 if (GET_CODE (src) == UNSPEC_VOLATILE
7771 && XINT (src, 1) == UNSPECV_ALLOC
7772 && GET_CODE (dest) == REG)
7774 dest_regno = REGNO (dest);
7776 /* If this is the final destination for ar.pfs, then this must
7777 be the alloc in the prologue. */
7778 if (dest_regno == current_frame_info.reg_save_ar_pfs)
7779 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7780 ia64_dbx_register_number (dest_regno));
7783 /* This must be an alloc before a sibcall. We must drop the
7784 old frame info. The easiest way to drop the old frame
7785 info is to ensure we had a ".restore sp" directive
7786 followed by a new prologue. If the procedure doesn't
7787 have a memory-stack frame, we'll issue a dummy ".restore sp" now. */
7789 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
7790 /* If we haven't done process_epilogue () yet, do it now. */
7791 process_epilogue ();
7792 fprintf (asm_out_file, "\t.prologue\n");
7797 /* Look for SP = .... */
7798 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7800 if (GET_CODE (src) == PLUS)
7802 rtx op0 = XEXP (src, 0);
7803 rtx op1 = XEXP (src, 1);
7805 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
7807 if (INTVAL (op1) < 0)
7808 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7811 process_epilogue ();
7815 gcc_assert (GET_CODE (src) == REG
7816 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
7817 process_epilogue ();
7823 /* Register move we need to look at. */
7824 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7826 src_regno = REGNO (src);
7827 dest_regno = REGNO (dest);
7832 /* Saving return address pointer. */
7833 gcc_assert (dest_regno == current_frame_info.reg_save_b0);
7834 fprintf (asm_out_file, "\t.save rp, r%d\n",
7835 ia64_dbx_register_number (dest_regno));
7839 gcc_assert (dest_regno == current_frame_info.reg_save_pr);
7840 fprintf (asm_out_file, "\t.save pr, r%d\n",
7841 ia64_dbx_register_number (dest_regno));
7844 case AR_UNAT_REGNUM:
7845 gcc_assert (dest_regno == current_frame_info.reg_save_ar_unat);
7846 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7847 ia64_dbx_register_number (dest_regno));
7851 gcc_assert (dest_regno == current_frame_info.reg_save_ar_lc);
7852 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7853 ia64_dbx_register_number (dest_regno));
7856 case STACK_POINTER_REGNUM:
7857 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
7858 && frame_pointer_needed);
7859 fprintf (asm_out_file, "\t.vframe r%d\n",
7860 ia64_dbx_register_number (dest_regno));
7864 /* Everything else should indicate being stored to memory. */
7869 /* Memory store we need to look at. */
7870 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7876 if (GET_CODE (XEXP (dest, 0)) == REG)
7878 base = XEXP (dest, 0);
7883 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
7884 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
7885 base = XEXP (XEXP (dest, 0), 0);
7886 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7889 if (base == hard_frame_pointer_rtx)
7891 saveop = ".savepsp";
7896 gcc_assert (base == stack_pointer_rtx);
7900 src_regno = REGNO (src);
7904 gcc_assert (!current_frame_info.reg_save_b0);
7905 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7909 gcc_assert (!current_frame_info.reg_save_pr);
7910 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7914 gcc_assert (!current_frame_info.reg_save_ar_lc);
7915 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7919 gcc_assert (!current_frame_info.reg_save_ar_pfs);
7920 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7923 case AR_UNAT_REGNUM:
7924 gcc_assert (!current_frame_info.reg_save_ar_unat);
7925 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7932 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7933 1 << (src_regno - GR_REG (4)));
7941 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7942 1 << (src_regno - BR_REG (1)));
7949 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7950 1 << (src_regno - FR_REG (2)));
7953 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7954 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7955 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7956 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7957 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7958 1 << (src_regno - FR_REG (12)));
7970 /* This function looks at a single insn and emits any directives
7971 required to unwind this insn. */
7973 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7975 if (flag_unwind_tables
7976 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7980 if (GET_CODE (insn) == NOTE
7981 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7983 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7985 /* Restore unwind state from immediately before the epilogue. */
7986 if (need_copy_state)
7988 fprintf (asm_out_file, "\t.body\n");
7989 fprintf (asm_out_file, "\t.copy_state %d\n",
7990 cfun->machine->state_num);
7991 need_copy_state = false;
7995 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7998 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8000 pat = XEXP (pat, 0);
8002 pat = PATTERN (insn);
8004 switch (GET_CODE (pat))
8007 process_set (asm_out_file, pat);
8013 int limit = XVECLEN (pat, 0);
8014 for (par_index = 0; par_index < limit; par_index++)
8016 rtx x = XVECEXP (pat, 0, par_index);
8017 if (GET_CODE (x) == SET)
8018 process_set (asm_out_file, x);
8033 IA64_BUILTIN_FLUSHRS
8037 ia64_init_builtins (void)
8042 /* The __fpreg type. */
8043 fpreg_type = make_node (REAL_TYPE);
8044 /* ??? The back end should know to load/save __fpreg variables using
8045 the ldf.fill and stf.spill instructions. */
8046 TYPE_PRECISION (fpreg_type) = 80;
8047 layout_type (fpreg_type);
8048 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
8050 /* The __float80 type. */
8051 float80_type = make_node (REAL_TYPE);
8052 TYPE_PRECISION (float80_type) = 80;
8053 layout_type (float80_type);
8054 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
8056 /* The __float128 type. */
8059 tree float128_type = make_node (REAL_TYPE);
8060 TYPE_PRECISION (float128_type) = 128;
8061 layout_type (float128_type);
8062 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
8065 /* Under HPUX, this is a synonym for "long double". */
8066 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
8069 #define def_builtin(name, type, code) \
8070 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
8073 def_builtin ("__builtin_ia64_bsp",
8074 build_function_type (ptr_type_node, void_list_node),
8077 def_builtin ("__builtin_ia64_flushrs",
8078 build_function_type (void_type_node, void_list_node),
8079 IA64_BUILTIN_FLUSHRS);
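/* An illustrative example (hypothetical user code) of the two builtins
   registered above.  */
#if 0
void
example (void)
{
  void *bsp = __builtin_ia64_bsp ();   /* current register backing store pointer */
  __builtin_ia64_flushrs ();           /* flush the register stack to the backing store */
}
#endif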
8085 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8086 enum machine_mode mode ATTRIBUTE_UNUSED,
8087 int ignore ATTRIBUTE_UNUSED)
8089 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8090 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8094 case IA64_BUILTIN_BSP:
8095 if (! target || ! register_operand (target, DImode))
8096 target = gen_reg_rtx (DImode);
8097 emit_insn (gen_bsp_value (target));
8098 #ifdef POINTERS_EXTEND_UNSIGNED
8099 target = convert_memory_address (ptr_mode, target);
8103 case IA64_BUILTIN_FLUSHRS:
8104 emit_insn (gen_flushrs ());
8114 /* On HP-UX IA64, aggregate parameters are passed stored in the
8115 most significant bits of the stack slot. */
8118 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8120 /* Exception to normal case for structures/unions/etc. */
8122 if (type && AGGREGATE_TYPE_P (type)
8123 && int_size_in_bytes (type) < UNITS_PER_WORD)
8126 /* Fall back to the default. */
8127 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
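/* An illustrative example: for a small aggregate such as the hypothetical
   struct below, the convention above means its bytes occupy the most
   significant end of the 8-byte argument slot rather than the least
   significant end.  */
#if 0
struct small { char a, b; };            /* 2 data bytes in an 8-byte slot */
extern void callee (struct small s);    /* s is padded per the rule above */
#endif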
8130 /* Linked list of all external functions that are to be emitted by GCC.
8131 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8132 order to avoid putting out names that are never really used. */
8134 struct extern_func_list GTY(())
8136 struct extern_func_list *next;
8140 static GTY(()) struct extern_func_list *extern_func_head;
8143 ia64_hpux_add_extern_decl (tree decl)
8145 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8148 p->next = extern_func_head;
8149 extern_func_head = p;
8152 /* Print out the list of used global functions. */
8155 ia64_hpux_file_end (void)
8157 struct extern_func_list *p;
8159 for (p = extern_func_head; p; p = p->next)
8161 tree decl = p->decl;
8162 tree id = DECL_ASSEMBLER_NAME (decl);
8166 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8168 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8170 TREE_ASM_WRITTEN (decl) = 1;
8171 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8172 fputs (TYPE_ASM_OP, asm_out_file);
8173 assemble_name (asm_out_file, name);
8174 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8178 extern_func_head = 0;
8181 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
8182 modes of word_mode and larger. Rename the TFmode libfuncs using the
8183 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
8184 backward compatibility. */
8187 ia64_init_libfuncs (void)
8189 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8190 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8191 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8192 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8194 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8195 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8196 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8197 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8198 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8200 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8201 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8202 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8203 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8204 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8205 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8207 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8208 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8209 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8210 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8212 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8213 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8216 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8219 ia64_hpux_init_libfuncs (void)
8221 ia64_init_libfuncs ();
8223 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8224 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8225 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8227 /* ia64_expand_compare uses this. */
8228 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8230 /* These should never be used. */
8231 set_optab_libfunc (eq_optab, TFmode, 0);
8232 set_optab_libfunc (ne_optab, TFmode, 0);
8233 set_optab_libfunc (gt_optab, TFmode, 0);
8234 set_optab_libfunc (ge_optab, TFmode, 0);
8235 set_optab_libfunc (lt_optab, TFmode, 0);
8236 set_optab_libfunc (le_optab, TFmode, 0);
8239 /* Rename the division and modulus functions in VMS. */
8242 ia64_vms_init_libfuncs (void)
8244 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8245 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8246 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8247 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8248 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8249 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8250 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8251 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8254 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8255 the HPUX conventions. */
8258 ia64_sysv4_init_libfuncs (void)
8260 ia64_init_libfuncs ();
8262 /* These functions are not part of the HPUX TFmode interface. We
8263 use them instead of _U_Qfcmp, which doesn't work the way we expect. */
8265 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8266 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8267 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8268 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8269 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8270 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8272 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8273 glibc doesn't have them. */
8276 /* Switch to the section to which we should output X. The only thing
8277 special we do here is to honor small data. */
8280 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8281 unsigned HOST_WIDE_INT align)
8283 if (GET_MODE_SIZE (mode) > 0
8284 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8287 default_elf_select_rtx_section (mode, x, align);
8290 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8291 Pretend flag_pic is always set. */
8294 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8296 default_elf_select_section_1 (exp, reloc, align, true);
8300 ia64_rwreloc_unique_section (tree decl, int reloc)
8302 default_unique_section_1 (decl, reloc, true);
8306 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8307 unsigned HOST_WIDE_INT align)
8309 int save_pic = flag_pic;
8311 ia64_select_rtx_section (mode, x, align);
8312 flag_pic = save_pic;
8315 #ifndef TARGET_RWRELOC
8316 #define TARGET_RWRELOC flag_pic
8320 ia64_section_type_flags (tree decl, const char *name, int reloc)
8322 unsigned int flags = 0;
8324 if (strcmp (name, ".sdata") == 0
8325 || strncmp (name, ".sdata.", 7) == 0
8326 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
8327 || strncmp (name, ".sdata2.", 8) == 0
8328 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
8329 || strcmp (name, ".sbss") == 0
8330 || strncmp (name, ".sbss.", 6) == 0
8331 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
8332 flags = SECTION_SMALL;
8334 flags |= default_section_type_flags_1 (decl, name, reloc, TARGET_RWRELOC);
8338 /* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
8339 structure type and the address of that type should be passed
8340 in out0, rather than in r8. */
8343 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8345 tree ret_type = TREE_TYPE (fntype);
8347 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8348 as the structure return address parameter, if the return value
8349 type has a non-trivial copy constructor or destructor. It is not
8350 clear if this same convention should be used for other
8351 programming languages. Until G++ 3.4, we incorrectly used r8 for
8352 these return values. */
8353 return (abi_version_at_least (2)
8355 && TYPE_MODE (ret_type) == BLKmode
8356 && TREE_ADDRESSABLE (ret_type)
8357 && strcmp (lang_hooks.name, "GNU C++") == 0);
8360 /* Output the assembler code for a thunk function. THUNK_DECL is the
8361 declaration for the thunk function itself, FUNCTION is the decl for
8362 the target function. DELTA is an immediate constant offset to be
8363 added to THIS. If VCALL_OFFSET is nonzero, the word at
8364 *(*this + vcall_offset) should be added to THIS. */
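/* An illustrative sketch of the pointer adjustment the emitted thunk
   performs, written as plain C; the helper name and the long casts are
   hypothetical, and DELTA / VCALL_OFFSET stand for the constants passed in.  */
#if 0
static void *
thunk_adjust_this (void *this_ptr, long delta, long vcall_offset)
{
  char *p = (char *) this_ptr + delta;          /* apply the constant offset */
  if (vcall_offset != 0)
    {
      char *vtable = *(char **) p;              /* *this points to the vtable */
      p += *(long *) (vtable + vcall_offset);   /* add the adjustment stored there */
    }
  return p;
}
#endif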
8367 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
8368 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8371 rtx this, insn, funexp;
8372 unsigned int this_parmno;
8373 unsigned int this_regno;
8375 reload_completed = 1;
8376 epilogue_completed = 1;
8378 reset_block_changes ();
8380 /* Set things up as ia64_expand_prologue might. */
8381 last_scratch_gr_reg = 15;
8383 memset (&current_frame_info, 0, sizeof (current_frame_info));
8384 current_frame_info.spill_cfa_off = -16;
8385 current_frame_info.n_input_regs = 1;
8386 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
8388 /* Mark the end of the (empty) prologue. */
8389 emit_note (NOTE_INSN_PROLOGUE_END);
8391 /* Figure out whether "this" will be the first parameter (the
8392 typical case) or the second parameter (as happens when the
8393 virtual function returns certain class objects). */
8395 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
8397 this_regno = IN_REG (this_parmno);
8398 if (!TARGET_REG_NAMES)
8399 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
8401 this = gen_rtx_REG (Pmode, this_regno);
8404 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
8405 REG_POINTER (tmp) = 1;
8406 if (delta && CONST_OK_FOR_I (delta))
8408 emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
8412 emit_insn (gen_ptr_extend (this, tmp));
8415 /* Apply the constant offset, if required. */
8418 rtx delta_rtx = GEN_INT (delta);
8420 if (!CONST_OK_FOR_I (delta))
8422 rtx tmp = gen_rtx_REG (Pmode, 2);
8423 emit_move_insn (tmp, delta_rtx);
8426 emit_insn (gen_adddi3 (this, this, delta_rtx));
8429 /* Apply the offset from the vtable, if required. */
8432 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
8433 rtx tmp = gen_rtx_REG (Pmode, 2);
8437 rtx t = gen_rtx_REG (ptr_mode, 2);
8438 REG_POINTER (t) = 1;
8439 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
8440 if (CONST_OK_FOR_I (vcall_offset))
8442 emit_insn (gen_ptr_extend_plus_imm (tmp, t,
8447 emit_insn (gen_ptr_extend (tmp, t));
8450 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));
8454 if (!CONST_OK_FOR_J (vcall_offset))
8456 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
8457 emit_move_insn (tmp2, vcall_offset_rtx);
8458 vcall_offset_rtx = tmp2;
8460 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
8464 emit_move_insn (gen_rtx_REG (ptr_mode, 2),
8465 gen_rtx_MEM (ptr_mode, tmp));
8467 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
8469 emit_insn (gen_adddi3 (this, this, tmp));
8472 /* Generate a tail call to the target function. */
8473 if (! TREE_USED (function))
8475 assemble_external (function);
8476 TREE_USED (function) = 1;
8478 funexp = XEXP (DECL_RTL (function), 0);
8479 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
8480 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
8481 insn = get_last_insn ();
8482 SIBLING_CALL_P (insn) = 1;
8484 /* Code generation for calls relies on splitting. */
8485 reload_completed = 1;
8486 epilogue_completed = 1;
8487 try_split (PATTERN (insn), insn, 0);
8491 /* Run just enough of rest_of_compilation to get the insns emitted.
8492 There's not really enough bulk here to make other passes such as
8493 instruction scheduling worthwhile. Note that use_thunk calls
8494 assemble_start_function and assemble_end_function. */
8496 insn_locators_initialize ();
8497 emit_all_insn_group_barriers (NULL);
8498 insn = get_insns ();
8499 shorten_branches (insn);
8500 final_start_function (insn, file, 1);
8501 final (insn, file, 1);
8502 final_end_function ();
8504 reload_completed = 0;
8505 epilogue_completed = 0;
8509 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
8512 ia64_struct_value_rtx (tree fntype,
8513 int incoming ATTRIBUTE_UNUSED)
8515 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
8517 return gen_rtx_REG (Pmode, GR_REG (8));
8521 ia64_scalar_mode_supported_p (enum machine_mode mode)
8546 ia64_vector_mode_supported_p (enum machine_mode mode)
8564 ia64_output_function_profiler (FILE *file, int labelno)
8567 fputs ("\t.prologue 4, r40\n", file);
8569 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
8570 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
8572 if (NO_PROFILE_COUNTERS)
8573 fputs ("\tmov out3 = r0\n\t;;\n", file);
8577 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8579 if (TARGET_AUTO_PIC)
8580 fputs ("\tmovl out3 = @gprel(", file);
8582 fputs ("\taddl out3 = @ltoff(", file);
8583 assemble_name (file, buf);
8584 if (TARGET_AUTO_PIC)
8585 fputs (")\n\t;;\n", file);
8587 fputs ("), r1\n\t;;\n", file);
8590 fputs ("\t.save rp, r42\n", file);
8591 fputs ("\tmov out2 = b0\n", file);
8592 fputs ("\t.body\n", file);
8593 fputs ("\tmov out1 = r1\n", file);
8594 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
8597 static GTY(()) rtx mcount_func_rtx;
8599 gen_mcount_func_rtx (void)
8601 if (!mcount_func_rtx)
8602 mcount_func_rtx = init_one_libfunc ("_mcount");
8603 return mcount_func_rtx;
8607 ia64_profile_hook (int labelno)
8611 if (NO_PROFILE_COUNTERS)
8616 const char *label_name;
8617 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
8618 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
8619 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
8620 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
8622 ip = gen_reg_rtx (Pmode);
8623 emit_insn (gen_ip_value (ip));
8624 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
8626 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
8631 #include "gt-ia64.h"