/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
   David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;

/* Define the information needed to generate branch and scc insns.  This is
   stored from the compare operation.  */
struct rtx_def * ia64_compare_op0;
struct rtx_def * ia64_compare_op1;

/* Register names for ia64_expand_prologue.  */
static const char * const ia64_reg_numbers[96] =
{ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
  "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
  "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
  "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
  "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
  "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
  "r104","r105","r106","r107","r108","r109","r110","r111",
  "r112","r113","r114","r115","r116","r117","r118","r119",
  "r120","r121","r122","r123","r124","r125","r126","r127"};

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_input_reg_names[8] =
{ "in0",  "in1",  "in2",  "in3",  "in4",  "in5",  "in6",  "in7" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
  "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
  "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
  "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
  "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
  "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
  "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
  "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
  "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
  "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* String used with the -mfixed-range= option.  */
const char *ia64_fixed_range_string;

/* Determines whether we use adds, addl, or movl to generate our
   TLS immediate offsets.  */
int ia64_tls_size = 22;
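/* (The three sizes correspond to the immediate widths of the IA-64
   add/move forms: "adds" encodes a 14-bit immediate, "addl" a 22-bit
   immediate, and "movl" a full 64-bit one, so the default of 22
   selects "addl".)  */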
/* String used with the -mtls-size= option.  */
const char *ia64_tls_size_string;

/* Which cpu are we scheduling for.  */
enum processor_type ia64_tune;

/* String used with the -mtune= option.  */
const char *ia64_tune_string;
/* Determines whether we run our final scheduling pass or not.  We always
   avoid the normal second scheduling pass.  */
static int ia64_flag_schedule_insns2;

/* Determines whether we run variable tracking in machine dependent
   reorg.  */
static int ia64_flag_var_tracking;

/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */
unsigned int ia64_section_threshold;
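/* (Objects at or below the threshold land in .sdata/.sbss, where they
   can be reached with a single gp-relative "addl"; see
   sdata_symbolic_operand and gen_load_gprel below.)  */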
/* The following variable is used by the DFA insn scheduler.  The value is
   TRUE if we do insn bundling instead of insn scheduling.  */
/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;	/* size of the stack frame, not including
				   the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;	/* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;	/* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size;  /* size of spill area for others.  */
  HARD_REG_SET mask;		/* mask of saved registers.  */
  unsigned int gr_used_mask;	/* mask of registers in use as gr spill
				   registers or long-term scratches.  */
  int n_spilled;		/* number of spilled registers.  */
  int reg_fp;			/* register for fp.  */
  int reg_save_b0;		/* save register for b0.  */
  int reg_save_pr;		/* save register for prs.  */
  int reg_save_ar_pfs;		/* save register for ar.pfs.  */
  int reg_save_ar_unat;		/* save register for ar.unat.  */
  int reg_save_ar_lc;		/* save register for ar.lc.  */
  int reg_save_gp;		/* save register for gp.  */
  int n_input_regs;		/* number of input registers used.  */
  int n_local_regs;		/* number of local registers used.  */
  int n_output_regs;		/* number of output registers used.  */
  int n_rotate_regs;		/* number of rotating registers used.  */
  char need_regstk;		/* true if a .regstk directive needed.  */
  char initialized;		/* true if the data is finalized.  */
};
/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;

static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx, rtx);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
static int find_gr_spill (int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);
static enum machine_mode hfa_element_mode (tree, int);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				    tree, bool);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (tree, tree);
static bool ia64_rtx_costs (rtx, int, int, int *);
static void fix_range (const char *);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (tree);
static void process_epilogue (void);
static int process_set (FILE *, rtx);
static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
					 int, tree, rtx);
static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_end_prologue (FILE *);

static int ia64_issue_rate (void);
static int ia64_adjust_cost (rtx, rtx, rtx, int);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);

static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static unsigned bundle_state_hash (const void *);
static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);

static rtx get_next_important_insn (rtx, rtx);
static void bundling (FILE *, int, rtx, rtx);

static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static void ia64_file_start (void);

static void ia64_select_rtx_section (enum machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_unique_section (tree, int)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
					     unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
     ATTRIBUTE_UNUSED;

static void ia64_hpux_add_extern_decl (tree decl)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_file_end (void)
     ATTRIBUTE_UNUSED;
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);

/* Table of valid machine attributes.  */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "syscall_linkage", 0, 0, false, true,  true,  NULL },
  { "model",           1, 1, true,  false, false, ia64_handle_model_attribute },
  { NULL,              0, 0, false, false, false, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
  ia64_first_cycle_multipass_dfa_lookahead_guard

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#endif

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#endif

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg

#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT process_for_unwind_directive

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
struct gcc_target targetm = TARGET_INITIALIZER;

typedef enum
  {
    ADDR_AREA_NORMAL,	/* normal address area */
    ADDR_AREA_SMALL	/* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
	return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}

static tree
ia64_handle_model_attribute (tree *node, tree name, tree args,
			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  ia64_addr_area addr_area = ADDR_AREA_NORMAL;
  ia64_addr_area area;
  tree arg, decl = *node;

  init_idents ();
  arg = TREE_VALUE (args);
  if (arg == small_ident1 || arg == small_ident2)
    {
      addr_area = ADDR_AREA_SMALL;
    }
  else
    {
      warning ("invalid argument of `%s' attribute",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  switch (TREE_CODE (decl))
    {
    case VAR_DECL:
      if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
	   == FUNCTION_DECL)
	  && !TREE_STATIC (decl))
	{
	  error ("%Jan address area attribute cannot be specified for "
		 "local variables", decl, decl);
	  *no_add_attrs = true;
	}
      area = ia64_get_addr_area (decl);
      if (area != ADDR_AREA_NORMAL && addr_area != area)
	{
	  error ("%Jaddress area of '%s' conflicts with previous "
		 "declaration", decl, decl);
	  *no_add_attrs = true;
	}
      break;

    case FUNCTION_DECL:
      error ("%Jaddress area attribute cannot be specified for functions",
	     decl, decl);
      *no_add_attrs = true;
      break;

    default:
      warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
      break;
    }

  return NULL_TREE;
}
static void
ia64_encode_addr_area (tree decl, rtx symbol)
{
  int flags;

  flags = SYMBOL_REF_FLAGS (symbol);
  switch (ia64_get_addr_area (decl))
    {
    case ADDR_AREA_NORMAL: break;
    case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
    default: abort ();
    }
  SYMBOL_REF_FLAGS (symbol) = flags;
}

static void
ia64_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) == VAR_DECL
      && GET_CODE (DECL_RTL (decl)) == MEM
      && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
    ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (rtx dst, rtx src)
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */
  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
}

int
addp4_optimize_ok (rtx op1, rtx op2)
{
  return (basereg_operand (op1, GET_MODE(op1)) !=
	  basereg_operand (op2, GET_MODE(op2)));
}
/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
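/* Worked example: for ROP = 0xff00 and RSHIFT = 8, the mask shifts down
   to 0xff, a solid block of eight 1 bits, and exact_log2 (0xff + 1) = 8
   is the dep.z field length.  A non-contiguous mask such as 0xf0f0
   fails: 0xf0f0 >> 4 == 0xf0f, and exact_log2 (0xf10) is -1.  */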
/* Expand a symbolic constant load.  */

void
ia64_expand_load_address (rtx dest, rtx src)
{
  if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (src))
    abort ();
  if (GET_CODE (dest) != REG)
    abort ();

  /* ILP32 mode still loads 64-bits of data from the GOT.  This avoids
     having to pointer-extend the value afterward.  Other forms of address
     computation below are also more natural to compute as 64-bit quantities.
     If we've been given an SImode destination register, change it.  */
  if (GET_MODE (dest) != Pmode)
    dest = gen_rtx_REG (Pmode, REGNO (dest));

  if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
      return;
    }
  else if (TARGET_AUTO_PIC)
    {
      emit_insn (gen_load_gprel64 (dest, src));
      return;
    }
  else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
    {
      emit_insn (gen_load_fptr (dest, src));
      return;
    }
  else if (sdata_symbolic_operand (src, VOIDmode))
    {
      emit_insn (gen_load_gprel (dest, src));
      return;
    }

  if (GET_CODE (src) == CONST
      && GET_CODE (XEXP (src, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
      && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x3fff) != 0)
    {
      rtx sym = XEXP (XEXP (src, 0), 0);
      HOST_WIDE_INT ofs, hi, lo;
      /* Split the offset into a sign extended 14-bit low part
	 and a complementary high part.  */
      ofs = INTVAL (XEXP (XEXP (src, 0), 1));
      lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
      hi = ofs - lo;
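      /* For example, ofs = 0x12345 yields lo = 0x345 - 0x2000 = -0x1cbb
	 and hi = 0x14000: hi has its low 14 bits clear, lo fits the
	 signed 14-bit "adds" immediate, and hi + lo == 0x12345.  */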
      ia64_expand_load_address (dest, plus_constant (sym, hi));
      emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
    }
  else
    {
      rtx tmp;

      tmp = gen_rtx_HIGH (Pmode, src);
      tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));

      tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
    }
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static GTY(()) rtx thread_pointer_rtx;

static rtx
gen_thread_pointer (void)
{
  if (!thread_pointer_rtx)
    thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
  return thread_pointer_rtx;
}
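/* (r13 is the thread pointer "tp" under the IA-64 software conventions,
   which is why gen_thread_pointer simply materializes that fixed
   register.)  */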
static rtx
ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
{
  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
  rtx orig_op0 = op0;

  switch (tls_kind)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_const_mem (Pmode, tga_op1);

      tga_op2 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
      tga_op2 = gen_const_mem (Pmode, tga_op2);

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      if (GET_MODE (op0) != Pmode)
	op0 = tga_ret;
      emit_libcall_block (insns, op0, tga_ret, op1);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic
	 If the call to __tls_get_addr is used only by a single symbol,
	 then we should (somehow) move the dtprel to the second arg
	 to avoid the extra add.  */
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_const_mem (Pmode, tga_op1);

      tga_op2 = const0_rtx;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_LD_BASE);
      tmp = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, tmp, tga_ret, tga_eqv);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_dtprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, tmp, op0));
	}
      else
	emit_insn (gen_add_dtprel (op0, tmp, op1));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_tprel (tmp, op1));
      tmp = gen_const_mem (Pmode, tmp);
      tmp = force_reg (Pmode, tmp);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_tprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
	}
      else
	emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
      break;

    default:
      abort ();
    }

  if (GET_MODE (orig_op0) == Pmode)
    return op0;
  return gen_lowpart (GET_MODE (orig_op0), op0);
}
rtx
ia64_expand_move (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
    op1 = force_reg (mode, op1);

  if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
    {
      enum tls_model tls_kind;
      if (GET_CODE (op1) == SYMBOL_REF
	  && (tls_kind = SYMBOL_REF_TLS_MODEL (op1)))
	return ia64_expand_tls_address (tls_kind, op0, op1);

      if (!TARGET_NO_PIC && reload_completed)
	{
	  ia64_expand_load_address (op0, op1);
	  return NULL_RTX;
	}
    }

  return op1;
}

/* Split a move from OP1 to OP0 conditional on COND.  */

void
ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
{
  rtx insn, first = get_last_insn ();

  emit_move_insn (op0, op1);

  for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
					  PATTERN (insn));
}
/* Split a post-reload TImode or TFmode reference into two DImode
   components.  This is made extra difficult by the fact that we do
   not get any scratch registers to work with, because reload cannot
   be prevented from giving us a scratch that overlaps the register
   pair involved.  So instead, when addressing memory, we tweak the
   pointer register up and back down with POST_INCs.  Or up and not
   back down when we can get away with it.

   REVERSED is true when the loads must be done in reversed order
   (high word first) for correctness.  DEAD is true when the pointer
   dies with the second insn we generate and therefore the second
   address must not carry a postmodify.

   May return an insn which is to be emitted after the moves.  */
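/* Roughly, a TImode load from [rN] comes out in the common case as
	ld8 rX = [rN], 8	;; post-increment up
	ld8 rY = [rN], -8	;; post-decrement back down
   with the second post-modify omitted when DEAD allows it.  */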
static rtx
ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
{
  rtx fixup = 0;

  switch (GET_CODE (in))
    {
    case REG:
      out[reversed] = gen_rtx_REG (DImode, REGNO (in));
      out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      /* Cannot occur reversed.  */
      if (reversed) abort ();

      if (GET_MODE (in) != TFmode)
	split_double (in, &out[0], &out[1]);
      else
	/* split_double does not understand how to split a TFmode
	   quantity into a pair of DImode constants.  */
	{
	  REAL_VALUE_TYPE r;
	  unsigned HOST_WIDE_INT p[2];
	  long l[4];  /* TFmode is 128 bits */

	  REAL_VALUE_FROM_CONST_DOUBLE (r, in);
	  real_to_target (l, &r, TFmode);

	  if (FLOAT_WORDS_BIG_ENDIAN)
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
	      p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
	    }
	  else
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
	      p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
	    }
	  out[0] = GEN_INT (p[0]);
	  out[1] = GEN_INT (p[1]);
	}
      break;

    case MEM:
      {
	rtx base = XEXP (in, 0);
	rtx offset;

	switch (GET_CODE (base))
	  {
	  case REG:
	    if (!reversed)
	      {
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
		out[1] = adjust_automodify_address
		  (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
	      }
	    else
	      {
		/* Reversal requires a pre-increment, which can only
		   be done as a separate insn.  */
		emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
		out[1] = adjust_address (in, DImode, 0);
	      }
	    break;

	  case POST_INC:
	    if (reversed || dead) abort ();
	    /* Just do the increment in two steps.  */
	    out[0] = adjust_automodify_address (in, DImode, 0, 0);
	    out[1] = adjust_automodify_address (in, DImode, 0, 8);
	    break;

	  case POST_DEC:
	    if (reversed || dead) abort ();
	    /* Add 8, subtract 24.  */
	    base = XEXP (base, 0);
	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
	    out[1] = adjust_automodify_address
	      (in, DImode,
	       gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
	       8);
	    break;

	  case POST_MODIFY:
	    if (reversed || dead) abort ();
	    /* Extract and adjust the modification.  This case is
	       trickier than the others, because we might have an
	       index register, or we might have a combined offset that
	       doesn't fit a signed 9-bit displacement field.  We can
	       assume the incoming expression is already legitimate.  */
	    offset = XEXP (base, 1);
	    base = XEXP (base, 0);

	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);

	    if (GET_CODE (XEXP (offset, 1)) == REG)
	      {
		/* Can't adjust the postmodify to match.  Emit the
		   original, then a separate addition insn.  */
		out[1] = adjust_automodify_address (in, DImode, 0, 8);
		fixup = gen_adddi3 (base, base, GEN_INT (-8));
	      }
	    else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
	      abort ();
	    else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
	      {
		/* Again the postmodify cannot be made to match, but
		   in this case it's more efficient to get rid of the
		   postmodify entirely and fix up with an add insn.  */
		out[1] = adjust_automodify_address (in, DImode, base, 8);
		fixup = gen_adddi3 (base, base,
				    GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
	      }
	    else
	      {
		/* Combined offset still fits in the displacement field.
		   (We cannot overflow it at the high end.)  */
		out[1] = adjust_automodify_address
		  (in, DImode,
		   gen_rtx_POST_MODIFY (Pmode, base,
					gen_rtx_PLUS (Pmode, base,
						      GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
		   8);
	      }
	    break;

	  default:
	    abort ();
	  }
      }
      break;

    default:
      abort ();
    }

  return fixup;
}
/* Split a TImode or TFmode move instruction after reload.
   This is used by *movtf_internal and *movti_internal.  */
void
ia64_split_tmode_move (rtx operands[])
{
  rtx in[2], out[2], insn;
  rtx fixup[2];
  bool dead = false;
  bool reversed = false;

  /* It is possible for reload to decide to overwrite a pointer with
     the value it points to.  In that case we have to do the loads in
     the appropriate order so that the pointer is not destroyed too
     early.  Also we must not generate a postmodify for that second
     load, or rws_access_regno will abort.  */
  if (GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      rtx base = XEXP (operands[1], 0);
      while (GET_CODE (base) != REG)
	base = XEXP (base, 0);

      if (REGNO (base) == REGNO (operands[0]))
	reversed = true;
      dead = true;
    }
  /* Another reason to do the moves in reversed order is if the first
     element of the target register pair is also the second element of
     the source register pair.  */
  if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    reversed = true;

  fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
  fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);

#define MAYBE_ADD_REG_INC_NOTE(INSN, EXP)				\
  if (GET_CODE (EXP) == MEM						\
      && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_INC			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_DEC))			\
    REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC,			\
					  XEXP (XEXP (EXP, 0), 0),	\
					  REG_NOTES (INSN))

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[0]);

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[1]);

  if (fixup[0])
    emit_insn (fixup[0]);
  if (fixup[1])
    emit_insn (fixup[1]);

#undef MAYBE_ADD_REG_INC_NOTE
}
/* ??? Fixing GR->FR XFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from SECONDARY_MEMORY_NEEDED or the second from
   SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:XF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */

static rtx
spill_xfmode_operand (rtx in, int force)
{
  if (GET_CODE (in) == SUBREG
      && GET_MODE (SUBREG_REG (in)) == TImode
      && GET_CODE (SUBREG_REG (in)) == REG)
    {
      rtx memt = assign_stack_temp (TImode, 16, 0);
      emit_move_insn (memt, SUBREG_REG (in));
      return adjust_address (memt, XFmode, 0);
    }
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (XFmode, 16, 0);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
/* Emit comparison instruction if necessary, returning the expression
   that holds the compare result in the proper mode.  */

static GTY(()) rtx cmptf_libfunc;

rtx
ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
{
  rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
  rtx cmp;

  /* If we have a BImode input, then we already have a compare result, and
     do not need to emit another comparison.  */
  if (GET_MODE (op0) == BImode)
    {
      if ((code == NE || code == EQ) && op1 == const0_rtx)
	cmp = op0;
      else
	abort ();
    }
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
  else if (GET_MODE (op0) == TFmode)
    {
      enum qfcmp_magic {
	QCMP_INV = 1,	/* Raise FP_INVALID on SNaN as a side effect.  */
	QCMP_UNORD = 2,
	QCMP_EQ = 4,
	QCMP_LT = 8,
	QCMP_GT = 16
      } magic;
      enum rtx_code ncode;
      rtx ret, insns;
      if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
	abort ();
      switch (code)
	{
	  /* 1 = equal, 0 = not equal.  Equality operators do
	     not raise FP_INVALID when given an SNaN operand.  */
	case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
	case NE:        magic = QCMP_EQ;                  ncode = EQ; break;
	  /* isunordered() from C99.  */
	case UNORDERED: magic = QCMP_UNORD;               ncode = NE; break;
	case ORDERED:   magic = QCMP_UNORD;               ncode = EQ; break;
	  /* Relational operators raise FP_INVALID when given
	     an SNaN operand.  */
	case LT:        magic = QCMP_LT        |QCMP_INV; ncode = NE; break;
	case LE:        magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	case GT:        magic = QCMP_GT        |QCMP_INV; ncode = NE; break;
	case GE:        magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	  /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
	     Expanders for buneq etc. would have to be added to ia64.md
	     for this to be useful.  */
	default: abort ();
	}

      start_sequence ();

      ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
				     op0, TFmode, op1, TFmode,
				     GEN_INT (magic), DImode);
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (ncode, BImode,
					      ret, const0_rtx)));

      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, cmp, cmp,
			  gen_rtx_fmt_ee (code, BImode, op0, op1));
      code = NE;
    }
  else
    {
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (code, BImode, op0, op1)));
      code = NE;
    }

  return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
}
/* Emit the appropriate sequence for a call.  */

void
ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
		  int sibcall_p)
{
  rtx insn, b0;

  addr = XEXP (addr, 0);
  addr = convert_memory_address (DImode, addr);
  b0 = gen_rtx_REG (DImode, R_BR (0));

  /* ??? Should do this for functions known to bind local too.  */
  if (TARGET_NO_PIC || TARGET_AUTO_PIC)
    {
      if (sibcall_p)
	insn = gen_sibcall_nogp (addr);
      else if (! retval)
	insn = gen_call_nogp (addr, b0);
      else
	insn = gen_call_value_nogp (retval, addr, b0);
      insn = emit_call_insn (insn);
    }
  else
    {
      if (sibcall_p)
	insn = gen_sibcall_gp (addr);
      else if (! retval)
	insn = gen_call_gp (addr, b0);
      else
	insn = gen_call_value_gp (retval, addr, b0);
      insn = emit_call_insn (insn);

      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
    }

  if (sibcall_p)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
}
static void
ia64_reload_gp (void)
{
  rtx tmp;

  if (current_frame_info.reg_save_gp)
    tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
  else
    {
      HOST_WIDE_INT offset;

      offset = (current_frame_info.spill_cfa_off
		+ current_frame_info.spill_size);
      if (frame_pointer_needed)
	{
	  tmp = hard_frame_pointer_rtx;
	  offset = -offset;
	}
      else
	{
	  tmp = stack_pointer_rtx;
	  offset = current_frame_info.total_size - offset;
	}

      if (CONST_OK_FOR_I (offset))
	emit_insn (gen_adddi3 (pic_offset_table_rtx,
			       tmp, GEN_INT (offset)));
      else
	{
	  emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
	  emit_insn (gen_adddi3 (pic_offset_table_rtx,
				 pic_offset_table_rtx, tmp));
	}

      tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
    }

  emit_move_insn (pic_offset_table_rtx, tmp);
}
void
ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
		 rtx scratch_b, int noreturn_p, int sibcall_p)
{
  rtx insn;
  bool is_desc = false;

  /* If we find we're calling through a register, then we're actually
     calling through a descriptor, so load up the values.  */
  if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
    {
      rtx tmp;
      bool addr_dead_p;

      /* ??? We are currently constrained to *not* use peep2, because
	 we can legitimately change the global lifetime of the GP
	 (in the form of killing where previously live).  This is
	 because a call through a descriptor doesn't use the previous
	 value of the GP, while a direct call does, and we do not
	 commit to either form until the split here.

	 That said, this means that we lack precise life info for
	 whether ADDR is dead after this call.  This is not terribly
	 important, since we can fix things up essentially for free
	 with the POST_DEC below, but it's nice to not use it when we
	 can immediately tell it's not necessary.  */
      addr_dead_p = ((noreturn_p || sibcall_p
		      || TEST_HARD_REG_BIT (regs_invalidated_by_call,
					    REGNO (addr)))
		     && !FUNCTION_ARG_REGNO_P (REGNO (addr)));

      /* Load the code address into scratch_b.  */
      tmp = gen_rtx_POST_INC (Pmode, addr);
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (scratch_r, tmp);
      emit_move_insn (scratch_b, scratch_r);

      /* Load the GP address.  If ADDR is not dead here, then we must
	 revert the change made above via the POST_INCREMENT.  */
      if (!addr_dead_p)
	tmp = gen_rtx_POST_DEC (Pmode, addr);
      else
	tmp = addr;
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (pic_offset_table_rtx, tmp);

      is_desc = true;
      addr = scratch_b;
    }

  if (sibcall_p)
    insn = gen_sibcall_nogp (addr);
  else if (retval)
    insn = gen_call_value_nogp (retval, addr, retaddr);
  else
    insn = gen_call_nogp (addr, retaddr);
  emit_call_insn (insn);

  if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
    ia64_reload_gp ();
}
/* Begin the assembly file.  */

static void
ia64_file_start (void)
{
  default_file_start ();
  emit_safe_across_calls ();
}

void
emit_safe_across_calls (void)
{
  unsigned int rs, re;
  int out_state;

  rs = 1;
  out_state = 0;
  while (1)
    {
      while (rs < 64 && call_used_regs[PR_REG (rs)])
	rs++;
      if (rs >= 64)
	break;
      for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
	continue;
      if (out_state == 0)
	{
	  fputs ("\t.pred.safe_across_calls ", asm_out_file);
	  out_state = 1;
	}
      else
	fputc (',', asm_out_file);
      if (re == rs + 1)
	fprintf (asm_out_file, "p%u", rs);
      else
	fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
      rs = re + 1;
    }
  if (out_state)
    fputc ('\n', asm_out_file);
}
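/* (With the usual IA-64 call-used predicate partitioning, this emits a
   directive along the lines of
	.pred.safe_across_calls p1-p5,p16-p63
   telling the assembler which predicate registers survive calls.)  */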
/* Helper function for ia64_compute_frame_size: find an appropriate general
   register to spill some special register to.  SPECIAL_SPILL_MASK contains
   bits in GR0 to GR31 that have already been allocated by this routine.
   TRY_LOCALS is true if we should attempt to locate a local regnum.  */

static int
find_gr_spill (int try_locals)
{
  int regno;

  /* If this is a leaf function, first try an otherwise unused
     call-clobbered register.  */
  if (current_function_is_leaf)
    {
      for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
	if (! regs_ever_live[regno]
	    && call_used_regs[regno]
	    && ! fixed_regs[regno]
	    && ! global_regs[regno]
	    && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	  {
	    current_frame_info.gr_used_mask |= 1 << regno;
	    return regno;
	  }
    }

  if (try_locals)
    {
      regno = current_frame_info.n_local_regs;
      /* If there is a frame pointer, then we can't use loc79, because
	 that is HARD_FRAME_POINTER_REGNUM.  In particular, see the
	 reg_name switching code in ia64_expand_prologue.  */
      if (regno < (80 - frame_pointer_needed))
	{
	  current_frame_info.n_local_regs = regno + 1;
	  return LOC_REG (0) + regno;
	}
    }

  /* Failed to find a general register to spill to.  Must use stack.  */
  return 0;
}

/* In order to make for nice schedules, we try to allocate every temporary
   to a different register.  We must of course stay away from call-saved,
   fixed, and global registers.  We must also stay away from registers
   allocated in current_frame_info.gr_used_mask, since those include regs
   used all through the prologue.

   Any register allocated here must be used immediately.  The idea is to
   aid scheduling, not to solve data flow problems.  */

static int last_scratch_gr_reg;

static int
next_scratch_gr_reg (void)
{
  int i, regno;

  for (i = 0; i < 32; ++i)
    {
      regno = (last_scratch_gr_reg + i + 1) & 31;
      if (call_used_regs[regno]
	  && ! fixed_regs[regno]
	  && ! global_regs[regno]
	  && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	{
	  last_scratch_gr_reg = regno;
	  return regno;
	}
    }

  /* There must be _something_ available.  */
  abort ();
}

/* Helper function for ia64_compute_frame_size, called through
   diddle_return_value.  Mark REG in current_frame_info.gr_used_mask.  */

static void
mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno = REGNO (reg);
  if (regno < 32)
    {
      unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
      for (i = 0; i < n; ++i)
	current_frame_info.gr_used_mask |= 1 << (regno + i);
    }
}
/* Returns the number of bytes offset between the frame pointer and the stack
   pointer for the current function.  SIZE is the number of bytes of space
   needed for local variables.  */

static void
ia64_compute_frame_size (HOST_WIDE_INT size)
{
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT spill_size = 0;
  HOST_WIDE_INT extra_spill_size = 0;
  HOST_WIDE_INT pretend_args_size;
  HARD_REG_SET mask;
  int n_spilled = 0;
  int spilled_gr_p = 0;
  int spilled_fr_p = 0;
  unsigned int regno;
  int i;

  if (current_frame_info.initialized)
    return;

  memset (&current_frame_info, 0, sizeof current_frame_info);
  CLEAR_HARD_REG_SET (mask);

  /* Don't allocate scratches to the return register.  */
  diddle_return_value (mark_reg_gr_used_mask, NULL);

  /* Don't allocate scratches to the EH scratch registers.  */
  if (cfun->machine->ia64_eh_epilogue_sp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
  if (cfun->machine->ia64_eh_epilogue_bsp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);

  /* Find the size of the register stack frame.  We have only 80 local
     registers, because we reserve 8 for the inputs and 8 for the
     outputs.  */

  /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
     since we'll be adjusting that down later.  */
  regno = LOC_REG (78) + ! frame_pointer_needed;
  for (; regno >= LOC_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;

  /* For functions marked with the syscall_linkage attribute, we must mark
     all eight input registers as in use, so that locals aren't visible to
     the caller.  */

  if (cfun->machine->n_varargs > 0
      || lookup_attribute ("syscall_linkage",
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    current_frame_info.n_input_regs = 8;
  else
    {
      for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
	if (regs_ever_live[regno])
	  break;
      current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
    }

  for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  i = regno - OUT_REG (0) + 1;

  /* When -p profiling, we need one output register for the mcount argument.
     Likewise for -a profiling for the bb_init_func argument.  For -ax
     profiling, we need two output registers for the two bb_init_trace_func
     arguments.  */
  if (current_function_profile)
    i = MAX (i, 1);
  current_frame_info.n_output_regs = i;

  /* ??? No rotating register support yet.  */
  current_frame_info.n_rotate_regs = 0;

  /* Discover which registers need spilling, and how much room that
     will take.  Begin with floating point and general registers,
     which will always wind up on the stack.  */

  for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 16;
	n_spilled += 1;
	spilled_fr_p = 1;
      }

  for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
	spilled_gr_p = 1;
      }

  for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
      }

  /* Now come all special registers that might get saved in other
     general registers.  */

  if (frame_pointer_needed)
    {
      current_frame_info.reg_fp = find_gr_spill (1);
      /* If we did not get a register, then we take LOC79.  This is guaranteed
	 to be free, even if regs_ever_live is already set, because this is
	 HARD_FRAME_POINTER_REGNUM.  This requires incrementing n_local_regs,
	 as we don't count loc79 above.  */
      if (current_frame_info.reg_fp == 0)
	{
	  current_frame_info.reg_fp = LOC_REG (79);
	  current_frame_info.n_local_regs++;
	}
    }

  if (! current_function_is_leaf)
    {
      /* Emit a save of BR0 if we call other functions.  Do this even
	 if this function doesn't return, as EH depends on this to be
	 able to unwind the stack.  */
      SET_HARD_REG_BIT (mask, BR_REG (0));

      current_frame_info.reg_save_b0 = find_gr_spill (1);
      if (current_frame_info.reg_save_b0 == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for ar.pfs.  */
      SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
      current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
      if (current_frame_info.reg_save_ar_pfs == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for gp.  Note that if we're calling setjmp, the stacked
	 registers are clobbered, so we fall back to the stack.  */
      current_frame_info.reg_save_gp
	= (current_function_calls_setjmp ? 0 : find_gr_spill (1));
      if (current_frame_info.reg_save_gp == 0)
	{
	  SET_HARD_REG_BIT (mask, GR_REG (1));
	  spill_size += 8;
	  n_spilled += 1;
	}
    }
  else
    {
      if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
	{
	  SET_HARD_REG_BIT (mask, BR_REG (0));
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      if (regs_ever_live[AR_PFS_REGNUM])
	{
	  SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
	  current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
	  if (current_frame_info.reg_save_ar_pfs == 0)
	    {
	      extra_spill_size += 8;
	      n_spilled += 1;
	    }
	}
    }

  /* Unwind descriptor hackery: things are most efficient if we allocate
     consecutive GR save registers for RP, PFS, FP in that order.  However,
     it is absolutely critical that FP get the only hard register that's
     guaranteed to be free, so we allocated it first.  If all three did
     happen to be allocated hard regs, and are consecutive, rearrange them
     into the preferred order now.  */
  if (current_frame_info.reg_fp != 0
      && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
      && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
    {
      current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
      current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
      current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
    }

  /* See if we need to store the predicate register block.  */
  for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      break;
  if (regno <= PR_REG (63))
    {
      SET_HARD_REG_BIT (mask, PR_REG (0));
      current_frame_info.reg_save_pr = find_gr_spill (1);
      if (current_frame_info.reg_save_pr == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* ??? Mark them all as used so that register renaming and such
	 are free to use them.  */
      for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
	regs_ever_live[regno] = 1;
    }

  /* If we're forced to use st8.spill, we're forced to save and restore
     ar.unat as well.  The check for existing liveness allows inline asm
     to touch ar.unat.  */
  if (spilled_gr_p || cfun->machine->n_varargs
      || regs_ever_live[AR_UNAT_REGNUM])
    {
      regs_ever_live[AR_UNAT_REGNUM] = 1;
      SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
      current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_unat == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  if (regs_ever_live[AR_LC_REGNUM])
    {
      SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
      current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_lc == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  /* If we have an odd number of words of pretend arguments written to
     the stack, then the FR save area will be unaligned.  We round the
     size of this area up to keep things 16 byte aligned.  */
  if (spilled_fr_p)
    pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
  else
    pretend_args_size = current_function_pretend_args_size;
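  /* (For example, 8 bytes of pretend arguments round up to 16 here, so
     a 16-byte fr spill slot never straddles the alignment boundary.)  */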
  total_size = (spill_size + extra_spill_size + size + pretend_args_size
		+ current_function_outgoing_args_size);
  total_size = IA64_STACK_ALIGN (total_size);

  /* We always use the 16-byte scratch area provided by the caller, but
     if we are a leaf function, there's no one to which we need to provide
     a scratch area.  */
  if (current_function_is_leaf)
    total_size = MAX (0, total_size - 16);

  current_frame_info.total_size = total_size;
  current_frame_info.spill_cfa_off = pretend_args_size - 16;
  current_frame_info.spill_size = spill_size;
  current_frame_info.extra_spill_size = extra_spill_size;
  COPY_HARD_REG_SET (current_frame_info.mask, mask);
  current_frame_info.n_spilled = n_spilled;
  current_frame_info.initialized = reload_completed;
}
/* Compute the initial difference between the specified pair of registers.  */

HOST_WIDE_INT
ia64_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  ia64_compute_frame_size (get_frame_size ());
  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      if (to == HARD_FRAME_POINTER_REGNUM)
	{
	  if (current_function_is_leaf)
	    offset = -current_frame_info.total_size;
	  else
	    offset = -(current_frame_info.total_size
		       - current_function_outgoing_args_size - 16);
	}
      else if (to == STACK_POINTER_REGNUM)
	{
	  if (current_function_is_leaf)
	    offset = 0;
	  else
	    offset = 16 + current_function_outgoing_args_size;
	}
      else
	abort ();
      break;

    case ARG_POINTER_REGNUM:
      /* Arguments start above the 16 byte save area, unless stdarg
	 in which case we store through the 16 byte save area.  */
      if (to == HARD_FRAME_POINTER_REGNUM)
	offset = 16 - current_function_pretend_args_size;
      else if (to == STACK_POINTER_REGNUM)
	offset = (current_frame_info.total_size
		  + 16 - current_function_pretend_args_size);
      else
	abort ();
      break;

    default:
      abort ();
    }

  return offset;
}
/* If there are more than a trivial number of register spills, we use
   two interleaved iterators so that we can get two memory references
   per insn group.

   In order to simplify things in the prologue and epilogue expanders,
   we use helper functions to fix up the memory references after the
   fact with the appropriate offsets to a POST_MODIFY memory mode.
   The following data structure tracks the state of the two iterators
   while insns are being emitted.  */

struct spill_fill_data
{
  rtx init_after;		/* point at which to emit initializations */
  rtx init_reg[2];		/* initial base register */
  rtx iter_reg[2];		/* the iterator registers */
  rtx *prev_addr[2];		/* address of last memory use */
  rtx prev_insn[2];		/* the insn corresponding to prev_addr */
  HOST_WIDE_INT prev_off[2];	/* last offset */
  int n_iter;			/* number of iterators in use */
  int next_iter;		/* next iterator to use */
  unsigned int save_gr_used_mask;
};

static struct spill_fill_data spill_fill_data;
static void
setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
{
  int i;

  spill_fill_data.init_after = get_last_insn ();
  spill_fill_data.init_reg[0] = init_reg;
  spill_fill_data.init_reg[1] = init_reg;
  spill_fill_data.prev_addr[0] = NULL;
  spill_fill_data.prev_addr[1] = NULL;
  spill_fill_data.prev_insn[0] = NULL;
  spill_fill_data.prev_insn[1] = NULL;
  spill_fill_data.prev_off[0] = cfa_off;
  spill_fill_data.prev_off[1] = cfa_off;
  spill_fill_data.next_iter = 0;
  spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;

  spill_fill_data.n_iter = 1 + (n_spills > 2);
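  /* (One iterator suffices for one or two spills; with three or more we
     use both, so the spill/fill memory references can interleave.)  */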
  for (i = 0; i < spill_fill_data.n_iter; ++i)
    {
      int regno = next_scratch_gr_reg ();
      spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
      current_frame_info.gr_used_mask |= 1 << regno;
    }
}

static void
finish_spill_pointers (void)
{
  current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
}
static rtx
spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
  rtx disp_rtx = GEN_INT (disp);
  rtx mem;

  if (spill_fill_data.prev_addr[iter])
    {
      if (CONST_OK_FOR_N (disp))
	{
	  *spill_fill_data.prev_addr[iter]
	    = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
				   gen_rtx_PLUS (DImode,
						 spill_fill_data.iter_reg[iter],
						 disp_rtx));
	  REG_NOTES (spill_fill_data.prev_insn[iter])
	    = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
				 REG_NOTES (spill_fill_data.prev_insn[iter]));
	}
      else
	{
	  /* ??? Could use register post_modify for loads.  */
	  if (! CONST_OK_FOR_I (disp))
	    {
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }
	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.iter_reg[iter], disp_rtx));
	}
    }
  /* Micro-optimization: if we've created a frame pointer, it's at
     CFA 0, which may allow the real iterator to be initialized lower,
     slightly increasing parallelism.  Also, if there are few saves
     it may eliminate the iterator entirely.  */
  else if (disp == 0
	   && spill_fill_data.init_reg[iter] == stack_pointer_rtx
	   && frame_pointer_needed)
    {
      mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
      set_mem_alias_set (mem, get_varargs_alias_set ());
      return mem;
    }
  else
    {
      rtx seq, insn;

      if (disp == 0)
	seq = gen_movdi (spill_fill_data.iter_reg[iter],
			 spill_fill_data.init_reg[iter]);
      else
	{
	  start_sequence ();

	  if (! CONST_OK_FOR_I (disp))
	    {
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }

	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.init_reg[iter],
				 disp_rtx));

	  seq = get_insns ();
	  end_sequence ();
	}

      /* Careful for being the first insn in a sequence.  */
      if (spill_fill_data.init_after)
	insn = emit_insn_after (seq, spill_fill_data.init_after);
      else
	{
	  rtx first = get_insns ();
	  if (first)
	    insn = emit_insn_before (seq, first);
	  else
	    insn = emit_insn (seq);
	}
      spill_fill_data.init_after = insn;

      /* If DISP is 0, we may or may not have a further adjustment
	 afterward.  If we do, then the load/store insn may be modified
	 to be a post-modify.  If we don't, then this copy may be
	 eliminated by copyprop_hardreg_forward, which makes this
	 insn garbage, which runs afoul of the sanity check in
	 propagate_one_insn.  So mark this insn as legal to delete.  */
      if (disp == 0)
	REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
					     REG_NOTES (insn));
    }

  mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);

  /* ??? Not all of the spills are for varargs, but some of them are.
     The rest of the spills belong in an alias set of their own.  But
     it doesn't actually hurt to include them here.  */
  set_mem_alias_set (mem, get_varargs_alias_set ());

  spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
  spill_fill_data.prev_off[iter] = cfa_off;

  if (++iter >= spill_fill_data.n_iter)
    iter = 0;
  spill_fill_data.next_iter = iter;

  return mem;
}
static void
do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
	  rtx frame_reg)
{
  int iter = spill_fill_data.next_iter;
  rtx mem, insn;

  mem = spill_restore_mem (reg, cfa_off);
  insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;

  if (frame_reg)
    {
      rtx base;
      HOST_WIDE_INT off;

      RTX_FRAME_RELATED_P (insn) = 1;

      /* Don't even pretend that the unwind code can intuit its way
	 through a pair of interleaved post_modify iterators.  Just
	 provide the correct answer.  */

      if (frame_pointer_needed)
	{
	  base = hard_frame_pointer_rtx;
	  off = - cfa_off;
	}
      else
	{
	  base = stack_pointer_rtx;
	  off = current_frame_info.total_size - cfa_off;
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode,
					  gen_rtx_MEM (GET_MODE (reg),
						       plus_constant (base, off)),
					  frame_reg),
			     REG_NOTES (insn));
    }
}

static void
do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  rtx insn;

  insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
				GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;
}
1976 /* Wrapper functions that discards the CONST_INT spill offset. These
1977 exist so that we can give gr_spill/gr_fill the offset they need and
1978 use a consistent function interface. */
1981 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1983 return gen_movdi (dest, src);
1987 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1989 return gen_fr_spill (dest, src);
1993 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
1995 return gen_fr_restore (dest, src);
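/* Illustration (added commentary, not from the original source): the
   offset operand only matters for the spill/fill patterns. A call such
   as

     do_spill (gen_gr_spill, reg, cfa_off, reg);

   passes GEN_INT (cfa_off) through as the third operand of gr_spill,
   whereas

     do_spill (gen_movdi_x, alt_reg, cfa_off, reg);

   ends up emitting a plain gen_movdi (mem, alt_reg), with the offset
   dropped by the wrapper above.  */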
1998 /* Called after register allocation to add any instructions needed for the
1999 prologue. Using a prologue insn is favored compared to putting all of the
2000 instructions in output_function_prologue(), since it allows the scheduler
2001 to intermix instructions with the saves of the call-saved registers. In
2002 some cases, it might be necessary to emit a barrier instruction as the last
2003 insn to prevent such scheduling.
2005 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2006 so that the debug info generation code can handle them properly.
2008 The register save area is laid out like so:
2010 [ varargs spill area ]
2011 [ fr register spill area ]
2012 [ br register spill area ]
2013 [ ar register spill area ]
2014 [ pr register spill area ]
2015 [ gr register spill area ] */
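/* Illustration (added commentary): cfa_off starts at the top of this
   layout, spill_cfa_off + spill_size + extra_spill_size, and is stepped
   downward as each area is written (8 bytes per GR/BR/AR save, 16 per
   FR save), so the consistency checks later in this function can verify
   that we land exactly on spill_cfa_off.  */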
2017 /* ??? We get inefficient code when the frame size is larger than can fit in an
2018 adds instruction. */
2021 ia64_expand_prologue (void)
2023 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2024 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2027 ia64_compute_frame_size (get_frame_size ());
2028 last_scratch_gr_reg = 15;
2030 /* If there is no epilogue, then we don't need some prologue insns.
2031 We need to avoid emitting the dead prologue insns, because flow
2032 will complain about them. */
2038 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2039 if ((e->flags & EDGE_FAKE) == 0
2040 && (e->flags & EDGE_FALLTHRU) != 0)
2042 epilogue_p = (e != NULL);
2047 /* Set the local, input, and output register names. We need to do this
2048 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2049 half. If we use in/loc/out register names, then we get assembler errors
2050 in crtn.S because there is no alloc insn or regstk directive in there. */
2051 if (! TARGET_REG_NAMES)
2053 int inputs = current_frame_info.n_input_regs;
2054 int locals = current_frame_info.n_local_regs;
2055 int outputs = current_frame_info.n_output_regs;
2057 for (i = 0; i < inputs; i++)
2058 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2059 for (i = 0; i < locals; i++)
2060 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2061 for (i = 0; i < outputs; i++)
2062 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2065 /* Set the frame pointer register name. The regnum is logically loc79,
2066 but of course we'll not have allocated that many locals. Rather than
2067 worrying about renumbering the existing rtxs, we adjust the name. */
2068 /* ??? This code means that we can never use one local register when
2069 there is a frame pointer. loc79 gets wasted in this case, as it is
2070 renamed to a register that will never be used. See also the try_locals
2071 code in find_gr_spill. */
2072 if (current_frame_info.reg_fp)
2074 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2075 reg_names[HARD_FRAME_POINTER_REGNUM]
2076 = reg_names[current_frame_info.reg_fp];
2077 reg_names[current_frame_info.reg_fp] = tmp;
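/* Illustration (added commentary, hypothetical register): if the frame
   pointer was allocated to the register otherwise named "loc5", the swap
   above makes references to HARD_FRAME_POINTER_REGNUM print as "loc5",
   while the "loc79" name moves to the reg_fp slot, which no remaining
   rtl references.  */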
2080 /* We don't need an alloc instruction if we've used no outputs or locals. */
2081 if (current_frame_info.n_local_regs == 0
2082 && current_frame_info.n_output_regs == 0
2083 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2084 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2086 /* If there is no alloc, but there are input registers used, then we
2087 need a .regstk directive. */
2088 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2089 ar_pfs_save_reg = NULL_RTX;
2093 current_frame_info.need_regstk = 0;
2095 if (current_frame_info.reg_save_ar_pfs)
2096 regno = current_frame_info.reg_save_ar_pfs;
2098 regno = next_scratch_gr_reg ();
2099 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2101 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2102 GEN_INT (current_frame_info.n_input_regs),
2103 GEN_INT (current_frame_info.n_local_regs),
2104 GEN_INT (current_frame_info.n_output_regs),
2105 GEN_INT (current_frame_info.n_rotate_regs)));
2106 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2109 /* Set up frame pointer, stack pointer, and spill iterators. */
2111 n_varargs = cfun->machine->n_varargs;
2112 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2113 stack_pointer_rtx, 0);
2115 if (frame_pointer_needed)
2117 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2118 RTX_FRAME_RELATED_P (insn) = 1;
2121 if (current_frame_info.total_size != 0)
2123 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2126 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2127 offset = frame_size_rtx;
2130 regno = next_scratch_gr_reg ();
2131 offset = gen_rtx_REG (DImode, regno);
2132 emit_move_insn (offset, frame_size_rtx);
2135 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2136 stack_pointer_rtx, offset));
2138 if (! frame_pointer_needed)
2140 RTX_FRAME_RELATED_P (insn) = 1;
2141 if (GET_CODE (offset) != CONST_INT)
2144 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2145 gen_rtx_SET (VOIDmode,
2147 gen_rtx_PLUS (DImode,
2154 /* ??? At this point we must generate a magic insn that appears to
2155 modify the stack pointer, the frame pointer, and all spill
2156 iterators. This would allow the most scheduling freedom. For
2157 now, just hard stop. */
2158 emit_insn (gen_blockage ());
2161 /* Must copy out ar.unat before doing any integer spills. */
2162 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2164 if (current_frame_info.reg_save_ar_unat)
2166 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2169 alt_regno = next_scratch_gr_reg ();
2170 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2171 current_frame_info.gr_used_mask |= 1 << alt_regno;
2174 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2175 insn = emit_move_insn (ar_unat_save_reg, reg);
2176 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2178 /* Even if we're not going to generate an epilogue, we still
2179 need to save the register so that EH works. */
2180 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2181 emit_insn (gen_prologue_use (ar_unat_save_reg));
2184 ar_unat_save_reg = NULL_RTX;
2186 /* Spill all varargs registers. Do this before spilling any GR registers,
2187 since we want the UNAT bits for the GR registers to override the UNAT
2188 bits from varargs, which we don't care about. */
2191 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2193 reg = gen_rtx_REG (DImode, regno);
2194 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2197 /* Locate the bottom of the register save area. */
2198 cfa_off = (current_frame_info.spill_cfa_off
2199 + current_frame_info.spill_size
2200 + current_frame_info.extra_spill_size);
2202 /* Save the predicate register block either in a register or in memory. */
2203 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2205 reg = gen_rtx_REG (DImode, PR_REG (0));
2206 if (current_frame_info.reg_save_pr != 0)
2208 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2209 insn = emit_move_insn (alt_reg, reg);
2211 /* ??? Denote pr spill/fill by a DImode move that modifies all
2212 64 hard registers. */
2213 RTX_FRAME_RELATED_P (insn) = 1;
2215 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2216 gen_rtx_SET (VOIDmode, alt_reg, reg),
2219 /* Even if we're not going to generate an epilogue, we still
2220 need to save the register so that EH works. */
2222 emit_insn (gen_prologue_use (alt_reg));
2226 alt_regno = next_scratch_gr_reg ();
2227 alt_reg = gen_rtx_REG (DImode, alt_regno);
2228 insn = emit_move_insn (alt_reg, reg);
2229 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2234 /* Handle AR regs in numerical order. All of them get special handling. */
2235 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2236 && current_frame_info.reg_save_ar_unat == 0)
2238 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2239 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2243 /* The alloc insn already copied ar.pfs into a general register. The
2244 only thing we have to do now is copy that register to a stack slot
2245 if we'd not allocated a local register for the job. */
2246 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2247 && current_frame_info.reg_save_ar_pfs == 0)
2249 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2250 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2254 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2256 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2257 if (current_frame_info.reg_save_ar_lc != 0)
2259 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2260 insn = emit_move_insn (alt_reg, reg);
2261 RTX_FRAME_RELATED_P (insn) = 1;
2263 /* Even if we're not going to generate an epilogue, we still
2264 need to save the register so that EH works. */
2266 emit_insn (gen_prologue_use (alt_reg));
2270 alt_regno = next_scratch_gr_reg ();
2271 alt_reg = gen_rtx_REG (DImode, alt_regno);
2272 emit_move_insn (alt_reg, reg);
2273 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2278 if (current_frame_info.reg_save_gp)
2280 insn = emit_move_insn (gen_rtx_REG (DImode,
2281 current_frame_info.reg_save_gp),
2282 pic_offset_table_rtx);
2283 /* We don't know for sure yet if this is actually needed, since
2284 we've not split the PIC call patterns. If all of the calls
2285 are indirect, and not followed by any uses of the gp, then
2286 this save is dead. Allow it to go away. */
2288 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2291 /* We should now be at the base of the gr/br/fr spill area. */
2292 if (cfa_off != (current_frame_info.spill_cfa_off
2293 + current_frame_info.spill_size))
2296 /* Spill all general registers. */
2297 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2298 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2300 reg = gen_rtx_REG (DImode, regno);
2301 do_spill (gen_gr_spill, reg, cfa_off, reg);
2305 /* Handle BR0 specially -- it may be getting stored permanently in
2306 some GR register. */
2307 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2309 reg = gen_rtx_REG (DImode, BR_REG (0));
2310 if (current_frame_info.reg_save_b0 != 0)
2312 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2313 insn = emit_move_insn (alt_reg, reg);
2314 RTX_FRAME_RELATED_P (insn) = 1;
2316 /* Even if we're not going to generate an epilogue, we still
2317 need to save the register so that EH works. */
2319 emit_insn (gen_prologue_use (alt_reg));
2323 alt_regno = next_scratch_gr_reg ();
2324 alt_reg = gen_rtx_REG (DImode, alt_regno);
2325 emit_move_insn (alt_reg, reg);
2326 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2331 /* Spill the rest of the BR registers. */
2332 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2333 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2335 alt_regno = next_scratch_gr_reg ();
2336 alt_reg = gen_rtx_REG (DImode, alt_regno);
2337 reg = gen_rtx_REG (DImode, regno);
2338 emit_move_insn (alt_reg, reg);
2339 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2343 /* Align the frame and spill all FR registers. */
2344 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2345 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2349 reg = gen_rtx_REG (XFmode, regno);
2350 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2354 if (cfa_off != current_frame_info.spill_cfa_off)
2357 finish_spill_pointers ();
2360 /* Called after register allocation to add any instructions needed for the
2361 epilogue. Using an epilogue insn is favored compared to putting all of the
2362 instructions in output_function_epilogue(), since it allows the scheduler
2363 to intermix instructions with the restores of the call-saved registers. In
2364 some cases, it might be necessary to emit a barrier instruction as the last
2365 insn to prevent such scheduling. */
2368 ia64_expand_epilogue (int sibcall_p)
2370 rtx insn, reg, alt_reg, ar_unat_save_reg;
2371 int regno, alt_regno, cfa_off;
2373 ia64_compute_frame_size (get_frame_size ());
2375 /* If there is a frame pointer, then we use it instead of the stack
2376 pointer, so that the stack pointer does not need to be valid when
2377 the epilogue starts. See EXIT_IGNORE_STACK. */
2378 if (frame_pointer_needed)
2379 setup_spill_pointers (current_frame_info.n_spilled,
2380 hard_frame_pointer_rtx, 0);
2382 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2383 current_frame_info.total_size);
2385 if (current_frame_info.total_size != 0)
2387 /* ??? At this point we must generate a magic insn that appears to
2388 modify the spill iterators and the frame pointer. This would
2389 allow the most scheduling freedom. For now, just hard stop. */
2390 emit_insn (gen_blockage ());
2393 /* Locate the bottom of the register save area. */
2394 cfa_off = (current_frame_info.spill_cfa_off
2395 + current_frame_info.spill_size
2396 + current_frame_info.extra_spill_size);
2398 /* Restore the predicate registers. */
2399 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2401 if (current_frame_info.reg_save_pr != 0)
2402 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2405 alt_regno = next_scratch_gr_reg ();
2406 alt_reg = gen_rtx_REG (DImode, alt_regno);
2407 do_restore (gen_movdi_x, alt_reg, cfa_off);
2410 reg = gen_rtx_REG (DImode, PR_REG (0));
2411 emit_move_insn (reg, alt_reg);
2414 /* Restore the application registers. */
2416 /* Load the saved unat from the stack, but do not restore it until
2417 after the GRs have been restored. */
2418 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2420 if (current_frame_info.reg_save_ar_unat != 0)
2422 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2425 alt_regno = next_scratch_gr_reg ();
2426 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2427 current_frame_info.gr_used_mask |= 1 << alt_regno;
2428 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2433 ar_unat_save_reg = NULL_RTX;
2435 if (current_frame_info.reg_save_ar_pfs != 0)
2437 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2438 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2439 emit_move_insn (reg, alt_reg);
2441 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2443 alt_regno = next_scratch_gr_reg ();
2444 alt_reg = gen_rtx_REG (DImode, alt_regno);
2445 do_restore (gen_movdi_x, alt_reg, cfa_off);
2447 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2448 emit_move_insn (reg, alt_reg);
2451 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2453 if (current_frame_info.reg_save_ar_lc != 0)
2454 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2457 alt_regno = next_scratch_gr_reg ();
2458 alt_reg = gen_rtx_REG (DImode, alt_regno);
2459 do_restore (gen_movdi_x, alt_reg, cfa_off);
2462 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2463 emit_move_insn (reg, alt_reg);
2466 /* We should now be at the base of the gr/br/fr spill area. */
2467 if (cfa_off != (current_frame_info.spill_cfa_off
2468 + current_frame_info.spill_size))
2471 /* The GP may be stored on the stack in the prologue, but it's
2472 never restored in the epilogue. Skip the stack slot. */
2473 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2476 /* Restore all general registers. */
2477 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2478 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2480 reg = gen_rtx_REG (DImode, regno);
2481 do_restore (gen_gr_restore, reg, cfa_off);
2485 /* Restore the branch registers. Handle B0 specially, as it may
2486 have gotten stored in some GR register. */
2487 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2489 if (current_frame_info.reg_save_b0 != 0)
2490 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2493 alt_regno = next_scratch_gr_reg ();
2494 alt_reg = gen_rtx_REG (DImode, alt_regno);
2495 do_restore (gen_movdi_x, alt_reg, cfa_off);
2498 reg = gen_rtx_REG (DImode, BR_REG (0));
2499 emit_move_insn (reg, alt_reg);
2502 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2503 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2505 alt_regno = next_scratch_gr_reg ();
2506 alt_reg = gen_rtx_REG (DImode, alt_regno);
2507 do_restore (gen_movdi_x, alt_reg, cfa_off);
2509 reg = gen_rtx_REG (DImode, regno);
2510 emit_move_insn (reg, alt_reg);
2513 /* Restore floating point registers. */
2514 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2515 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2519 reg = gen_rtx_REG (XFmode, regno);
2520 do_restore (gen_fr_restore_x, reg, cfa_off);
2524 /* Restore ar.unat for real. */
2525 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2527 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2528 emit_move_insn (reg, ar_unat_save_reg);
2531 if (cfa_off != current_frame_info.spill_cfa_off)
2534 finish_spill_pointers ();
2536 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2538 /* ??? At this point we must generate a magic insn that appears to
2539 modify the spill iterators, the stack pointer, and the frame
2540 pointer. This would allow the most scheduling freedom. For now, just hard stop. */
2542 emit_insn (gen_blockage ());
2545 if (cfun->machine->ia64_eh_epilogue_sp)
2546 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2547 else if (frame_pointer_needed)
2549 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2550 RTX_FRAME_RELATED_P (insn) = 1;
2552 else if (current_frame_info.total_size)
2554 rtx offset, frame_size_rtx;
2556 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2557 if (CONST_OK_FOR_I (current_frame_info.total_size))
2558 offset = frame_size_rtx;
2561 regno = next_scratch_gr_reg ();
2562 offset = gen_rtx_REG (DImode, regno);
2563 emit_move_insn (offset, frame_size_rtx);
2566 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2569 RTX_FRAME_RELATED_P (insn) = 1;
2570 if (GET_CODE (offset) != CONST_INT)
2573 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2574 gen_rtx_SET (VOIDmode,
2576 gen_rtx_PLUS (DImode,
2583 if (cfun->machine->ia64_eh_epilogue_bsp)
2584 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2587 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2590 int fp = GR_REG (2);
2591 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
2592 first available call-clobbered register. If there was a frame_pointer
2593 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2594 so we have to make sure we're using the string "r2" when emitting
2595 the register name for the assembler. */
2596 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2597 fp = HARD_FRAME_POINTER_REGNUM;
2599 /* We must emit an alloc to force the input registers to become output
2600 registers. Otherwise, if the callee tries to pass its parameters
2601 through to another call without an intervening alloc, then these values may be clobbered. */
2603 /* ??? We don't need to preserve all input registers. We only need to
2604 preserve those input registers used as arguments to the sibling call.
2605 It is unclear how to compute that number here. */
2606 if (current_frame_info.n_input_regs != 0)
2607 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2608 const0_rtx, const0_rtx,
2609 GEN_INT (current_frame_info.n_input_regs),
2614 /* Return 1 if br.ret can do all the work required to return from a function. */
2618 ia64_direct_return (void)
2620 if (reload_completed && ! frame_pointer_needed)
2622 ia64_compute_frame_size (get_frame_size ());
2624 return (current_frame_info.total_size == 0
2625 && current_frame_info.n_spilled == 0
2626 && current_frame_info.reg_save_b0 == 0
2627 && current_frame_info.reg_save_pr == 0
2628 && current_frame_info.reg_save_ar_pfs == 0
2629 && current_frame_info.reg_save_ar_unat == 0
2630 && current_frame_info.reg_save_ar_lc == 0);
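/* Illustration (added commentary): when all of the above hold, a leaf
   function that spilled nothing and allocated no stack can return with
   roughly a bare "br.ret.sptk.many b0" -- there is no frame to tear
   down.  */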
2635 /* Return the magic cookie that we use to hold the return address
2636 during early compilation. */
2639 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
2643 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
2646 /* Split this value after reload, now that we know where the return
2647 address is saved. */
2650 ia64_split_return_addr_rtx (rtx dest)
2654 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2656 if (current_frame_info.reg_save_b0 != 0)
2657 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2663 /* Compute offset from CFA for BR0. */
2664 /* ??? Must be kept in sync with ia64_expand_prologue. */
2665 off = (current_frame_info.spill_cfa_off
2666 + current_frame_info.spill_size);
2667 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2668 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2671 /* Convert CFA offset to a register based offset. */
2672 if (frame_pointer_needed)
2673 src = hard_frame_pointer_rtx;
2676 src = stack_pointer_rtx;
2677 off += current_frame_info.total_size;
2680 /* Load address into scratch register. */
2681 if (CONST_OK_FOR_I (off))
2682 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
2685 emit_move_insn (dest, GEN_INT (off));
2686 emit_insn (gen_adddi3 (dest, src, dest));
2689 src = gen_rtx_MEM (Pmode, dest);
2693 src = gen_rtx_REG (DImode, BR_REG (0));
2695 emit_move_insn (dest, src);
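/* Illustration (added commentary): BR0's slot sits directly below the
   saved general registers, so its CFA offset is spill_cfa_off +
   spill_size minus 8 bytes for each GR the prologue spilled first --
   which is why the loop above must stay in sync with
   ia64_expand_prologue.  */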
2699 ia64_hard_regno_rename_ok (int from, int to)
2701 /* Don't clobber any of the registers we reserved for the prologue. */
2702 if (to == current_frame_info.reg_fp
2703 || to == current_frame_info.reg_save_b0
2704 || to == current_frame_info.reg_save_pr
2705 || to == current_frame_info.reg_save_ar_pfs
2706 || to == current_frame_info.reg_save_ar_unat
2707 || to == current_frame_info.reg_save_ar_lc)
2710 if (from == current_frame_info.reg_fp
2711 || from == current_frame_info.reg_save_b0
2712 || from == current_frame_info.reg_save_pr
2713 || from == current_frame_info.reg_save_ar_pfs
2714 || from == current_frame_info.reg_save_ar_unat
2715 || from == current_frame_info.reg_save_ar_lc)
2718 /* Don't use output registers outside the register frame. */
2719 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
2722 /* Retain even/oddness on predicate register pairs. */
2723 if (PR_REGNO_P (from) && PR_REGNO_P (to))
2724 return (from & 1) == (to & 1);
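/* Illustration (added commentary): the parity test above permits
   renaming p6 to p8 but rejects p6 -> p7, since (from & 1) must equal
   (to & 1).  */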
2729 /* Target hook for assembling integer objects. Handle word-sized
2730 aligned objects and detect the cases when @fptr is needed. */
2733 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
2735 if (size == POINTER_SIZE / BITS_PER_UNIT
2737 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
2738 && GET_CODE (x) == SYMBOL_REF
2739 && SYMBOL_REF_FUNCTION_P (x))
2741 if (POINTER_SIZE == 32)
2742 fputs ("\tdata4\t@fptr(", asm_out_file);
2744 fputs ("\tdata8\t@fptr(", asm_out_file);
2745 output_addr_const (asm_out_file, x);
2746 fputs (")\n", asm_out_file);
2749 return default_assemble_integer (x, size, aligned_p);
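/* Illustration (hypothetical symbol): for a 64-bit pointer to a
   function "foo", the branch above emits

     data8	@fptr(foo)

   so that the linker materializes an official function descriptor
   rather than the raw code address.  */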
2752 /* Emit the function prologue. */
2755 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2757 int mask, grsave, grsave_prev;
2759 if (current_frame_info.need_regstk)
2760 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
2761 current_frame_info.n_input_regs,
2762 current_frame_info.n_local_regs,
2763 current_frame_info.n_output_regs,
2764 current_frame_info.n_rotate_regs);
2766 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2769 /* Emit the .prologue directive. */
2772 grsave = grsave_prev = 0;
2773 if (current_frame_info.reg_save_b0 != 0)
2776 grsave = grsave_prev = current_frame_info.reg_save_b0;
2778 if (current_frame_info.reg_save_ar_pfs != 0
2779 && (grsave_prev == 0
2780 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
2783 if (grsave_prev == 0)
2784 grsave = current_frame_info.reg_save_ar_pfs;
2785 grsave_prev = current_frame_info.reg_save_ar_pfs;
2787 if (current_frame_info.reg_fp != 0
2788 && (grsave_prev == 0
2789 || current_frame_info.reg_fp == grsave_prev + 1))
2792 if (grsave_prev == 0)
2793 grsave = HARD_FRAME_POINTER_REGNUM;
2794 grsave_prev = current_frame_info.reg_fp;
2796 if (current_frame_info.reg_save_pr != 0
2797 && (grsave_prev == 0
2798 || current_frame_info.reg_save_pr == grsave_prev + 1))
2801 if (grsave_prev == 0)
2802 grsave = current_frame_info.reg_save_pr;
2805 if (mask && TARGET_GNU_AS)
2806 fprintf (file, "\t.prologue %d, %d\n", mask,
2807 ia64_dbx_register_number (grsave));
2809 fputs ("\t.prologue\n", file);
2811 /* Emit a .spill directive, if necessary, to relocate the base of
2812 the register spill area. */
2813 if (current_frame_info.spill_cfa_off != -16)
2814 fprintf (file, "\t.spill %ld\n",
2815 (long) (current_frame_info.spill_cfa_off
2816 + current_frame_info.spill_size));
2819 /* Emit the .body directive at the scheduled end of the prologue. */
2822 ia64_output_function_end_prologue (FILE *file)
2824 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2827 fputs ("\t.body\n", file);
2830 /* Emit the function epilogue. */
2833 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
2834 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2838 if (current_frame_info.reg_fp)
2840 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2841 reg_names[HARD_FRAME_POINTER_REGNUM]
2842 = reg_names[current_frame_info.reg_fp];
2843 reg_names[current_frame_info.reg_fp] = tmp;
2845 if (! TARGET_REG_NAMES)
2847 for (i = 0; i < current_frame_info.n_input_regs; i++)
2848 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
2849 for (i = 0; i < current_frame_info.n_local_regs; i++)
2850 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
2851 for (i = 0; i < current_frame_info.n_output_regs; i++)
2852 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
2855 current_frame_info.initialized = 0;
2859 ia64_dbx_register_number (int regno)
2861 /* In ia64_expand_prologue we quite literally renamed the frame pointer
2862 from its home at loc79 to something inside the register frame. We
2863 must perform the same renumbering here for the debug info. */
2864 if (current_frame_info.reg_fp)
2866 if (regno == HARD_FRAME_POINTER_REGNUM)
2867 regno = current_frame_info.reg_fp;
2868 else if (regno == current_frame_info.reg_fp)
2869 regno = HARD_FRAME_POINTER_REGNUM;
2872 if (IN_REGNO_P (regno))
2873 return 32 + regno - IN_REG (0);
2874 else if (LOC_REGNO_P (regno))
2875 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
2876 else if (OUT_REGNO_P (regno))
2877 return (32 + current_frame_info.n_input_regs
2878 + current_frame_info.n_local_regs + regno - OUT_REG (0));
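/* Illustration (hypothetical frame): with 2 input and 3 local
   registers, in0/in1 map to debug registers 32 and 33, loc0..loc2 to
   34..36, and out0 to 37 -- the same order in which alloc lays them
   out in the stacked register file.  */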
2884 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
2886 rtx addr_reg, eight = GEN_INT (8);
2888 /* The Intel assembler requires that the global __ia64_trampoline symbol
2889 be declared explicitly. */
2892 static bool declared_ia64_trampoline = false;
2894 if (!declared_ia64_trampoline)
2896 declared_ia64_trampoline = true;
2897 (*targetm.asm_out.globalize_label) (asm_out_file,
2898 "__ia64_trampoline");
2902 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
2903 addr = convert_memory_address (Pmode, addr);
2904 fnaddr = convert_memory_address (Pmode, fnaddr);
2905 static_chain = convert_memory_address (Pmode, static_chain);
2907 /* Load up our iterator. */
2908 addr_reg = gen_reg_rtx (Pmode);
2909 emit_move_insn (addr_reg, addr);
2911 /* The first two words are the fake descriptor:
2912 __ia64_trampoline, ADDR+16. */
2913 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2914 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
2915 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2917 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2918 copy_to_reg (plus_constant (addr, 16)));
2919 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2921 /* The third word is the target descriptor. */
2922 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
2923 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2925 /* The fourth word is the static chain. */
2926 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
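/* Illustration (added commentary): the completed trampoline is four
   8-byte words:

     [ADDR +  0]  address of __ia64_trampoline
     [ADDR +  8]  ADDR + 16  (gp for the fake descriptor)
     [ADDR + 16]  target function descriptor word
     [ADDR + 24]  static chain

   so ADDR itself serves as a function descriptor whose code entry is
   the shared __ia64_trampoline stub.  */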
2929 /* Do any needed setup for a variadic function. CUM has not been updated
2930 for the last named argument, which has type TYPE and mode MODE.
2932 We generate the actual spill instructions during prologue generation. */
2935 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2936 tree type, int * pretend_size,
2937 int second_time ATTRIBUTE_UNUSED)
2939 CUMULATIVE_ARGS next_cum = *cum;
2941 /* Skip the current argument. */
2942 ia64_function_arg_advance (&next_cum, mode, type, 1);
2944 if (next_cum.words < MAX_ARGUMENT_SLOTS)
2946 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
2947 *pretend_size = n * UNITS_PER_WORD;
2948 cfun->machine->n_varargs = n;
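/* Illustration (hypothetical counts): if the named arguments consume 3
   of the MAX_ARGUMENT_SLOTS (8) slots, then n == 5 and *pretend_size
   == 5 * UNITS_PER_WORD == 40 bytes of varargs spill area.  */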
2952 /* Check whether TYPE is a homogeneous floating point aggregate. If
2953 it is, return the mode of the floating point type that appears
2954 in all leaves. If it is not, return VOIDmode.
2956 An aggregate is a homogeneous floating point aggregate if all
2957 fields/elements in it have the same floating point type (e.g.,
2958 SFmode). 128-bit quad-precision floats are excluded. */
2960 static enum machine_mode
2961 hfa_element_mode (tree type, int nested)
2963 enum machine_mode element_mode = VOIDmode;
2964 enum machine_mode mode;
2965 enum tree_code code = TREE_CODE (type);
2966 int know_element_mode = 0;
2971 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
2972 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
2973 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
2974 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
2978 /* Fortran complex types are supposed to be HFAs, so we need to handle
2979 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex types, though. */
2982 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
2983 && TYPE_MODE (type) != TCmode)
2984 return GET_MODE_INNER (TYPE_MODE (type));
2989 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
2990 mode if this is contained within an aggregate. */
2991 if (nested && TYPE_MODE (type) != TFmode)
2992 return TYPE_MODE (type);
2997 return hfa_element_mode (TREE_TYPE (type), 1);
3001 case QUAL_UNION_TYPE:
3002 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3004 if (TREE_CODE (t) != FIELD_DECL)
3007 mode = hfa_element_mode (TREE_TYPE (t), 1);
3008 if (know_element_mode)
3010 if (mode != element_mode)
3013 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3017 know_element_mode = 1;
3018 element_mode = mode;
3021 return element_mode;
3024 /* If we reach here, we probably have some front-end specific type
3025 that the backend doesn't know about. This can happen via the
3026 aggregate_value_p call in init_function_start. All we can do is
3027 ignore unknown tree types. */
3034 /* Return the number of words required to hold a quantity of TYPE and MODE
3035 when passed as an argument. */
3037 ia64_function_arg_words (tree type, enum machine_mode mode)
3041 if (mode == BLKmode)
3042 words = int_size_in_bytes (type);
3044 words = GET_MODE_SIZE (mode);
3046 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
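/* Illustration (added commentary): a 12-byte BLKmode aggregate needs
   (12 + 8 - 1) / 8 == 2 argument words under the rounding above.  */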
3049 /* Return the number of registers that should be skipped so the current
3050 argument (described by TYPE and WORDS) will be properly aligned.
3052 Integer and float arguments larger than 8 bytes start at the next
3053 even boundary. Aggregates larger than 8 bytes start at the next
3054 even boundary if the aggregate has 16 byte alignment. Note that
3055 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3056 but are still to be aligned in registers.
3058 ??? The ABI does not specify how to handle aggregates with
3059 alignment from 9 to 15 bytes, or greater than 16. We handle them
3060 all as if they had 16 byte alignment. Such aggregates can occur
3061 only if gcc extensions are used. */
3063 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3065 if ((cum->words & 1) == 0)
3069 && TREE_CODE (type) != INTEGER_TYPE
3070 && TREE_CODE (type) != REAL_TYPE)
3071 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
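/* Illustration (added commentary): an aggregate with 16-byte alignment
   whose argument would otherwise begin in an odd slot is pushed ahead
   one slot (offset 1); integer and FP scalars only skip a slot when
   they are themselves larger than 8 bytes.  */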
3076 /* Return rtx for register where argument is passed, or zero if it is passed on the stack. */
3078 /* ??? 128-bit quad-precision floats are always passed in general registers. */
3082 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3083 int named, int incoming)
3085 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3086 int words = ia64_function_arg_words (type, mode);
3087 int offset = ia64_function_arg_offset (cum, type, words);
3088 enum machine_mode hfa_mode = VOIDmode;
3090 /* If all argument slots are used, then it must go on the stack. */
3091 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3094 /* Check for and handle homogeneous FP aggregates. */
3096 hfa_mode = hfa_element_mode (type, 0);
3098 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3099 and unprototyped hfas are passed specially. */
3100 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3104 int fp_regs = cum->fp_regs;
3105 int int_regs = cum->words + offset;
3106 int hfa_size = GET_MODE_SIZE (hfa_mode);
3110 /* If prototyped, pass it in FR regs then GR regs.
3111 If not prototyped, pass it in both FR and GR regs.
3113 If this is an SFmode aggregate, then it is possible to run out of
3114 FR regs while GR regs are still left. In that case, we pass the
3115 remaining part in the GR regs. */
3117 /* Fill the FP regs. We do this always. We stop if we reach the end
3118 of the argument, the last FP register, or the last argument slot. */
3120 byte_size = ((mode == BLKmode)
3121 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3122 args_byte_size = int_regs * UNITS_PER_WORD;
3124 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3125 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3127 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3128 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3132 args_byte_size += hfa_size;
3136 /* If no prototype, then the whole thing must go in GR regs. */
3137 if (! cum->prototype)
3139 /* If this is an SFmode aggregate, then we might have some left over
3140 that needs to go in GR regs. */
3141 else if (byte_size != offset)
3142 int_regs += offset / UNITS_PER_WORD;
3144 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3146 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3148 enum machine_mode gr_mode = DImode;
3149 unsigned int gr_size;
3151 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3152 then this goes in a GR reg left adjusted/little endian, right
3153 adjusted/big endian. */
3154 /* ??? Currently this is handled wrong, because 4-byte hunks are
3155 always right adjusted/little endian. */
3158 /* If we have an even 4 byte hunk because the aggregate is a
3159 multiple of 4 bytes in size, then this goes in a GR reg right
3160 adjusted/little endian. */
3161 else if (byte_size - offset == 4)
3164 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3165 gen_rtx_REG (gr_mode, (basereg
3169 gr_size = GET_MODE_SIZE (gr_mode);
3171 if (gr_size == UNITS_PER_WORD
3172 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3174 else if (gr_size > UNITS_PER_WORD)
3175 int_regs += gr_size / UNITS_PER_WORD;
3177 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3180 /* Integral and aggregates go in general registers. If we have run out of
3181 FR registers, then FP values must also go in general registers. This can
3182 happen when we have an SFmode HFA. */
3183 else if (mode == TFmode || mode == TCmode
3184 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3186 int byte_size = ((mode == BLKmode)
3187 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3188 if (BYTES_BIG_ENDIAN
3189 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3190 && byte_size < UNITS_PER_WORD
3193 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3194 gen_rtx_REG (DImode,
3195 (basereg + cum->words
3198 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3201 return gen_rtx_REG (mode, basereg + cum->words + offset);
3205 /* If there is a prototype, then FP values go in a FR register when
3206 named, and in a GR register when unnamed. */
3207 else if (cum->prototype)
3210 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3211 /* In big-endian mode, an anonymous SFmode value must be represented
3212 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3213 the value into the high half of the general register. */
3214 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3215 return gen_rtx_PARALLEL (mode,
3217 gen_rtx_EXPR_LIST (VOIDmode,
3218 gen_rtx_REG (DImode, basereg + cum->words + offset),
3221 return gen_rtx_REG (mode, basereg + cum->words + offset);
3223 /* If there is no prototype, then FP values go in both FR and GR registers. */
3227 /* See comment above. */
3228 enum machine_mode inner_mode =
3229 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3231 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3232 gen_rtx_REG (mode, (FR_ARG_FIRST
3235 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3236 gen_rtx_REG (inner_mode,
3237 (basereg + cum->words
3241 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3245 /* Return number of words, at the beginning of the argument, that must be
3246 put in registers. 0 if the argument is entirely in registers or entirely in memory. */
3250 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3251 tree type, int named ATTRIBUTE_UNUSED)
3253 int words = ia64_function_arg_words (type, mode);
3254 int offset = ia64_function_arg_offset (cum, type, words);
3256 /* If all argument slots are used, then it must go on the stack. */
3257 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3260 /* It doesn't matter whether the argument goes in FR or GR regs. If
3261 it fits within the 8 argument slots, then it goes entirely in
3262 registers. If it extends past the last argument slot, then the rest
3263 goes on the stack. */
3265 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3268 return MAX_ARGUMENT_SLOTS - cum->words - offset;
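/* Illustration (hypothetical sizes): a 4-word argument starting at
   slot 6 gets 8 - 6 - 0 == 2 words in registers; the remaining 2 words
   go on the stack.  */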
3271 /* Update CUM to point after this argument. This is patterned after
3272 ia64_function_arg. */
3275 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3276 tree type, int named)
3278 int words = ia64_function_arg_words (type, mode);
3279 int offset = ia64_function_arg_offset (cum, type, words);
3280 enum machine_mode hfa_mode = VOIDmode;
3282 /* If all arg slots are already full, then there is nothing to do. */
3283 if (cum->words >= MAX_ARGUMENT_SLOTS)
3286 cum->words += words + offset;
3288 /* Check for and handle homogeneous FP aggregates. */
3290 hfa_mode = hfa_element_mode (type, 0);
3292 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3293 and unprototyped hfas are passed specially. */
3294 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3296 int fp_regs = cum->fp_regs;
3297 /* This is the original value of cum->words + offset. */
3298 int int_regs = cum->words - words;
3299 int hfa_size = GET_MODE_SIZE (hfa_mode);
3303 /* If prototyped, pass it in FR regs then GR regs.
3304 If not prototyped, pass it in both FR and GR regs.
3306 If this is an SFmode aggregate, then it is possible to run out of
3307 FR regs while GR regs are still left. In that case, we pass the
3308 remaining part in the GR regs. */
3310 /* Fill the FP regs. We do this always. We stop if we reach the end
3311 of the argument, the last FP register, or the last argument slot. */
3313 byte_size = ((mode == BLKmode)
3314 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3315 args_byte_size = int_regs * UNITS_PER_WORD;
3317 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3318 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3321 args_byte_size += hfa_size;
3325 cum->fp_regs = fp_regs;
3328 /* Integral and aggregates go in general registers. So do TFmode FP values.
3329 If we have run out of FR registers, then other FP values must also go in
3330 general registers. This can happen when we have an SFmode HFA. */
3331 else if (mode == TFmode || mode == TCmode
3332 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3333 cum->int_regs = cum->words;
3335 /* If there is a prototype, then FP values go in a FR register when
3336 named, and in a GR register when unnamed. */
3337 else if (cum->prototype)
3340 cum->int_regs = cum->words;
3342 /* ??? Complex types should not reach here. */
3343 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3345 /* If there is no prototype, then FP values go in both FR and GR registers. */
3349 /* ??? Complex types should not reach here. */
3350 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3351 cum->int_regs = cum->words;
3355 /* Arguments with alignment larger than 8 bytes start at the next even
3356 boundary. On ILP32 HP-UX, TFmode arguments start on the next even boundary
3357 even though their normal alignment is 8 bytes. See ia64_function_arg. */
3360 ia64_function_arg_boundary (enum machine_mode mode, tree type)
3363 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
3364 return PARM_BOUNDARY * 2;
3368 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
3369 return PARM_BOUNDARY * 2;
3371 return PARM_BOUNDARY;
3374 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
3375 return PARM_BOUNDARY * 2;
3377 return PARM_BOUNDARY;
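/* Illustration (added commentary): with PARM_BOUNDARY == 64 on this
   target, the doubled boundary above requests 128-bit alignment, e.g.
   for TFmode under ILP32 HP-UX.  */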
3380 /* Variable sized types are passed by reference. */
3381 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3384 ia64_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3385 enum machine_mode mode ATTRIBUTE_UNUSED,
3386 tree type, bool named ATTRIBUTE_UNUSED)
3388 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3391 /* True if it is OK to do sibling call optimization for the specified
3392 call expression EXP. DECL will be the called function, or NULL if
3393 this is an indirect call. */
3395 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3397 /* We can't perform a sibcall if the current function has the syscall_linkage attribute. */
3399 if (lookup_attribute ("syscall_linkage",
3400 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
3403 /* We must always return with our current GP. This means we can
3404 only sibcall to functions defined in the current module. */
3405 return decl && (*targetm.binds_local_p) (decl);
3409 /* Implement va_arg. */
3412 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3414 /* Variable sized types are passed by reference. */
3415 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
3417 tree ptrtype = build_pointer_type (type);
3418 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3419 return build_va_arg_indirect_ref (addr);
3422 /* Aggregate arguments with alignment larger than 8 bytes start at
3423 the next even boundary. Integer and floating point arguments
3424 do so if they are larger than 8 bytes, whether or not they are
3425 also aligned larger than 8 bytes. */
3426 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3427 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3429 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3430 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
3431 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3432 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
3433 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3434 gimplify_and_add (t, pre_p);
3437 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
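/* Illustration (added commentary): on this 64-bit target the alignment
   code above computes valist = (valist + 15) & -16, since
   2 * UNITS_PER_WORD - 1 == 15 and -2 * UNITS_PER_WORD == -16.  */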
3440 /* Return 1 if the function return value is returned in memory; return 0 if it is in a register. */
3444 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3446 enum machine_mode mode;
3447 enum machine_mode hfa_mode;
3448 HOST_WIDE_INT byte_size;
3450 mode = TYPE_MODE (valtype);
3451 byte_size = GET_MODE_SIZE (mode);
3452 if (mode == BLKmode)
3454 byte_size = int_size_in_bytes (valtype);
3459 /* HFAs with up to 8 elements are returned in the FP argument registers. */
3461 hfa_mode = hfa_element_mode (valtype, 0);
3462 if (hfa_mode != VOIDmode)
3464 int hfa_size = GET_MODE_SIZE (hfa_mode);
3466 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3471 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3477 /* Return rtx for register that holds the function return value. */
3480 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3482 enum machine_mode mode;
3483 enum machine_mode hfa_mode;
3485 mode = TYPE_MODE (valtype);
3486 hfa_mode = hfa_element_mode (valtype, 0);
3488 if (hfa_mode != VOIDmode)
3496 hfa_size = GET_MODE_SIZE (hfa_mode);
3497 byte_size = ((mode == BLKmode)
3498 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3500 for (i = 0; offset < byte_size; i++)
3502 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3503 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3507 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3509 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
3510 return gen_rtx_REG (mode, FR_ARG_FIRST);
3513 if (BYTES_BIG_ENDIAN
3514 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3522 bytesize = int_size_in_bytes (valtype);
3523 for (i = 0; offset < bytesize; i++)
3525 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3526 gen_rtx_REG (DImode,
3529 offset += UNITS_PER_WORD;
3531 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3534 return gen_rtx_REG (mode, GR_RET_FIRST);
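/* Illustration (added commentary): so a plain int comes back in r8
   (GR_RET_FIRST), a double in f8 (FR_ARG_FIRST), and an HFA of four
   floats as a PARALLEL over f8..f11, mirroring the argument-passing
   conventions above.  */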
3538 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3539 We need to emit DTP-relative relocations. */
3542 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3546 fputs ("\tdata8.ua\t@dtprel(", file);
3547 output_addr_const (file, x);
3551 /* Print a memory address as an operand to reference that memory location. */
3553 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3554 also call this from ia64_print_operand for memory addresses. */
3557 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3558 rtx address ATTRIBUTE_UNUSED)
3562 /* Print an operand to an assembler instruction.
3563 C Swap and print a comparison operator.
3564 D Print an FP comparison operator.
3565 E Print 32 - constant, for SImode shifts as extract.
3566 e Print 64 - constant, for DImode rotates.
3567 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3568 a floating point register emitted normally.
3569 I Invert a predicate register by adding 1.
3570 J Select the proper predicate register for a condition.
3571 j Select the inverse predicate register for a condition.
3572 O Append .acq for volatile load.
3573 P Postincrement of a MEM.
3574 Q Append .rel for volatile store.
3575 S Shift amount for shladd instruction.
3576 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3577 for Intel assembler.
3578 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3579 for Intel assembler.
3580 r Print register name, or constant 0 as r0. HP compatibility for the Linux kernel. */
3583 ia64_print_operand (FILE * file, rtx x, int code)
3590 /* Handled below. */
3595 enum rtx_code c = swap_condition (GET_CODE (x));
3596 fputs (GET_RTX_NAME (c), file);
3601 switch (GET_CODE (x))
3613 str = GET_RTX_NAME (GET_CODE (x));
3620 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3624 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3628 if (x == CONST0_RTX (GET_MODE (x)))
3629 str = reg_names [FR_REG (0)];
3630 else if (x == CONST1_RTX (GET_MODE (x)))
3631 str = reg_names [FR_REG (1)];
3632 else if (GET_CODE (x) == REG)
3633 str = reg_names [REGNO (x)];
3640 fputs (reg_names [REGNO (x) + 1], file);
3646 unsigned int regno = REGNO (XEXP (x, 0));
3647 if (GET_CODE (x) == EQ)
3651 fputs (reg_names [regno], file);
3656 if (MEM_VOLATILE_P (x))
3657 fputs(".acq", file);
3662 HOST_WIDE_INT value;
3664 switch (GET_CODE (XEXP (x, 0)))
3670 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
3671 if (GET_CODE (x) == CONST_INT)
3673 else if (GET_CODE (x) == REG)
3675 fprintf (file, ", %s", reg_names[REGNO (x)]);
3683 value = GET_MODE_SIZE (GET_MODE (x));
3687 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
3691 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
3696 if (MEM_VOLATILE_P (x))
3697 fputs(".rel", file);
3701 fprintf (file, "%d", exact_log2 (INTVAL (x)));
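/* Illustration (added commentary): a const_int operand of 8 prints
   here as 3 -- the shift count shladd needs for a multiply-by-8
   address computation.  */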
3705 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3707 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
3713 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3715 const char *prefix = "0x";
3716 if (INTVAL (x) & 0x80000000)
3718 fprintf (file, "0xffffffff");
3721 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
3727 /* If this operand is the constant zero, write it as register zero.
3728 Any register, zero, or CONST_INT value is OK here. */
3729 if (GET_CODE (x) == REG)
3730 fputs (reg_names[REGNO (x)], file);
3731 else if (x == CONST0_RTX (GET_MODE (x)))
3733 else if (GET_CODE (x) == CONST_INT)
3734 output_addr_const (file, x);
3736 output_operand_lossage ("invalid %%r value");
3743 /* For conditional branches, returns or calls, substitute
3744 sptk, dptk, dpnt, or spnt for %s. */
3745 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
3748 int pred_val = INTVAL (XEXP (x, 0));
3750 /* Guess top and bottom 10% statically predicted. */
3751 if (pred_val < REG_BR_PROB_BASE / 50)
3753 else if (pred_val < REG_BR_PROB_BASE / 2)
3755 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
3760 else if (GET_CODE (current_output_insn) == CALL_INSN)
3765 fputs (which, file);
3770 x = current_insn_predicate;
3773 unsigned int regno = REGNO (XEXP (x, 0));
3774 if (GET_CODE (x) == EQ)
3776 fprintf (file, "(%s) ", reg_names [regno]);
3781 output_operand_lossage ("ia64_print_operand: unknown code");
3785 switch (GET_CODE (x))
3787 /* This happens for the spill/restore instructions. */
3792 /* ... fall through ... */
3795 fputs (reg_names [REGNO (x)], file);
3800 rtx addr = XEXP (x, 0);
3801 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3802 addr = XEXP (addr, 0);
3803 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
3808 output_addr_const (file, x);
3815 /* Compute a (partial) cost for rtx X. Return true if the complete
3816 cost has been computed, and false if subexpressions should be
3817 scanned. In either case, *TOTAL contains the cost result. */
3818 /* ??? This is incomplete. */
3821 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
3829 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
3832 if (CONST_OK_FOR_I (INTVAL (x)))
3834 else if (CONST_OK_FOR_J (INTVAL (x)))
3837 *total = COSTS_N_INSNS (1);
3840 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
3843 *total = COSTS_N_INSNS (1);
3848 *total = COSTS_N_INSNS (1);
3854 *total = COSTS_N_INSNS (3);
3858 /* For multiplies wider than HImode, we have to go to the FPU,
3859 which normally involves copies. Plus there's the latency
3860 of the multiply itself, and the latency of the instructions to
3861 transfer integer regs to FP regs. */
3862 /* ??? Check for FP mode. */
3863 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
3864 *total = COSTS_N_INSNS (10);
3866 *total = COSTS_N_INSNS (2);
3874 *total = COSTS_N_INSNS (1);
3881 /* We make divide expensive, so that divide-by-constant will be
3882 optimized to a multiply. */
3883 *total = COSTS_N_INSNS (60);
3891 /* Calculate the cost of moving data from a register in class FROM to
3892 one in class TO, using MODE. */
3895 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
3898 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
3899 if (to == ADDL_REGS)
3901 if (from == ADDL_REGS)
3904 /* All costs are symmetric, so reduce cases by putting the
3905 lower number class as the destination. */
3908 enum reg_class tmp = to;
3909 to = from, from = tmp;
3912 /* Moving from FR<->GR in XFmode must be more expensive than 2,
3913 so that we get secondary memory reloads. Between FR_REGS,
3914 we have to make this at least as expensive as MEMORY_MOVE_COST
3915 to avoid spectacularly poor register class preferencing. */
3918 if (to != GR_REGS || from != GR_REGS)
3919 return MEMORY_MOVE_COST (mode, to, 0);
3927 /* Moving between PR registers takes two insns. */
3928 if (from == PR_REGS)
3930 /* Moving between PR and anything but GR is impossible. */
3931 if (from != GR_REGS)
3932 return MEMORY_MOVE_COST (mode, to, 0);
3936 /* Moving between BR and anything but GR is impossible. */
3937 if (from != GR_REGS && from != GR_AND_BR_REGS)
3938 return MEMORY_MOVE_COST (mode, to, 0);
3943 /* Moving between AR and anything but GR is impossible. */
3944 if (from != GR_REGS)
3945 return MEMORY_MOVE_COST (mode, to, 0);
3950 case GR_AND_FR_REGS:
3951 case GR_AND_BR_REGS:
3962 /* This function returns the register class required for a secondary
3963 register when copying between one of the registers in CLASS, and X,
3964 using MODE. A return value of NO_REGS means that no secondary register is required. */
3968 ia64_secondary_reload_class (enum reg_class class,
3969 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3973 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3974 regno = true_regnum (x);
3981 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
3982 interaction. We end up with two pseudos with overlapping lifetimes
3983 both of which are equiv to the same constant, and both of which need
3984 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
3985 changes depending on the path length, which means the qty_first_reg
3986 check in make_regs_eqv can give different answers at different times.
3987 At some point I'll probably need a reload_indi pattern to handle this.
3990 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
3991 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
3992 non-general registers for good measure. */
3993 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
3996 /* This is needed if a pseudo used as a call_operand gets spilled to a stack slot. */
3998 if (GET_CODE (x) == MEM)
4003 /* Need to go through general registers to get to other class regs. */
4004 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4007 /* This can happen when a paradoxical subreg is an operand to the muldi3 pattern. */
4009 /* ??? This shouldn't be necessary after instruction scheduling is
4010 enabled, because paradoxical subregs are not accepted by
4011 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4012 stop the paradoxical subreg stupidity in the *_operand functions in recog.c. */
4014 if (GET_CODE (x) == MEM
4015 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4016 || GET_MODE (x) == QImode))
4019 /* This can happen because of the ior/and/etc patterns that accept FP
4020 registers as operands. If the third operand is a constant, then it
4021 needs to be reloaded into a FP register. */
4022 if (GET_CODE (x) == CONST_INT)
4025 /* This can happen because of register elimination in a muldi3 insn.
4026 E.g. `26107 * (unsigned long)&u'. */
4027 if (GET_CODE (x) == PLUS)
4032 /* ??? This happens if we cse/gcse a BImode value across a call,
4033 and the function has a nonlocal goto. This is because global
4034 does not allocate call crossing pseudos to hard registers when
4035 current_function_has_nonlocal_goto is true. This is relatively
4036 common for C++ programs that use exceptions. To reproduce,
4037 return NO_REGS and compile libstdc++. */
4038 if (GET_CODE (x) == MEM)
4041 /* This can happen when we take a BImode subreg of a DImode value,
4042 and that DImode value winds up in some non-GR register. */
4043 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4055 /* Emit text to declare externally defined variables and functions, because
4056 the Intel assembler does not support undefined externals. */
4059 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4061 int save_referenced;
4063 /* GNU as does not need anything here, but the HP linker does need
4064 something for external functions. */
4068 || TREE_CODE (decl) != FUNCTION_DECL
4069 || strstr (name, "__builtin_") == name))
4072 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4073 the linker when we do this, so we need to be careful not to do this for
4074 builtin functions which have no library equivalent. Unfortunately, we
4075 can't tell here whether or not a function will actually be called by
4076 expand_expr, so we pull in library functions even if we may not need them. */
4078 if (! strcmp (name, "__builtin_next_arg")
4079 || ! strcmp (name, "alloca")
4080 || ! strcmp (name, "__builtin_constant_p")
4081 || ! strcmp (name, "__builtin_args_info"))
4085 ia64_hpux_add_extern_decl (decl);
4088 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and restore it. */
4090 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4091 if (TREE_CODE (decl) == FUNCTION_DECL)
4092 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4093 (*targetm.asm_out.globalize_label) (file, name);
4094 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4098 /* Parse the -mfixed-range= option string. */
4101 fix_range (const char *const_str)
4104 char *str, *dash, *comma;
4106 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4107 REG2 are either register names or register numbers. The effect
4108 of this option is to mark the registers in the range from REG1 to
4109 REG2 as ``fixed'' so they won't be used by the compiler. This is
4110 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
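/* For example (an illustrative invocation, not taken from this file),
-mfixed-range=f32-f127 reserves the upper half of the FP file, and
-mfixed-range=f32-f63,f96-f127 shows the comma-separated multi-range
form that the parsing code below accepts. */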
4112 i = strlen (const_str);
4113 str = (char *) alloca (i + 1);
4114 memcpy (str, const_str, i + 1);
4118 dash = strchr (str, '-');
4121 warning ("value of -mfixed-range must have form REG1-REG2");
4126 comma = strchr (dash + 1, ',');
4130 first = decode_reg_name (str);
4133 warning ("unknown register name: %s", str);
4137 last = decode_reg_name (dash + 1);
4140 warning ("unknown register name: %s", dash + 1);
4148 warning ("%s-%s is an empty range", str, dash + 1);
4152 for (i = first; i <= last; ++i)
4153 fixed_regs[i] = call_used_regs[i] = 1;
4163 static struct machine_function *
4164 ia64_init_machine_status (void)
4166 return ggc_alloc_cleared (sizeof (struct machine_function));
4169 /* Handle TARGET_OPTIONS switches. */
4172 ia64_override_options (void)
4176 const char *const name; /* processor name or nickname. */
4177 const enum processor_type processor;
4179 const processor_alias_table[] =
4181 {"itanium", PROCESSOR_ITANIUM},
4182 {"itanium1", PROCESSOR_ITANIUM},
4183 {"merced", PROCESSOR_ITANIUM},
4184 {"itanium2", PROCESSOR_ITANIUM2},
4185 {"mckinley", PROCESSOR_ITANIUM2},
4188 int const pta_size = ARRAY_SIZE (processor_alias_table);
4191 if (TARGET_AUTO_PIC)
4192 target_flags |= MASK_CONST_GP;
4194 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4196 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4197 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4199 warning ("cannot optimize floating point division for both latency and throughput");
4200 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4204 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4205 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4207 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4211 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4213 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4214 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4216 warning ("cannot optimize integer division for both latency and throughput");
4217 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4221 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4222 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4224 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4228 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4230 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4231 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4233 warning ("cannot optimize square root for both latency and throughput");
4234 target_flags &= ~MASK_INLINE_SQRT_THR;
4238 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4239 target_flags &= ~MASK_INLINE_SQRT_LAT;
4241 target_flags &= ~MASK_INLINE_SQRT_THR;
4245 if (TARGET_INLINE_SQRT_LAT)
4247 warning ("not yet implemented: latency-optimized inline square root");
4248 target_flags &= ~MASK_INLINE_SQRT_LAT;
4251 if (ia64_fixed_range_string)
4252 fix_range (ia64_fixed_range_string);
4254 if (ia64_tls_size_string)
4257 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4258 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4259 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4261 ia64_tls_size = tmp;
4264 if (!ia64_tune_string)
4265 ia64_tune_string = "itanium2";
4267 for (i = 0; i < pta_size; i++)
4268 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4270 ia64_tune = processor_alias_table[i].processor;
4275 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4277 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4278 flag_schedule_insns_after_reload = 0;
4280 /* Variable tracking should be run after all optimizations which change the
4281 order of insns. It also needs a valid CFG. */
4282 ia64_flag_var_tracking = flag_var_tracking;
4283 flag_var_tracking = 0;
4285 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4287 init_machine_status = ia64_init_machine_status;
4290 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4291 static enum attr_type ia64_safe_type (rtx);
4293 static enum attr_itanium_class
4294 ia64_safe_itanium_class (rtx insn)
4296 if (recog_memoized (insn) >= 0)
4297 return get_attr_itanium_class (insn);
4299 return ITANIUM_CLASS_UNKNOWN;
4302 static enum attr_type
4303 ia64_safe_type (rtx insn)
4305 if (recog_memoized (insn) >= 0)
4306 return get_attr_type (insn);
4308 return TYPE_UNKNOWN;
4311 /* The following collection of routines emits instruction group stop bits as
4312 necessary to avoid dependencies. */
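/* As a concrete illustration (hypothetical assembly, not emitted by this
file as-is): in IA-64 assembly a stop bit appears as the ";;" closing an
instruction group, e.g.

add r14 = r15, r16
;;
add r17 = r14, r18 // reads r14, so the group must end in between

because a read-after-write of the same register is not allowed within a
single instruction group. */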
4314 /* Need to track some additional registers as far as serialization is
4315 concerned so we can properly handle br.call and br.ret. We could
4316 make these registers visible to gcc, but since these registers are
4317 never explicitly used in gcc generated code, it seems wasteful to
4318 do so (plus it would make the call and return patterns needlessly
4319 complex). */
4320 #define REG_RP (BR_REG (0))
4321 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4322 /* This is used for volatile asms which may require a stop bit immediately
4323 before and after them. */
4324 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4325 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4326 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
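/* The bookkeeping arrays below therefore cover every ordinary hard
register plus the synthetic entries defined above: the CFM, the marker
used for volatile asms, and one entry per UNAT bit, which
rtx_needs_barrier updates individually for gr_spill/gr_restore. */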
4328 /* For each register, we keep track of how it has been written in the
4329 current instruction group.
4331 If a register is written unconditionally (no qualifying predicate),
4332 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4334 If a register is written if its qualifying predicate P is true, we
4335 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4336 may be written again by the complement of P (P^1) and when this happens,
4337 WRITE_COUNT gets set to 2.
4339 The result of this is that whenever an insn attempts to write a register
4340 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4342 If a predicate register is written by a floating-point insn, we set
4343 WRITTEN_BY_FP to true.
4345 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4346 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
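/* A worked example of the WRITE_COUNT transitions (hypothetical insns,
assuming p6/p7 are the complementary pair produced by a single compare,
per the P/P+1 convention relied on below):

(p6) mov r8 = 1 // r8: WRITE_COUNT 0 -> 1, FIRST_PRED = p6
(p7) mov r8 = 2 // complement of p6: WRITE_COUNT 1 -> 2
mov r8 = 3 // WRITE_COUNT is already 2, so a stop bit is needed */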
4348 struct reg_write_state
4350 unsigned int write_count : 2;
4351 unsigned int first_pred : 16;
4352 unsigned int written_by_fp : 1;
4353 unsigned int written_by_and : 1;
4354 unsigned int written_by_or : 1;
4355 };
4357 /* Cumulative info for the current instruction group. */
4358 struct reg_write_state rws_sum[NUM_REGS];
4359 /* Info for the current instruction. This gets copied to rws_sum after a
4360 stop bit is emitted. */
4361 struct reg_write_state rws_insn[NUM_REGS];
4363 /* Indicates whether this is the first instruction after a stop bit,
4364 in which case we don't need another stop bit. Without this, we hit
4365 the abort in ia64_variable_issue when scheduling an alloc. */
4366 static int first_instruction;
4368 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4369 RTL for one instruction. */
4370 struct reg_flags
4371 {
4372 unsigned int is_write : 1; /* Is register being written? */
4373 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4374 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4375 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4376 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4377 unsigned int is_sibcall : 1; /* Is this a sibling call rather than a normal call? */
4378 };
4380 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4381 static int rws_access_regno (int, struct reg_flags, int);
4382 static int rws_access_reg (rtx, struct reg_flags, int);
4383 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4384 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4385 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4386 static void init_insn_group_barriers (void);
4387 static int group_barrier_needed_p (rtx);
4388 static int safe_group_barrier_needed_p (rtx);
4390 /* Update *RWS for REGNO, which is being written by the current instruction,
4391 with predicate PRED, and associated register flags in FLAGS. */
4394 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4397 rws[regno].write_count++;
4399 rws[regno].write_count = 2;
4400 rws[regno].written_by_fp |= flags.is_fp;
4401 /* ??? Not tracking and/or across differing predicates. */
4402 rws[regno].written_by_and = flags.is_and;
4403 rws[regno].written_by_or = flags.is_or;
4404 rws[regno].first_pred = pred;
4407 /* Handle an access to register REGNO of type FLAGS using predicate register
4408 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4409 a dependency with an earlier instruction in the same group. */
4412 rws_access_regno (int regno, struct reg_flags flags, int pred)
4414 int need_barrier = 0;
4416 if (regno >= NUM_REGS)
4419 if (! PR_REGNO_P (regno))
4420 flags.is_and = flags.is_or = 0;
4426 /* One insn writes same reg multiple times? */
4427 if (rws_insn[regno].write_count > 0)
4430 /* Update info for current instruction. */
4431 rws_update (rws_insn, regno, flags, pred);
4432 write_count = rws_sum[regno].write_count;
4434 switch (write_count)
4437 /* The register has not been written yet. */
4438 rws_update (rws_sum, regno, flags, pred);
4442 /* The register has been written via a predicate. If this is
4443 not a complementary predicate, then we need a barrier. */
4444 /* ??? This assumes that P and P+1 are always complementary
4445 predicates for P even. */
4446 if (flags.is_and && rws_sum[regno].written_by_and)
4448 else if (flags.is_or && rws_sum[regno].written_by_or)
4450 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4452 rws_update (rws_sum, regno, flags, pred);
4456 /* The register has been unconditionally written already. We
4457 need a barrier. */
4458 if (flags.is_and && rws_sum[regno].written_by_and)
4460 else if (flags.is_or && rws_sum[regno].written_by_or)
4464 rws_sum[regno].written_by_and = flags.is_and;
4465 rws_sum[regno].written_by_or = flags.is_or;
4474 if (flags.is_branch)
4476 /* Branches have several RAW exceptions that allow us to avoid
4477 barriers. */
4479 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4480 /* RAW dependencies on branch regs are permissible as long
4481 as the writer is a non-branch instruction. Since we
4482 never generate code that uses a branch register written
4483 by a branch instruction, handling this case is
4484 easy. */
4487 if (REGNO_REG_CLASS (regno) == PR_REGS
4488 && ! rws_sum[regno].written_by_fp)
4489 /* The predicates of a branch are available within the
4490 same insn group as long as the predicate was written by
4491 something other than a floating-point instruction. */
4495 if (flags.is_and && rws_sum[regno].written_by_and)
4497 if (flags.is_or && rws_sum[regno].written_by_or)
4500 switch (rws_sum[regno].write_count)
4503 /* The register has not been written yet. */
4507 /* The register has been written via a predicate. If this is
4508 not a complementary predicate, then we need a barrier. */
4509 /* ??? This assumes that P and P+1 are always complementary
4510 predicates for P even. */
4511 if ((rws_sum[regno].first_pred ^ 1) != pred)
4516 /* The register has been unconditionally written already. We
4517 need a barrier. */
4526 return need_barrier;
4530 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4532 int regno = REGNO (reg);
4533 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4535 if (n == 1)
4536 return rws_access_regno (regno, flags, pred);
4539 int need_barrier = 0;
4540 while (--n >= 0)
4541 need_barrier |= rws_access_regno (regno + n, flags, pred);
4542 return need_barrier;
4546 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4547 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4550 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4552 rtx src = SET_SRC (x);
4556 switch (GET_CODE (src))
4562 if (SET_DEST (x) == pc_rtx)
4563 /* X is a conditional branch. */
4567 int is_complemented = 0;
4569 /* X is a conditional move. */
4570 rtx cond = XEXP (src, 0);
4571 if (GET_CODE (cond) == EQ)
4572 is_complemented = 1;
4573 cond = XEXP (cond, 0);
4574 if (GET_CODE (cond) != REG
4575 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4578 if (XEXP (src, 1) == SET_DEST (x)
4579 || XEXP (src, 2) == SET_DEST (x))
4581 /* X is a conditional move that conditionally writes the
4582 destination register. */
4584 /* We need another complement in this case. */
4585 if (XEXP (src, 1) == SET_DEST (x))
4586 is_complemented = ! is_complemented;
4588 *ppred = REGNO (cond);
4589 if (is_complemented)
4593 /* ??? If this is a conditional write to the dest, then this
4594 instruction does not actually read one source. This probably
4595 doesn't matter, because that source is also the dest. */
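/* Illustrative RTL for the case above (hypothetical operands):
(set (reg r8) (if_then_else (ne (reg p6) (const_int 0)) (reg r9) (reg r8)))
conditionally writes r8 under p6; with EQ instead of NE the sense is
inverted and the complemented predicate is recorded instead. */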
4596 /* ??? Multiple writes to predicate registers are allowed
4597 if they are all AND type compares, or if they are all OR
4598 type compares. We do not generate such instructions
4599 currently. */
4601 /* ... fall through ... */
4604 if (COMPARISON_P (src)
4605 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
4606 /* Set pflags->is_fp to 1 so that we know we're dealing
4607 with a floating point comparison when processing the
4608 destination of the SET. */
4611 /* Discover if this is a parallel comparison. We only handle
4612 and.orcm and or.andcm at present, since we must retain a
4613 strict inverse on the predicate pair. */
4614 else if (GET_CODE (src) == AND)
4616 else if (GET_CODE (src) == IOR)
4623 /* Subroutine of rtx_needs_barrier; this function determines whether the
4624 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
4625 are as in rtx_needs_barrier. COND is an rtx that holds the condition
4626 for this insn. */
4629 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
4631 int need_barrier = 0;
4633 rtx src = SET_SRC (x);
4635 if (GET_CODE (src) == CALL)
4636 /* We don't need to worry about the result registers that
4637 get written by a subroutine call. */
4638 return rtx_needs_barrier (src, flags, pred);
4639 else if (SET_DEST (x) == pc_rtx)
4641 /* X is a conditional branch. */
4642 /* ??? This seems redundant, as the caller sets this bit for
4643 all JUMP_INSNs. */
4644 flags.is_branch = 1;
4645 return rtx_needs_barrier (src, flags, pred);
4648 need_barrier = rtx_needs_barrier (src, flags, pred);
4650 /* This instruction unconditionally uses a predicate register. */
4652 need_barrier |= rws_access_reg (cond, flags, 0);
4655 if (GET_CODE (dst) == ZERO_EXTRACT)
4657 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
4658 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
4659 dst = XEXP (dst, 0);
4661 return need_barrier;
4664 /* Handle an access to rtx X of type FLAGS using predicate register
4665 PRED. Return 1 if this access creates a dependency with an earlier
4666 instruction in the same group. */
4669 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
4672 int is_complemented = 0;
4673 int need_barrier = 0;
4674 const char *format_ptr;
4675 struct reg_flags new_flags;
4683 switch (GET_CODE (x))
4686 update_set_flags (x, &new_flags, &pred, &cond);
4687 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
4688 if (GET_CODE (SET_SRC (x)) != CALL)
4690 new_flags.is_write = 1;
4691 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
4696 new_flags.is_write = 0;
4697 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4699 /* Avoid multiple register writes, in case this is a pattern with
4700 multiple CALL rtx. This avoids an abort in rws_access_reg. */
4701 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
4703 new_flags.is_write = 1;
4704 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
4705 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
4706 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4711 /* X is a predicated instruction. */
4713 cond = COND_EXEC_TEST (x);
4716 need_barrier = rtx_needs_barrier (cond, flags, 0);
4718 if (GET_CODE (cond) == EQ)
4719 is_complemented = 1;
4720 cond = XEXP (cond, 0);
4721 if (GET_CODE (cond) != REG
4722 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4724 pred = REGNO (cond);
4725 if (is_complemented)
4728 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
4729 return need_barrier;
4733 /* Clobber & use are for earlier compiler-phases only. */
4738 /* We always emit stop bits for traditional asms. We emit stop bits
4739 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
4740 if (GET_CODE (x) != ASM_OPERANDS
4741 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
4743 /* Avoid writing the register multiple times if we have multiple
4744 asm outputs. This avoids an abort in rws_access_reg. */
4745 if (! rws_insn[REG_VOLATILE].write_count)
4747 new_flags.is_write = 1;
4748 rws_access_regno (REG_VOLATILE, new_flags, pred);
4753 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
4754 We cannot just fall through here since then we would be confused
4755 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
4756 a traditional asm, unlike its normal usage. */
4758 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
4759 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
4764 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4766 rtx pat = XVECEXP (x, 0, i);
4767 if (GET_CODE (pat) == SET)
4769 update_set_flags (pat, &new_flags, &pred, &cond);
4770 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
4772 else if (GET_CODE (pat) == USE
4773 || GET_CODE (pat) == CALL
4774 || GET_CODE (pat) == ASM_OPERANDS)
4775 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4776 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
4779 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4781 rtx pat = XVECEXP (x, 0, i);
4782 if (GET_CODE (pat) == SET)
4784 if (GET_CODE (SET_SRC (pat)) != CALL)
4786 new_flags.is_write = 1;
4787 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
4791 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
4792 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4800 if (REGNO (x) == AR_UNAT_REGNUM)
4802 for (i = 0; i < 64; ++i)
4803 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
4806 need_barrier = rws_access_reg (x, flags, pred);
4810 /* Find the regs used in memory address computation. */
4811 new_flags.is_write = 0;
4812 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4815 case CONST_INT: case CONST_DOUBLE:
4816 case SYMBOL_REF: case LABEL_REF: case CONST:
4819 /* Operators with side-effects. */
4820 case POST_INC: case POST_DEC:
4821 if (GET_CODE (XEXP (x, 0)) != REG)
4824 new_flags.is_write = 0;
4825 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4826 new_flags.is_write = 1;
4827 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4831 if (GET_CODE (XEXP (x, 0)) != REG)
4834 new_flags.is_write = 0;
4835 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4836 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4837 new_flags.is_write = 1;
4838 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4841 /* Handle common unary and binary ops for efficiency. */
4842 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
4843 case MOD: case UDIV: case UMOD: case AND: case IOR:
4844 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
4845 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
4846 case NE: case EQ: case GE: case GT: case LE:
4847 case LT: case GEU: case GTU: case LEU: case LTU:
4848 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4849 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4852 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
4853 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
4854 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
4855 case SQRT: case FFS: case POPCOUNT:
4856 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
4860 switch (XINT (x, 1))
4862 case UNSPEC_LTOFF_DTPMOD:
4863 case UNSPEC_LTOFF_DTPREL:
4865 case UNSPEC_LTOFF_TPREL:
4867 case UNSPEC_PRED_REL_MUTEX:
4868 case UNSPEC_PIC_CALL:
4870 case UNSPEC_FETCHADD_ACQ:
4871 case UNSPEC_BSP_VALUE:
4872 case UNSPEC_FLUSHRS:
4873 case UNSPEC_BUNDLE_SELECTOR:
4876 case UNSPEC_GR_SPILL:
4877 case UNSPEC_GR_RESTORE:
4879 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
4880 HOST_WIDE_INT bit = (offset >> 3) & 63;
4882 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4883 new_flags.is_write = (XINT (x, 1) == 1);
4884 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
4889 case UNSPEC_FR_SPILL:
4890 case UNSPEC_FR_RESTORE:
4891 case UNSPEC_GETF_EXP:
4892 case UNSPEC_SETF_EXP:
4894 case UNSPEC_FR_SQRT_RECIP_APPROX:
4895 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4898 case UNSPEC_FR_RECIP_APPROX:
4899 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4900 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4903 case UNSPEC_CMPXCHG_ACQ:
4904 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4905 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
4913 case UNSPEC_VOLATILE:
4914 switch (XINT (x, 1))
4917 /* Alloc must always be the first instruction of a group.
4918 We force this by always returning true. */
4919 /* ??? We might get better scheduling if we explicitly check for
4920 input/local/output register dependencies, and modify the
4921 scheduler so that alloc is always reordered to the start of
4922 the current group. We could then eliminate all of the
4923 first_instruction code. */
4924 rws_access_regno (AR_PFS_REGNUM, flags, pred);
4926 new_flags.is_write = 1;
4927 rws_access_regno (REG_AR_CFM, new_flags, pred);
4930 case UNSPECV_SET_BSP:
4934 case UNSPECV_BLOCKAGE:
4935 case UNSPECV_INSN_GROUP_BARRIER:
4937 case UNSPECV_PSAC_ALL:
4938 case UNSPECV_PSAC_NORMAL:
4947 new_flags.is_write = 0;
4948 need_barrier = rws_access_regno (REG_RP, flags, pred);
4949 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
4951 new_flags.is_write = 1;
4952 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4953 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4957 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
4958 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4959 switch (format_ptr[i])
4961 case '0': /* unused field */
4962 case 'i': /* integer */
4963 case 'n': /* note */
4964 case 'w': /* wide integer */
4965 case 's': /* pointer to string */
4966 case 'S': /* optional pointer to string */
4970 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
4975 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
4976 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
4985 return need_barrier;
4988 /* Clear out the state for group_barrier_needed_p at the start of a
4989 sequence of insns. */
4992 init_insn_group_barriers (void)
4994 memset (rws_sum, 0, sizeof (rws_sum));
4995 first_instruction = 1;
4998 /* Given the current state, recorded by previous calls to this function,
4999 determine whether a group barrier (a stop bit) is necessary before INSN.
5000 Return nonzero if so. */
5003 group_barrier_needed_p (rtx insn)
5006 int need_barrier = 0;
5007 struct reg_flags flags;
5009 memset (&flags, 0, sizeof (flags));
5010 switch (GET_CODE (insn))
5016 /* A barrier doesn't imply an instruction group boundary. */
5020 memset (rws_insn, 0, sizeof (rws_insn));
5024 flags.is_branch = 1;
5025 flags.is_sibcall = SIBLING_CALL_P (insn);
5026 memset (rws_insn, 0, sizeof (rws_insn));
5028 /* Don't bundle a call following another call. */
5029 if ((pat = prev_active_insn (insn))
5030 && GET_CODE (pat) == CALL_INSN)
5036 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5040 flags.is_branch = 1;
5042 /* Don't bundle a jump following a call. */
5043 if ((pat = prev_active_insn (insn))
5044 && GET_CODE (pat) == CALL_INSN)
5052 if (GET_CODE (PATTERN (insn)) == USE
5053 || GET_CODE (PATTERN (insn)) == CLOBBER)
5054 /* Don't care about USE and CLOBBER "insns"---those are used to
5055 indicate to the optimizer that it shouldn't get rid of
5056 certain operations. */
5059 pat = PATTERN (insn);
5061 /* Ug. Hack hacks hacked elsewhere. */
5062 switch (recog_memoized (insn))
5064 /* We play dependency tricks with the epilogue in order
5065 to get proper schedules. Undo this for dv analysis. */
5066 case CODE_FOR_epilogue_deallocate_stack:
5067 case CODE_FOR_prologue_allocate_stack:
5068 pat = XVECEXP (pat, 0, 0);
5071 /* The pattern we use for br.cloop confuses the code above.
5072 The second element of the vector is representative. */
5073 case CODE_FOR_doloop_end_internal:
5074 pat = XVECEXP (pat, 0, 1);
5077 /* Doesn't generate code. */
5078 case CODE_FOR_pred_rel_mutex:
5079 case CODE_FOR_prologue_use:
5086 memset (rws_insn, 0, sizeof (rws_insn));
5087 need_barrier = rtx_needs_barrier (pat, flags, 0);
5089 /* Check to see if the previous instruction was a volatile
5090 asm. */
5091 if (! need_barrier)
5092 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5099 if (first_instruction && INSN_P (insn)
5100 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5101 && GET_CODE (PATTERN (insn)) != USE
5102 && GET_CODE (PATTERN (insn)) != CLOBBER)
5105 first_instruction = 0;
5108 return need_barrier;
5111 /* Like group_barrier_needed_p, but do not clobber the current state. */
5114 safe_group_barrier_needed_p (rtx insn)
5116 struct reg_write_state rws_saved[NUM_REGS];
5117 int saved_first_instruction;
5120 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5121 saved_first_instruction = first_instruction;
5123 t = group_barrier_needed_p (insn);
5125 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5126 first_instruction = saved_first_instruction;
5131 /* Scan the current function and insert stop bits as necessary to
5132 eliminate dependencies. This function assumes that a final
5133 instruction scheduling pass has been run which has already
5134 inserted most of the necessary stop bits. This function only
5135 inserts new ones at basic block boundaries, since these are
5136 invisible to the scheduler. */
5139 emit_insn_group_barriers (FILE *dump)
5143 int insns_since_last_label = 0;
5145 init_insn_group_barriers ();
5147 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5149 if (GET_CODE (insn) == CODE_LABEL)
5151 if (insns_since_last_label)
5153 insns_since_last_label = 0;
5155 else if (GET_CODE (insn) == NOTE
5156 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5158 if (insns_since_last_label)
5160 insns_since_last_label = 0;
5162 else if (GET_CODE (insn) == INSN
5163 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5164 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5166 init_insn_group_barriers ();
5169 else if (INSN_P (insn))
5171 insns_since_last_label = 1;
5173 if (group_barrier_needed_p (insn))
5178 fprintf (dump, "Emitting stop before label %d\n",
5179 INSN_UID (last_label));
5180 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5183 init_insn_group_barriers ();
5191 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5192 This function has to emit all necessary group barriers. */
5195 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5199 init_insn_group_barriers ();
5201 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5203 if (GET_CODE (insn) == BARRIER)
5205 rtx last = prev_active_insn (insn);
5209 if (GET_CODE (last) == JUMP_INSN
5210 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5211 last = prev_active_insn (last);
5212 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5213 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5215 init_insn_group_barriers ();
5217 else if (INSN_P (insn))
5219 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5220 init_insn_group_barriers ();
5221 else if (group_barrier_needed_p (insn))
5223 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5224 init_insn_group_barriers ();
5225 group_barrier_needed_p (insn);
5232 static int errata_find_address_regs (rtx *, void *);
5233 static void errata_emit_nops (rtx);
5234 static void fixup_errata (void);
5236 /* This structure is used to track some details about the previous insn
5237 groups so we can determine if it may be necessary to insert NOPs to
5238 work around hardware errata. */
5241 HARD_REG_SET p_reg_set;
5242 HARD_REG_SET gr_reg_conditionally_set;
5245 /* Index into the last_group array. */
5246 static int group_idx;
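/* last_group is used as a two-entry ring buffer: group_idx selects the
entry for the current insn group and group_idx ^ 1 the entry for the
previous one, as the lookups below rely on. */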
5248 /* Called through for_each_rtx; determines if a hard register that was
5249 conditionally set in the previous group is used as an address register.
5250 It ensures that for_each_rtx returns 1 in that case. */
5252 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5255 if (GET_CODE (x) != MEM)
5258 if (GET_CODE (x) == POST_MODIFY)
5260 if (GET_CODE (x) == REG)
5262 struct group *prev_group = last_group + (group_idx ^ 1);
5263 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5271 /* Called for each insn; this function keeps track of the state in
5272 last_group and emits additional NOPs if necessary to work around
5273 an Itanium A/B step erratum. */
5275 errata_emit_nops (rtx insn)
5277 struct group *this_group = last_group + group_idx;
5278 struct group *prev_group = last_group + (group_idx ^ 1);
5279 rtx pat = PATTERN (insn);
5280 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5281 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5282 enum attr_type type;
5285 if (GET_CODE (real_pat) == USE
5286 || GET_CODE (real_pat) == CLOBBER
5287 || GET_CODE (real_pat) == ASM_INPUT
5288 || GET_CODE (real_pat) == ADDR_VEC
5289 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5290 || asm_noperands (PATTERN (insn)) >= 0)
5293 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5294 part of it here. */
5296 if (GET_CODE (set) == PARALLEL)
5299 set = XVECEXP (real_pat, 0, 0);
5300 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5301 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5302 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5309 if (set && GET_CODE (set) != SET)
5312 type = get_attr_type (insn);
5315 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5316 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5318 if ((type == TYPE_M || type == TYPE_A) && cond && set
5319 && REG_P (SET_DEST (set))
5320 && GET_CODE (SET_SRC (set)) != PLUS
5321 && GET_CODE (SET_SRC (set)) != MINUS
5322 && (GET_CODE (SET_SRC (set)) != ASHIFT
5323 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5324 && (GET_CODE (SET_SRC (set)) != MEM
5325 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5326 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5328 if (!COMPARISON_P (cond)
5329 || !REG_P (XEXP (cond, 0)))
5332 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5333 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5335 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5337 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5338 emit_insn_before (gen_nop (), insn);
5339 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5341 memset (last_group, 0, sizeof last_group);
5345 /* Emit extra nops if they are required to work around hardware errata. */
5352 if (! TARGET_B_STEP)
5356 memset (last_group, 0, sizeof last_group);
5358 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5363 if (ia64_safe_type (insn) == TYPE_S)
5366 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5369 errata_emit_nops (insn);
5374 /* Instruction scheduling support. */
5376 #define NR_BUNDLES 10
5378 /* A list of names of all available bundles. */
5380 static const char *bundle_name [NR_BUNDLES] =
5386 #if NR_BUNDLES == 10
5396 /* Nonzero if we should insert stop bits into the schedule. */
5398 int ia64_final_schedule = 0;
5400 /* Codes of the corresponding queried units: */
5402 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5403 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5405 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5406 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5408 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5410 /* The following variable value is an insn group barrier. */
5412 static rtx dfa_stop_insn;
5414 /* The following variable value is the last issued insn. */
5416 static rtx last_scheduled_insn;
5418 /* The following variable value is the size of the DFA state. */
5420 static size_t dfa_state_size;
5422 /* The following variable value is a pointer to a DFA state used as a
5423 temporary variable. */
5425 static state_t temp_dfa_state = NULL;
5427 /* The following variable value is the DFA state after issuing the last
5428 insn. */
5430 static state_t prev_cycle_state = NULL;
5432 /* The following array element values are TRUE if the corresponding
5433 insn requires stop bits to be added before it. */
5435 static char *stops_p;
5437 /* The following variable is used to set up the array mentioned above. */
5439 static int stop_before_p = 0;
5441 /* The following variable value is the length of the arrays `clocks' and
5442 `add_cycles'. */
5444 static int clocks_length;
5446 /* The following array element values are cycles on which the
5447 corresponding insn will be issued. The array is used only for
5448 Itanium1. */
5452 /* The following array element values are numbers of cycles that should be
5453 added to improve insn scheduling for MM_insns for Itanium1. */
5455 static int *add_cycles;
5457 static rtx ia64_single_set (rtx);
5458 static void ia64_emit_insn_before (rtx, rtx);
5460 /* Map a bundle number to its pseudo-op. */
5463 get_bundle_name (int b)
5465 return bundle_name[b];
5469 /* Return the maximum number of instructions a cpu can issue. */
5472 ia64_issue_rate (void)
5477 /* Helper function - like single_set, but look inside COND_EXEC. */
5480 ia64_single_set (rtx insn)
5482 rtx x = PATTERN (insn), ret;
5483 if (GET_CODE (x) == COND_EXEC)
5484 x = COND_EXEC_CODE (x);
5485 if (GET_CODE (x) == SET)
5488 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
5489 Although they are not classical single set, the second set is there just
5490 to protect it from moving past FP-relative stack accesses. */
5491 switch (recog_memoized (insn))
5493 case CODE_FOR_prologue_allocate_stack:
5494 case CODE_FOR_epilogue_deallocate_stack:
5495 ret = XVECEXP (x, 0, 0);
5499 ret = single_set_2 (insn, x);
5506 /* Adjust the cost of a scheduling dependency. Return the new cost of
5507 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5510 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5512 enum attr_itanium_class dep_class;
5513 enum attr_itanium_class insn_class;
5515 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5518 insn_class = ia64_safe_itanium_class (insn);
5519 dep_class = ia64_safe_itanium_class (dep_insn);
5520 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5521 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5527 /* Like emit_insn_before, but skip cycle_display notes.
5528 ??? When cycle display notes are implemented, update this. */
5531 ia64_emit_insn_before (rtx insn, rtx before)
5533 emit_insn_before (insn, before);
5536 /* The following function marks insns that produce addresses for load
5537 and store insns. Such insns will be placed into M slots because that
5538 decreases latency time for Itanium1 (see function
5539 `ia64_produce_address_p' and the DFA descriptions). */
5542 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5544 rtx insn, link, next, next_tail;
5546 next_tail = NEXT_INSN (tail);
5547 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5550 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5552 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5554 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5556 next = XEXP (link, 0);
5557 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5558 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5559 && ia64_st_address_bypass_p (insn, next))
5561 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5562 || ia64_safe_itanium_class (next)
5563 == ITANIUM_CLASS_FLD)
5564 && ia64_ld_address_bypass_p (insn, next))
5567 insn->call = link != 0;
5571 /* We're beginning a new block. Initialize data structures as necessary. */
5574 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5575 int sched_verbose ATTRIBUTE_UNUSED,
5576 int max_ready ATTRIBUTE_UNUSED)
5578 #ifdef ENABLE_CHECKING
5581 if (reload_completed)
5582 for (insn = NEXT_INSN (current_sched_info->prev_head);
5583 insn != current_sched_info->next_tail;
5584 insn = NEXT_INSN (insn))
5585 if (SCHED_GROUP_P (insn))
5588 last_scheduled_insn = NULL_RTX;
5589 init_insn_group_barriers ();
5592 /* We are about to begin issuing insns for this clock cycle.
5593 Override the default sort algorithm to better slot instructions. */
5596 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5597 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5601 int n_ready = *pn_ready;
5602 rtx *e_ready = ready + n_ready;
5606 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5608 if (reorder_type == 0)
5610 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5612 for (insnp = ready; insnp < e_ready; insnp++)
5613 if (insnp < e_ready)
5616 enum attr_type t = ia64_safe_type (insn);
5617 if (t == TYPE_UNKNOWN)
5619 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5620 || asm_noperands (PATTERN (insn)) >= 0)
5622 rtx lowest = ready[n_asms];
5623 ready[n_asms] = insn;
5629 rtx highest = ready[n_ready - 1];
5630 ready[n_ready - 1] = insn;
5637 if (n_asms < n_ready)
5639 /* Some normal insns to process. Skip the asms. */
5643 else if (n_ready > 0)
5647 if (ia64_final_schedule)
5650 int nr_need_stop = 0;
5652 for (insnp = ready; insnp < e_ready; insnp++)
5653 if (safe_group_barrier_needed_p (*insnp))
5656 if (reorder_type == 1 && n_ready == nr_need_stop)
5658 if (reorder_type == 0)
5661 /* Move down everything that needs a stop bit, preserving
5662 relative order. */
5663 while (insnp-- > ready + deleted)
5664 while (insnp >= ready + deleted)
5667 if (! safe_group_barrier_needed_p (insn))
5669 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5680 /* We are about to begin issuing insns for this clock cycle. Override
5681 the default sort algorithm to better slot instructions. */
5684 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
5687 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
5688 pn_ready, clock_var, 0);
5691 /* Like ia64_sched_reorder, but called after issuing each insn.
5692 Override the default sort algorithm to better slot instructions. */
5695 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
5696 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
5697 int *pn_ready, int clock_var)
5699 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
5700 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
5701 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
5705 /* We are about to issue INSN. Return the number of insns left on the
5706 ready queue that can be issued this cycle. */
5709 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
5710 int sched_verbose ATTRIBUTE_UNUSED,
5711 rtx insn ATTRIBUTE_UNUSED,
5712 int can_issue_more ATTRIBUTE_UNUSED)
5714 last_scheduled_insn = insn;
5715 memcpy (prev_cycle_state, curr_state, dfa_state_size);
5716 if (reload_completed)
5718 if (group_barrier_needed_p (insn))
5720 if (GET_CODE (insn) == CALL_INSN)
5721 init_insn_group_barriers ();
5722 stops_p [INSN_UID (insn)] = stop_before_p;
5728 /* We are choosing an insn from the ready queue. Return nonzero if INSN
5729 can be chosen. */
5732 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
5734 if (insn == NULL_RTX || !INSN_P (insn))
5736 return (!reload_completed
5737 || !safe_group_barrier_needed_p (insn));
5740 /* The following variable value is a pseudo-insn used by the DFA insn
5741 scheduler to change the DFA state when the simulated clock is
5742 increased. */
5744 static rtx dfa_pre_cycle_insn;
5746 /* We are about to begin issuing INSN. Return nonzero if we cannot
5747 issue it on the given cycle CLOCK, and return zero if we should not sort
5748 the ready queue on the next clock start. */
5751 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
5752 int clock, int *sort_p)
5754 int setup_clocks_p = FALSE;
5756 if (insn == NULL_RTX || !INSN_P (insn))
5758 if ((reload_completed && safe_group_barrier_needed_p (insn))
5759 || (last_scheduled_insn
5760 && (GET_CODE (last_scheduled_insn) == CALL_INSN
5761 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5762 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
5764 init_insn_group_barriers ();
5765 if (verbose && dump)
5766 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
5767 last_clock == clock ? " + cycle advance" : "");
5769 if (last_clock == clock)
5771 state_transition (curr_state, dfa_stop_insn);
5772 if (TARGET_EARLY_STOP_BITS)
5773 *sort_p = (last_scheduled_insn == NULL_RTX
5774 || GET_CODE (last_scheduled_insn) != CALL_INSN);
5779 else if (reload_completed)
5780 setup_clocks_p = TRUE;
5781 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5782 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
5783 state_reset (curr_state);
5786 memcpy (curr_state, prev_cycle_state, dfa_state_size);
5787 state_transition (curr_state, dfa_stop_insn);
5788 state_transition (curr_state, dfa_pre_cycle_insn);
5789 state_transition (curr_state, NULL);
5792 else if (reload_completed)
5793 setup_clocks_p = TRUE;
5794 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
5795 && GET_CODE (PATTERN (insn)) != ASM_INPUT
5796 && asm_noperands (PATTERN (insn)) < 0)
5798 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
5800 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
5805 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
5806 if (REG_NOTE_KIND (link) == 0)
5808 enum attr_itanium_class dep_class;
5809 rtx dep_insn = XEXP (link, 0);
5811 dep_class = ia64_safe_itanium_class (dep_insn);
5812 if ((dep_class == ITANIUM_CLASS_MMMUL
5813 || dep_class == ITANIUM_CLASS_MMSHF)
5814 && last_clock - clocks [INSN_UID (dep_insn)] < 4
5816 || last_clock - clocks [INSN_UID (dep_insn)] < d))
5817 d = last_clock - clocks [INSN_UID (dep_insn)];
5820 add_cycles [INSN_UID (insn)] = 3 - d;
5828 /* The following page contains abstract data `bundle states' which are
5829 used for bundling insns (inserting nops and template generation). */
5831 /* The following describes the state of insn bundling. */
5833 struct bundle_state
5834 {
5835 /* Unique bundle state number to identify them in the debugging
5836 output. */
5837 int unique_num;
5838 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
5839 /* number of nops before and after the insn */
5840 short before_nops_num, after_nops_num;
5841 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
5842 insn) */
5843 int cost; /* cost of the state in cycles */
5844 int accumulated_insns_num; /* number of all previous insns including
5845 nops. L is considered as 2 insns */
5846 int branch_deviation; /* deviation of previous branches from 3rd slots */
5847 struct bundle_state *next; /* next state with the same insn_num */
5848 struct bundle_state *originator; /* originator (previous insn state) */
5849 /* All bundle states are in the following chain. */
5850 struct bundle_state *allocated_states_chain;
5851 /* The DFA State after issuing the insn and the nops. */
5852 state_t dfa_state;
5853 };
5855 /* The following maps an insn number to the corresponding bundle state. */
5857 static struct bundle_state **index_to_bundle_states;
5859 /* The unique number of the next bundle state. */
5861 static int bundle_states_num;
5863 /* All allocated bundle states are in the following chain. */
5865 static struct bundle_state *allocated_bundle_states_chain;
5867 /* All allocated but not used bundle states are in the following
5870 static struct bundle_state *free_bundle_state_chain;
5873 /* The following function returns a free bundle state. */
5875 static struct bundle_state *
5876 get_free_bundle_state (void)
5878 struct bundle_state *result;
5880 if (free_bundle_state_chain != NULL)
5882 result = free_bundle_state_chain;
5883 free_bundle_state_chain = result->next;
5887 result = xmalloc (sizeof (struct bundle_state));
5888 result->dfa_state = xmalloc (dfa_state_size);
5889 result->allocated_states_chain = allocated_bundle_states_chain;
5890 allocated_bundle_states_chain = result;
5892 result->unique_num = bundle_states_num++;
5897 /* The following function frees the given bundle state. */
5900 free_bundle_state (struct bundle_state *state)
5902 state->next = free_bundle_state_chain;
5903 free_bundle_state_chain = state;
5906 /* Start work with abstract data `bundle states'. */
5909 initiate_bundle_states (void)
5911 bundle_states_num = 0;
5912 free_bundle_state_chain = NULL;
5913 allocated_bundle_states_chain = NULL;
5916 /* Finish work with abstract data `bundle states'. */
5919 finish_bundle_states (void)
5921 struct bundle_state *curr_state, *next_state;
5923 for (curr_state = allocated_bundle_states_chain;
5925 curr_state = next_state)
5927 next_state = curr_state->allocated_states_chain;
5928 free (curr_state->dfa_state);
5933 /* Hash table of the bundle states. The key is dfa_state and insn_num
5934 of the bundle states. */
5936 static htab_t bundle_state_table;
5938 /* The function returns the hash of BUNDLE_STATE. */
5941 bundle_state_hash (const void *bundle_state)
5943 const struct bundle_state *state = (struct bundle_state *) bundle_state;
5946 for (result = i = 0; i < dfa_state_size; i++)
5947 result += (((unsigned char *) state->dfa_state) [i]
5948 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
5949 return result + state->insn_num;
5952 /* The function returns nonzero if the bundle state keys are equal. */
5955 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
5957 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
5958 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
5960 return (state1->insn_num == state2->insn_num
5961 && memcmp (state1->dfa_state, state2->dfa_state,
5962 dfa_state_size) == 0);
5965 /* The function inserts the BUNDLE_STATE into the hash table. The
5966 function returns nonzero if the bundle has been inserted into the
5967 table. The table contains the best bundle state with the given key. */
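/* "Best" is decided lexicographically, matching the comparison in the
function body: smaller cost first, then fewer accumulated insns (nops
included), then smaller branch deviation. */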
5970 insert_bundle_state (struct bundle_state *bundle_state)
5974 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
5975 if (*entry_ptr == NULL)
5977 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
5978 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
5979 *entry_ptr = (void *) bundle_state;
5982 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
5983 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
5984 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
5985 > bundle_state->accumulated_insns_num
5986 || (((struct bundle_state *)
5987 *entry_ptr)->accumulated_insns_num
5988 == bundle_state->accumulated_insns_num
5989 && ((struct bundle_state *)
5990 *entry_ptr)->branch_deviation
5991 > bundle_state->branch_deviation))))
5994 struct bundle_state temp;
5996 temp = *(struct bundle_state *) *entry_ptr;
5997 *(struct bundle_state *) *entry_ptr = *bundle_state;
5998 ((struct bundle_state *) *entry_ptr)->next = temp.next;
5999 *bundle_state = temp;
6004 /* Start work with the hash table. */
6007 initiate_bundle_state_table (void)
6009 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6013 /* Finish work with the hash table. */
6016 finish_bundle_state_table (void)
6018 htab_delete (bundle_state_table);
6023 /* The following variable is an insn `nop' used to check bundle states
6024 with different numbers of inserted nops. */
6026 static rtx ia64_nop;
6028 /* The following function tries to issue NOPS_NUM nops for the current
6029 state without advancing the processor cycle. If it fails, the
6030 function returns FALSE and frees the current state. */
6033 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6037 for (i = 0; i < nops_num; i++)
6038 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6040 free_bundle_state (curr_state);
6046 /* The following function tries to issue INSN for the current
6047 state without advancing the processor cycle. If it fails, the
6048 function returns FALSE and frees the current state. */
6051 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6053 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6055 free_bundle_state (curr_state);
6061 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6062 starting with ORIGINATOR without advancing the processor cycle. If
6063 TRY_BUNDLE_END_P is TRUE, the function also/only (if
6064 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6065 If it is successful, the function creates a new bundle state and
6066 inserts it into the hash table and into `index_to_bundle_states'. */
6069 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6070 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6072 struct bundle_state *curr_state;
6074 curr_state = get_free_bundle_state ();
6075 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6076 curr_state->insn = insn;
6077 curr_state->insn_num = originator->insn_num + 1;
6078 curr_state->cost = originator->cost;
6079 curr_state->originator = originator;
6080 curr_state->before_nops_num = before_nops_num;
6081 curr_state->after_nops_num = 0;
6082 curr_state->accumulated_insns_num
6083 = originator->accumulated_insns_num + before_nops_num;
6084 curr_state->branch_deviation = originator->branch_deviation;
6085 if (insn == NULL_RTX)
6087 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6089 if (GET_MODE (insn) == TImode)
6091 if (!try_issue_nops (curr_state, before_nops_num))
6093 if (!try_issue_insn (curr_state, insn))
6095 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6096 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6097 && curr_state->accumulated_insns_num % 3 != 0)
6099 free_bundle_state (curr_state);
6103 else if (GET_MODE (insn) != TImode)
6105 if (!try_issue_nops (curr_state, before_nops_num))
6107 if (!try_issue_insn (curr_state, insn))
6109 curr_state->accumulated_insns_num++;
6110 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6111 || asm_noperands (PATTERN (insn)) >= 0)
6113 if (ia64_safe_type (insn) == TYPE_L)
6114 curr_state->accumulated_insns_num++;
6118 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6119 state_transition (curr_state->dfa_state, NULL);
6121 if (!try_issue_nops (curr_state, before_nops_num))
6123 if (!try_issue_insn (curr_state, insn))
6125 curr_state->accumulated_insns_num++;
6126 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6127 || asm_noperands (PATTERN (insn)) >= 0)
6129 /* Finish bundle containing asm insn. */
6130 curr_state->after_nops_num
6131 = 3 - curr_state->accumulated_insns_num % 3;
6132 curr_state->accumulated_insns_num
6133 += 3 - curr_state->accumulated_insns_num % 3;
6135 else if (ia64_safe_type (insn) == TYPE_L)
6136 curr_state->accumulated_insns_num++;
6138 if (ia64_safe_type (insn) == TYPE_B)
6139 curr_state->branch_deviation
6140 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
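/* Here (accumulated_insns_num - 1) % 3 is the slot (0-2) this branch
landed in; its distance from the preferred third slot is what
accumulates as the branch deviation. */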
6141 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6143 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6146 struct bundle_state *curr_state1;
6147 struct bundle_state *allocated_states_chain;
6149 curr_state1 = get_free_bundle_state ();
6150 dfa_state = curr_state1->dfa_state;
6151 allocated_states_chain = curr_state1->allocated_states_chain;
6152 *curr_state1 = *curr_state;
6153 curr_state1->dfa_state = dfa_state;
6154 curr_state1->allocated_states_chain = allocated_states_chain;
6155 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6157 curr_state = curr_state1;
6159 if (!try_issue_nops (curr_state,
6160 3 - curr_state->accumulated_insns_num % 3))
6162 curr_state->after_nops_num
6163 = 3 - curr_state->accumulated_insns_num % 3;
6164 curr_state->accumulated_insns_num
6165 += 3 - curr_state->accumulated_insns_num % 3;
6167 if (!insert_bundle_state (curr_state))
6168 free_bundle_state (curr_state);
6172 /* The following function returns the position in the two window bundle
6173 for the given STATE. */
6176 get_max_pos (state_t state)
6178 if (cpu_unit_reservation_p (state, pos_6))
6180 else if (cpu_unit_reservation_p (state, pos_5))
6182 else if (cpu_unit_reservation_p (state, pos_4))
6184 else if (cpu_unit_reservation_p (state, pos_3))
6186 else if (cpu_unit_reservation_p (state, pos_2))
6188 else if (cpu_unit_reservation_p (state, pos_1))
6194 /* The function returns the code of a possible template for the given
6195 position and state. The function should be called only with the 2
6196 position values 3 or 6. */
6199 get_template (state_t state, int pos)
6204 if (cpu_unit_reservation_p (state, _0mii_))
6206 else if (cpu_unit_reservation_p (state, _0mmi_))
6208 else if (cpu_unit_reservation_p (state, _0mfi_))
6210 else if (cpu_unit_reservation_p (state, _0mmf_))
6212 else if (cpu_unit_reservation_p (state, _0bbb_))
6214 else if (cpu_unit_reservation_p (state, _0mbb_))
6216 else if (cpu_unit_reservation_p (state, _0mib_))
6218 else if (cpu_unit_reservation_p (state, _0mmb_))
6220 else if (cpu_unit_reservation_p (state, _0mfb_))
6222 else if (cpu_unit_reservation_p (state, _0mlx_))
6227 if (cpu_unit_reservation_p (state, _1mii_))
6229 else if (cpu_unit_reservation_p (state, _1mmi_))
6231 else if (cpu_unit_reservation_p (state, _1mfi_))
6233 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6235 else if (cpu_unit_reservation_p (state, _1bbb_))
6237 else if (cpu_unit_reservation_p (state, _1mbb_))
6239 else if (cpu_unit_reservation_p (state, _1mib_))
6241 else if (cpu_unit_reservation_p (state, _1mmb_))
6243 else if (cpu_unit_reservation_p (state, _1mfb_))
6245 else if (cpu_unit_reservation_p (state, _1mlx_))
6254 /* The following function returns the next insn important for insn
6255 bundling that follows INSN and comes before TAIL. */
6258 get_next_important_insn (rtx insn, rtx tail)
6260 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6262 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6263 && GET_CODE (PATTERN (insn)) != USE
6264 && GET_CODE (PATTERN (insn)) != CLOBBER)
/* The following function does insn bundling.  Bundling means
   inserting templates and nop insns to fit insn groups into permitted
   templates.  Instruction scheduling uses an NDFA (non-deterministic
   finite automaton) that encodes information about the templates and
   the inserted nops.  The nondeterminism of the automaton makes it
   possible to follow all possible insn sequences very quickly.

   Unfortunately it is not possible to get information about inserted
   nop insns and used templates from the automaton states.  The
   automaton only says that we can issue an insn, possibly inserting
   some nops before it and using some template.  Therefore insn
   bundling in this function is implemented with a DFA (deterministic
   finite automaton).  We follow all possible insn sequences by
   inserting 0-2 nops (that is what the NDFA describes for insn
   scheduling) before/after each insn being bundled.  We know the
   start of a simulated processor cycle from the insn scheduling (an
   insn starting a new cycle has TImode).

   A naive implementation of insn bundling would create an enormous
   number of possible insn sequences satisfying the information about
   new cycle ticks taken from the insn scheduling.  To make the
   algorithm practical we use dynamic programming.  Each decision
   (about inserting nops, and implicitly about previous decisions) is
   described by the structure bundle_state (see above).  If we
   generate the same bundle state (the key is the automaton state
   after issuing the insn and the nops for it), we reuse the already
   generated one.  As a consequence we reject decisions which cannot
   improve the solution and reduce the memory used by the algorithm.

   When we reach the end of the EBB (extended basic block), we choose
   the best insn sequence and then, moving backward through the EBB,
   insert templates for the best alternative.  The templates are
   found by querying the automaton state for each insn of the chosen
   bundle states.

   So the algorithm makes two (forward and backward) passes through
   the EBB.  There is an additional forward pass through the EBB for
   the Itanium1 processor.  This pass inserts more nops to make the
   dependency between a producer insn and an MMMUL/MMSHF insn at
   least 4 cycles long.  */
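/* An illustrative sketch (not part of GCC) of the state-reuse step of
   the dynamic programming described above.  The types and helper here
   are hypothetical stand-ins: a real bundle_state is keyed by the DFA
   state reached after issuing an insn plus its nops, and two states
   with the same key at the same insn index are merged so each
   automaton state is explored only once.  */
#if 0
struct toy_state
{
  unsigned dfa_state;		/* stand-in for the real DFA state */
  int cost;			/* accumulated stall cycles */
  struct toy_state *next;	/* chain of states for one insn index */
};

/* Chain NEW_S into LIST unless a state with the same DFA state is
   already there; in that case keep only the cheaper cost.  */
static struct toy_state *
toy_merge_state (struct toy_state *list, struct toy_state *new_s)
{
  struct toy_state *s;

  for (s = list; s != NULL; s = s->next)
    if (s->dfa_state == new_s->dfa_state)
      {
	if (new_s->cost < s->cost)
	  s->cost = new_s->cost;
	return list;		/* reused -- NEW_S is dropped */
      }
  new_s->next = list;
  return new_s;
}
#endif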
6309 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6311 struct bundle_state *curr_state, *next_state, *best_state;
6312 rtx insn, next_insn;
6314 int i, bundle_end_p, only_bundle_end_p, asm_p;
6315 int pos = 0, max_pos, template0, template1;
6318 enum attr_type type;
6321 /* Count insns in the EBB. */
6322 for (insn = NEXT_INSN (prev_head_insn);
6323 insn && insn != tail;
6324 insn = NEXT_INSN (insn))
6330 dfa_clean_insn_cache ();
6331 initiate_bundle_state_table ();
6332 index_to_bundle_states = xmalloc ((insn_num + 2)
6333 * sizeof (struct bundle_state *));
6334 /* First (forward) pass -- generation of bundle states. */
6335 curr_state = get_free_bundle_state ();
6336 curr_state->insn = NULL;
6337 curr_state->before_nops_num = 0;
6338 curr_state->after_nops_num = 0;
6339 curr_state->insn_num = 0;
6340 curr_state->cost = 0;
6341 curr_state->accumulated_insns_num = 0;
6342 curr_state->branch_deviation = 0;
6343 curr_state->next = NULL;
6344 curr_state->originator = NULL;
6345 state_reset (curr_state->dfa_state);
6346 index_to_bundle_states [0] = curr_state;
/* Shift the cycle mark if it is set on an insn which could be ignored.  */
6349 for (insn = NEXT_INSN (prev_head_insn);
6351 insn = NEXT_INSN (insn))
6353 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6354 || GET_CODE (PATTERN (insn)) == USE
6355 || GET_CODE (PATTERN (insn)) == CLOBBER)
6356 && GET_MODE (insn) == TImode)
6358 PUT_MODE (insn, VOIDmode);
6359 for (next_insn = NEXT_INSN (insn);
6361 next_insn = NEXT_INSN (next_insn))
6362 if (INSN_P (next_insn)
6363 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6364 && GET_CODE (PATTERN (next_insn)) != USE
6365 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6367 PUT_MODE (next_insn, TImode);
/* Forward pass: generation of bundle states.  */
6372 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6377 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6378 || GET_CODE (PATTERN (insn)) == USE
6379 || GET_CODE (PATTERN (insn)) == CLOBBER)
6381 type = ia64_safe_type (insn);
6382 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6384 index_to_bundle_states [insn_num] = NULL;
6385 for (curr_state = index_to_bundle_states [insn_num - 1];
6387 curr_state = next_state)
6389 pos = curr_state->accumulated_insns_num % 3;
6390 next_state = curr_state->next;
/* We must fill up the current bundle in order to start a
   subsequent asm insn in a new bundle.  An asm insn is always
   placed in a separate bundle.  */
6395 = (next_insn != NULL_RTX
6396 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6397 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6398 /* We may fill up the current bundle if it is the cycle end
6399 without a group barrier. */
6401 = (only_bundle_end_p || next_insn == NULL_RTX
6402 || (GET_MODE (next_insn) == TImode
6403 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6404 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6406 /* We need to insert 2 nops for cases like M_MII. To
6407 guarantee issuing all insns on the same cycle for
6408 Itanium 1, we need to issue 2 nops after the first M
6409 insn (MnnMII where n is a nop insn). */
6410 || ((type == TYPE_M || type == TYPE_A)
6411 && ia64_tune == PROCESSOR_ITANIUM
6412 && !bundle_end_p && pos == 1))
6413 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6415 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6417 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6420 if (index_to_bundle_states [insn_num] == NULL)
6422 for (curr_state = index_to_bundle_states [insn_num];
6424 curr_state = curr_state->next)
6425 if (verbose >= 2 && dump)
/* This structure is taken from the generated code of the
   pipeline hazard recognizer (see the file insn-attrtab.c).
   Please don't forget to change the structure if a new
   automaton is added to the .md file.  */
6433 unsigned short one_automaton_state;
6434 unsigned short oneb_automaton_state;
6435 unsigned short two_automaton_state;
6436 unsigned short twob_automaton_state;
6441 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6442 curr_state->unique_num,
6443 (curr_state->originator == NULL
6444 ? -1 : curr_state->originator->unique_num),
6446 curr_state->before_nops_num, curr_state->after_nops_num,
6447 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6448 (ia64_tune == PROCESSOR_ITANIUM
6449 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6450 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6454 if (index_to_bundle_states [insn_num] == NULL)
/* We should find a solution because the 2nd insn scheduling has found one.  */
6458 /* Find a state corresponding to the best insn sequence. */
6460 for (curr_state = index_to_bundle_states [insn_num];
6462 curr_state = curr_state->next)
/* We are only looking at states with a fully filled-up last
   bundle.  First we prefer insn sequences with minimal cost,
   then those with the minimal number of inserted nops, and
   finally those with branch insns placed in the 3rd slots.  */
6467 if (curr_state->accumulated_insns_num % 3 == 0
6468 && (best_state == NULL || best_state->cost > curr_state->cost
6469 || (best_state->cost == curr_state->cost
6470 && (curr_state->accumulated_insns_num
6471 < best_state->accumulated_insns_num
6472 || (curr_state->accumulated_insns_num
6473 == best_state->accumulated_insns_num
6474 && curr_state->branch_deviation
6475 < best_state->branch_deviation)))))
6476 best_state = curr_state;
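/* The loop above computes a lexicographic minimum over the triple
   (cost, accumulated_insns_num, branch_deviation), restricted to
   states whose last bundle is full.  The same ordering as a
   stand-alone predicate (illustration only):  */
#if 0
static int
toy_better_state_p (int cost1, int insns1, int dev1,
		    int cost2, int insns2, int dev2)
{
  if (cost1 != cost2)
    return cost1 < cost2;	/* first: minimal cost */
  if (insns1 != insns2)
    return insns1 < insns2;	/* then: fewer insns, i.e. fewer nops */
  return dev1 < dev2;		/* last: branches in 3rd slots */
}
#endif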
6477 /* Second (backward) pass: adding nops and templates. */
6478 insn_num = best_state->before_nops_num;
6479 template0 = template1 = -1;
6480 for (curr_state = best_state;
6481 curr_state->originator != NULL;
6482 curr_state = curr_state->originator)
6484 insn = curr_state->insn;
6485 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6486 || asm_noperands (PATTERN (insn)) >= 0);
6488 if (verbose >= 2 && dump)
6492 unsigned short one_automaton_state;
6493 unsigned short oneb_automaton_state;
6494 unsigned short two_automaton_state;
6495 unsigned short twob_automaton_state;
6500 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6501 curr_state->unique_num,
6502 (curr_state->originator == NULL
6503 ? -1 : curr_state->originator->unique_num),
6505 curr_state->before_nops_num, curr_state->after_nops_num,
6506 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6507 (ia64_tune == PROCESSOR_ITANIUM
6508 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6509 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
/* Find the position in the current bundle window.  The window can
   contain at most two bundles.  A two-bundle window means that
   the processor will make two bundle rotations.  */
6515 max_pos = get_max_pos (curr_state->dfa_state);
/* The following check (a negative template number) means that the
   processor did one bundle rotation.  */
6519 || (max_pos == 3 && template0 < 0))
/* We are at the end of the window -- find template(s) for its bundle(s).  */
6525 template0 = get_template (curr_state->dfa_state, 3);
6528 template1 = get_template (curr_state->dfa_state, 3);
6529 template0 = get_template (curr_state->dfa_state, 6);
6532 if (max_pos > 3 && template1 < 0)
/* This may happen when there is a stop inside a bundle.  */
6537 template1 = get_template (curr_state->dfa_state, 3);
6541 /* Emit nops after the current insn. */
6542 for (i = 0; i < curr_state->after_nops_num; i++)
6545 emit_insn_after (nop, insn);
6551 /* We are at the start of a bundle: emit the template
6552 (it should be defined). */
6555 b = gen_bundle_selector (GEN_INT (template0));
6556 ia64_emit_insn_before (b, nop);
/* If we have a two-bundle window, we make one bundle
   rotation.  Otherwise template0 will be undefined
   (a negative value).  */
6560 template0 = template1;
/* Move the position backward in the window.  A group barrier
   has no slot.  An asm insn takes a whole bundle.  */
6566 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6567 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6568 && asm_noperands (PATTERN (insn)) < 0)
/* A long insn takes 2 slots.  */
6571 if (ia64_safe_type (insn) == TYPE_L)
6576 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6577 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6578 && asm_noperands (PATTERN (insn)) < 0)
/* The current insn is at the bundle start: emit the template.  */
6584 b = gen_bundle_selector (GEN_INT (template0));
6585 ia64_emit_insn_before (b, insn);
6586 b = PREV_INSN (insn);
/* See the comment above in the analogous place for emitting nops
   after the insn.  */
6590 template0 = template1;
/* Emit nops before the current insn.  */
6594 for (i = 0; i < curr_state->before_nops_num; i++)
6597 ia64_emit_insn_before (nop, insn);
6598 nop = PREV_INSN (insn);
/* See the comment above in the analogous place for emitting nops
   after the insn.  */
6609 b = gen_bundle_selector (GEN_INT (template0));
6610 ia64_emit_insn_before (b, insn);
6611 b = PREV_INSN (insn);
6613 template0 = template1;
6618 if (ia64_tune == PROCESSOR_ITANIUM)
/* Insert additional cycles for MM-insns (MMMUL and MMSHF).
   Itanium1 has a quirky design: if the distance between an insn
   and a dependent MM-insn is less than 4 cycles, we incur an
   additional 6-cycle stall.  So we make the distance equal to 4
   cycles if it is less.  */
6624 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6629 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6630 || GET_CODE (PATTERN (insn)) == USE
6631 || GET_CODE (PATTERN (insn)) == CLOBBER)
6633 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6634 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
/* We found an MM-insn which needs additional cycles.  */
/* Now we search for the template of the bundle in which the
   MM-insn is placed and for the position of the insn in the
   bundle (0, 1, 2).  We also check whether there is a stop
   before the insn.  */
6645 last = prev_active_insn (insn);
6646 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6648 last = prev_active_insn (last);
6650 for (;; last = prev_active_insn (last))
6651 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6653 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
/* The insn is in an MLX bundle.  Change the template
   to MFI because we will add nops before the insn.
   This simplifies the subsequent code a lot.  */
6659 = gen_bundle_selector (const2_rtx); /* -> MFI */
6662 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
6663 && (ia64_safe_itanium_class (last)
6664 != ITANIUM_CLASS_IGNORE))
/* Some correctness checks: the stop is not at the bundle
   start, there are no more than 3 insns in the bundle, and
   the MM-insn is not at the start of a bundle with template
   MLX.  */
6670 if ((pred_stop_p && n == 0) || n > 2
6671 || (template0 == 9 && n != 0))
6673 /* Put nops after the insn in the bundle. */
6674 for (j = 3 - n; j > 0; j --)
6675 ia64_emit_insn_before (gen_nop (), insn);
/* This takes into account that we will add N more nops
   before the insn later -- please see the code below.  */
6678 add_cycles [INSN_UID (insn)]--;
6679 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6680 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6683 add_cycles [INSN_UID (insn)]--;
6684 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
6686 /* Insert "MII;" template. */
6687 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
6689 ia64_emit_insn_before (gen_nop (), insn);
6690 ia64_emit_insn_before (gen_nop (), insn);
/* To decrease code size, we use an "MI;I;" template.  */
6695 ia64_emit_insn_before
6696 (gen_insn_group_barrier (GEN_INT (3)), insn);
6699 ia64_emit_insn_before (gen_nop (), insn);
6700 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6703 /* Put the MM-insn in the same slot of a bundle with the
6704 same template as the original one. */
6705 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
/* To put the insn in the same slot, add the necessary number of nops.  */
6709 for (j = n; j > 0; j --)
6710 ia64_emit_insn_before (gen_nop (), insn);
/* Emit the stop if the original bundle had one.  */
6713 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6717 free (index_to_bundle_states);
6718 finish_bundle_state_table ();
6720 dfa_clean_insn_cache ();
/* The following function is called at the end of scheduling a BB
   or an EBB.  After reload, it inserts stop bits and does insn bundling.  */
6727 ia64_sched_finish (FILE *dump, int sched_verbose)
6730 fprintf (dump, "// Finishing schedule.\n");
6731 if (!reload_completed)
6733 if (reload_completed)
6735 final_emit_insn_group_barriers (dump);
6736 bundling (dump, sched_verbose, current_sched_info->prev_head,
6737 current_sched_info->next_tail);
6738 if (sched_verbose && dump)
6739 fprintf (dump, "// finishing %d-%d\n",
6740 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
6741 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
/* The following function inserts stop bits in a scheduled BB or EBB.  */
6750 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6753 int need_barrier_p = 0;
6754 rtx prev_insn = NULL_RTX;
6756 init_insn_group_barriers ();
6758 for (insn = NEXT_INSN (current_sched_info->prev_head);
6759 insn != current_sched_info->next_tail;
6760 insn = NEXT_INSN (insn))
6762 if (GET_CODE (insn) == BARRIER)
6764 rtx last = prev_active_insn (insn);
6768 if (GET_CODE (last) == JUMP_INSN
6769 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6770 last = prev_active_insn (last);
6771 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6772 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6774 init_insn_group_barriers ();
6776 prev_insn = NULL_RTX;
6778 else if (INSN_P (insn))
6780 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6782 init_insn_group_barriers ();
6784 prev_insn = NULL_RTX;
6786 else if (need_barrier_p || group_barrier_needed_p (insn))
6788 if (TARGET_EARLY_STOP_BITS)
6793 last != current_sched_info->prev_head;
6794 last = PREV_INSN (last))
6795 if (INSN_P (last) && GET_MODE (last) == TImode
6796 && stops_p [INSN_UID (last)])
6798 if (last == current_sched_info->prev_head)
6800 last = prev_active_insn (last);
6802 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
6803 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
6805 init_insn_group_barriers ();
6806 for (last = NEXT_INSN (last);
6808 last = NEXT_INSN (last))
6810 group_barrier_needed_p (last);
6814 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6816 init_insn_group_barriers ();
6818 group_barrier_needed_p (insn);
6819 prev_insn = NULL_RTX;
6821 else if (recog_memoized (insn) >= 0)
6823 need_barrier_p = (GET_CODE (insn) == CALL_INSN
6824 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6825 || asm_noperands (PATTERN (insn)) >= 0);
/* If the following function returns TRUE, we will use the DFA
   insn scheduler.  */
6836 ia64_first_cycle_multipass_dfa_lookahead (void)
6838 return (reload_completed ? 6 : 4);
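/* (The values presumably reflect the bundling window: after reload the
   final schedule works with two bundles of three slots each, hence 6;
   before reload a shallower lookahead of 4 is enough.)  */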
/* The following function initializes the variable `dfa_pre_cycle_insn'.  */
6844 ia64_init_dfa_pre_cycle_insn (void)
6846 if (temp_dfa_state == NULL)
6848 dfa_state_size = state_size ();
6849 temp_dfa_state = xmalloc (dfa_state_size);
6850 prev_cycle_state = xmalloc (dfa_state_size);
6852 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
6853 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
6854 recog_memoized (dfa_pre_cycle_insn);
6855 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
6856 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
6857 recog_memoized (dfa_stop_insn);
6860 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
6861 used by the DFA insn scheduler. */
6864 ia64_dfa_pre_cycle_insn (void)
6866 return dfa_pre_cycle_insn;
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type st or stf).  */
6873 ia64_st_address_bypass_p (rtx producer, rtx consumer)
6877 if (producer == NULL_RTX || consumer == NULL_RTX)
6879 dest = ia64_single_set (producer);
6880 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6881 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6883 if (GET_CODE (reg) == SUBREG)
6884 reg = SUBREG_REG (reg);
6885 dest = ia64_single_set (consumer);
6886 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
6887 || GET_CODE (mem) != MEM)
6889 return reg_mentioned_p (reg, mem);
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type ld or fld).  */
6896 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
6898 rtx dest, src, reg, mem;
6900 if (producer == NULL_RTX || consumer == NULL_RTX)
6902 dest = ia64_single_set (producer);
6903 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6904 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6906 if (GET_CODE (reg) == SUBREG)
6907 reg = SUBREG_REG (reg);
6908 src = ia64_single_set (consumer);
6909 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
6911 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
6912 mem = XVECEXP (mem, 0, 0);
6913 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
6914 mem = XEXP (mem, 0);
6916 /* Note that LO_SUM is used for GOT loads. */
6917 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
6920 return reg_mentioned_p (reg, mem);
/* The following function returns TRUE if INSN produces an address
   for a load/store insn.  We will place such insns into an M slot
   because that decreases their latency.  */
6928 ia64_produce_address_p (rtx insn)
6934 /* Emit pseudo-ops for the assembler to describe predicate relations.
6935 At present this assumes that we only consider predicate pairs to
6936 be mutex, and that the assembler can deduce proper values from
6937 straight-line code. */
6940 emit_predicate_relation_info (void)
6944 FOR_EACH_BB_REVERSE (bb)
6947 rtx head = BB_HEAD (bb);
6949 /* We only need such notes at code labels. */
6950 if (GET_CODE (head) != CODE_LABEL)
6952 if (GET_CODE (NEXT_INSN (head)) == NOTE
6953 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
6954 head = NEXT_INSN (head);
6956 for (r = PR_REG (0); r < PR_REG (64); r += 2)
6957 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
6959 rtx p = gen_rtx_REG (BImode, r);
6960 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
6961 if (head == BB_END (bb))
6967 /* Look for conditional calls that do not return, and protect predicate
6968 relations around them. Otherwise the assembler will assume the call
returns, and complain about uses of call-clobbered predicates after
the call.  */
6971 FOR_EACH_BB_REVERSE (bb)
6973 rtx insn = BB_HEAD (bb);
6977 if (GET_CODE (insn) == CALL_INSN
6978 && GET_CODE (PATTERN (insn)) == COND_EXEC
6979 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
6981 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
6982 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
6983 if (BB_HEAD (bb) == insn)
6985 if (BB_END (bb) == insn)
6989 if (insn == BB_END (bb))
6991 insn = NEXT_INSN (insn);
6996 /* Perform machine dependent operations on the rtl chain INSNS. */
7001 /* We are freeing block_for_insn in the toplev to keep compatibility
7002 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7003 compute_bb_for_insn ();
7005 /* If optimizing, we'll have split before scheduling. */
7007 split_all_insns (0);
7009 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7010 non-optimizing bootstrap. */
7011 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7013 if (ia64_flag_schedule_insns2)
7015 timevar_push (TV_SCHED2);
7016 ia64_final_schedule = 1;
7018 initiate_bundle_states ();
7019 ia64_nop = make_insn_raw (gen_nop ());
7020 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7021 recog_memoized (ia64_nop);
7022 clocks_length = get_max_uid () + 1;
7023 stops_p = xcalloc (1, clocks_length);
7024 if (ia64_tune == PROCESSOR_ITANIUM)
7026 clocks = xcalloc (clocks_length, sizeof (int));
7027 add_cycles = xcalloc (clocks_length, sizeof (int));
7029 if (ia64_tune == PROCESSOR_ITANIUM2)
7031 pos_1 = get_cpu_unit_code ("2_1");
7032 pos_2 = get_cpu_unit_code ("2_2");
7033 pos_3 = get_cpu_unit_code ("2_3");
7034 pos_4 = get_cpu_unit_code ("2_4");
7035 pos_5 = get_cpu_unit_code ("2_5");
7036 pos_6 = get_cpu_unit_code ("2_6");
7037 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7038 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7039 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7040 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7041 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7042 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7043 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7044 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7045 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7046 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7047 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7048 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7049 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7050 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7051 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7052 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7053 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7054 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7055 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7056 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7060 pos_1 = get_cpu_unit_code ("1_1");
7061 pos_2 = get_cpu_unit_code ("1_2");
7062 pos_3 = get_cpu_unit_code ("1_3");
7063 pos_4 = get_cpu_unit_code ("1_4");
7064 pos_5 = get_cpu_unit_code ("1_5");
7065 pos_6 = get_cpu_unit_code ("1_6");
7066 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7067 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7068 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7069 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7070 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7071 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7072 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7073 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7074 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7075 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7076 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7077 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7078 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7079 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7080 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7081 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7082 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7083 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7084 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7085 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7087 schedule_ebbs (dump_file);
7088 finish_bundle_states ();
7089 if (ia64_tune == PROCESSOR_ITANIUM)
7095 emit_insn_group_barriers (dump_file);
7097 ia64_final_schedule = 0;
7098 timevar_pop (TV_SCHED2);
7101 emit_all_insn_group_barriers (dump_file);
/* A call must not be the last instruction in a function, so that
   the return address stays within the function and unwinding works
   properly.  Note that IA-64 differs from dwarf2 on this point.  */
7106 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7111 insn = get_last_insn ();
7112 if (! INSN_P (insn))
7113 insn = prev_active_insn (insn);
7114 /* Skip over insns that expand to nothing. */
7115 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7117 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7118 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7120 insn = prev_active_insn (insn);
7122 if (GET_CODE (insn) == CALL_INSN)
7125 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7126 emit_insn (gen_break_f ());
7127 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7132 emit_predicate_relation_info ();
7134 if (ia64_flag_var_tracking)
7136 timevar_push (TV_VAR_TRACKING);
7137 variable_tracking_main ();
7138 timevar_pop (TV_VAR_TRACKING);
7142 /* Return true if REGNO is used by the epilogue. */
7145 ia64_epilogue_uses (int regno)
7150 /* With a call to a function in another module, we will write a new
7151 value to "gp". After returning from such a call, we need to make
7152 sure the function restores the original gp-value, even if the
7153 function itself does not use the gp anymore. */
7154 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7156 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7157 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7158 /* For functions defined with the syscall_linkage attribute, all
7159 input registers are marked as live at all function exits. This
7160 prevents the register allocator from using the input registers,
7161 which in turn makes it possible to restart a system call after
7162 an interrupt without having to save/restore the input registers.
7163 This also prevents kernel data from leaking to application code. */
7164 return lookup_attribute ("syscall_linkage",
7165 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7168 /* Conditional return patterns can't represent the use of `b0' as
7169 the return address, so we force the value live this way. */
7173 /* Likewise for ar.pfs, which is used by br.ret. */
7181 /* Return true if REGNO is used by the frame unwinder. */
7184 ia64_eh_uses (int regno)
7186 if (! reload_completed)
7189 if (current_frame_info.reg_save_b0
7190 && regno == current_frame_info.reg_save_b0)
7192 if (current_frame_info.reg_save_pr
7193 && regno == current_frame_info.reg_save_pr)
7195 if (current_frame_info.reg_save_ar_pfs
7196 && regno == current_frame_info.reg_save_ar_pfs)
7198 if (current_frame_info.reg_save_ar_unat
7199 && regno == current_frame_info.reg_save_ar_unat)
7201 if (current_frame_info.reg_save_ar_lc
7202 && regno == current_frame_info.reg_save_ar_lc)
/* Return true if EXP goes in small data/bss.  */

/* ??? We could also support our own long data here, generating
   movl/add/ld8 instead of addl,ld8/ld8.  This makes the code bigger,
   but should make the code faster because there is one less load.
   This would also cover incomplete types which can't go in
   sdata/sbss.  */
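/* (For orientation: addl,ld8/ld8 is the GOT sequence -- an addl
   computes the GOT slot address relative to gp, one ld8 loads the
   symbol's address from the GOT, and a second ld8 loads the datum --
   while movl/add/ld8 would materialize the full 64-bit address
   inline, trading code size for one less load.)  */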
7216 ia64_in_small_data_p (tree exp)
7218 if (TARGET_NO_SDATA)
7221 /* We want to merge strings, so we never consider them small data. */
7222 if (TREE_CODE (exp) == STRING_CST)
7225 /* Functions are never small data. */
7226 if (TREE_CODE (exp) == FUNCTION_DECL)
7229 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7231 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7232 if (strcmp (section, ".sdata") == 0
7233 || strcmp (section, ".sbss") == 0)
7238 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7240 /* If this is an incomplete type with size 0, then we can't put it
7241 in sdata because it might be too big when completed. */
7242 if (size > 0 && size <= ia64_section_threshold)
7249 /* Output assembly directives for prologue regions. */
/* True if the current block is the last block of the function.  */

static bool last_block;
7255 /* True if we need a copy_state command at the start of the next block. */
7257 static bool need_copy_state;
7259 /* The function emits unwind directives for the start of an epilogue. */
7262 process_epilogue (void)
7264 /* If this isn't the last block of the function, then we need to label the
7265 current state, and copy it back in at the start of the next block. */
7269 fprintf (asm_out_file, "\t.label_state 1\n");
7270 need_copy_state = true;
7273 fprintf (asm_out_file, "\t.restore sp\n");
7276 /* This function processes a SET pattern looking for specific patterns
7277 which result in emitting an assembly directive required for unwinding. */
7280 process_set (FILE *asm_out_file, rtx pat)
7282 rtx src = SET_SRC (pat);
7283 rtx dest = SET_DEST (pat);
7284 int src_regno, dest_regno;
7286 /* Look for the ALLOC insn. */
7287 if (GET_CODE (src) == UNSPEC_VOLATILE
7288 && XINT (src, 1) == UNSPECV_ALLOC
7289 && GET_CODE (dest) == REG)
7291 dest_regno = REGNO (dest);
7293 /* If this isn't the final destination for ar.pfs, the alloc
7294 shouldn't have been marked frame related. */
7295 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7298 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7299 ia64_dbx_register_number (dest_regno));
7303 /* Look for SP = .... */
7304 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7306 if (GET_CODE (src) == PLUS)
7308 rtx op0 = XEXP (src, 0);
7309 rtx op1 = XEXP (src, 1);
7310 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7312 if (INTVAL (op1) < 0)
7313 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7316 process_epilogue ();
7321 else if (GET_CODE (src) == REG
7322 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7323 process_epilogue ();
7330 /* Register move we need to look at. */
7331 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7333 src_regno = REGNO (src);
7334 dest_regno = REGNO (dest);
7339 /* Saving return address pointer. */
7340 if (dest_regno != current_frame_info.reg_save_b0)
7342 fprintf (asm_out_file, "\t.save rp, r%d\n",
7343 ia64_dbx_register_number (dest_regno));
7347 if (dest_regno != current_frame_info.reg_save_pr)
7349 fprintf (asm_out_file, "\t.save pr, r%d\n",
7350 ia64_dbx_register_number (dest_regno));
7353 case AR_UNAT_REGNUM:
7354 if (dest_regno != current_frame_info.reg_save_ar_unat)
7356 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7357 ia64_dbx_register_number (dest_regno));
7361 if (dest_regno != current_frame_info.reg_save_ar_lc)
7363 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7364 ia64_dbx_register_number (dest_regno));
7367 case STACK_POINTER_REGNUM:
7368 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7369 || ! frame_pointer_needed)
7371 fprintf (asm_out_file, "\t.vframe r%d\n",
7372 ia64_dbx_register_number (dest_regno));
7376 /* Everything else should indicate being stored to memory. */
7381 /* Memory store we need to look at. */
7382 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7388 if (GET_CODE (XEXP (dest, 0)) == REG)
7390 base = XEXP (dest, 0);
7393 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7394 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7396 base = XEXP (XEXP (dest, 0), 0);
7397 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7402 if (base == hard_frame_pointer_rtx)
7404 saveop = ".savepsp";
7407 else if (base == stack_pointer_rtx)
7412 src_regno = REGNO (src);
7416 if (current_frame_info.reg_save_b0 != 0)
7418 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7422 if (current_frame_info.reg_save_pr != 0)
7424 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7428 if (current_frame_info.reg_save_ar_lc != 0)
7430 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7434 if (current_frame_info.reg_save_ar_pfs != 0)
7436 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7439 case AR_UNAT_REGNUM:
7440 if (current_frame_info.reg_save_ar_unat != 0)
7442 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7449 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7450 1 << (src_regno - GR_REG (4)));
7458 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7459 1 << (src_regno - BR_REG (1)));
7466 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7467 1 << (src_regno - FR_REG (2)));
7470 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7471 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7472 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7473 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7474 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7475 1 << (src_regno - FR_REG (12)));
7487 /* This function looks at a single insn and emits any directives
7488 required to unwind this insn. */
7490 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7492 if (flag_unwind_tables
7493 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7497 if (GET_CODE (insn) == NOTE
7498 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7500 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7502 /* Restore unwind state from immediately before the epilogue. */
7503 if (need_copy_state)
7505 fprintf (asm_out_file, "\t.body\n");
7506 fprintf (asm_out_file, "\t.copy_state 1\n");
7507 need_copy_state = false;
7511 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7514 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7516 pat = XEXP (pat, 0);
7518 pat = PATTERN (insn);
7520 switch (GET_CODE (pat))
7523 process_set (asm_out_file, pat);
7529 int limit = XVECLEN (pat, 0);
7530 for (par_index = 0; par_index < limit; par_index++)
7532 rtx x = XVECEXP (pat, 0, par_index);
7533 if (GET_CODE (x) == SET)
7534 process_set (asm_out_file, x);
7547 ia64_init_builtins (void)
7549 tree psi_type_node = build_pointer_type (integer_type_node);
7550 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7552 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7553 tree si_ftype_psi_si_si
7554 = build_function_type_list (integer_type_node,
7555 psi_type_node, integer_type_node,
7556 integer_type_node, NULL_TREE);
7558 /* __sync_val_compare_and_swap_di */
7559 tree di_ftype_pdi_di_di
7560 = build_function_type_list (long_integer_type_node,
7561 pdi_type_node, long_integer_type_node,
7562 long_integer_type_node, NULL_TREE);
7563 /* __sync_bool_compare_and_swap_di */
7564 tree si_ftype_pdi_di_di
7565 = build_function_type_list (integer_type_node,
7566 pdi_type_node, long_integer_type_node,
7567 long_integer_type_node, NULL_TREE);
7568 /* __sync_synchronize */
7569 tree void_ftype_void
7570 = build_function_type (void_type_node, void_list_node);
7572 /* __sync_lock_test_and_set_si */
7573 tree si_ftype_psi_si
7574 = build_function_type_list (integer_type_node,
7575 psi_type_node, integer_type_node, NULL_TREE);
7577 /* __sync_lock_test_and_set_di */
7578 tree di_ftype_pdi_di
7579 = build_function_type_list (long_integer_type_node,
7580 pdi_type_node, long_integer_type_node,
7583 /* __sync_lock_release_si */
7585 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7587 /* __sync_lock_release_di */
7589 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
7594 /* The __fpreg type. */
7595 fpreg_type = make_node (REAL_TYPE);
7596 /* ??? The back end should know to load/save __fpreg variables using
7597 the ldf.fill and stf.spill instructions. */
7598 TYPE_PRECISION (fpreg_type) = 80;
7599 layout_type (fpreg_type);
7600 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
7602 /* The __float80 type. */
7603 float80_type = make_node (REAL_TYPE);
7604 TYPE_PRECISION (float80_type) = 80;
7605 layout_type (float80_type);
7606 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
7608 /* The __float128 type. */
7611 tree float128_type = make_node (REAL_TYPE);
7612 TYPE_PRECISION (float128_type) = 128;
7613 layout_type (float128_type);
7614 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
7617 /* Under HPUX, this is a synonym for "long double". */
7618 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
7621 #define def_builtin(name, type, code) \
7622 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
7625 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7626 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7627 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7628 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7629 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7630 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7631 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7632 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7634 def_builtin ("__sync_synchronize", void_ftype_void,
7635 IA64_BUILTIN_SYNCHRONIZE);
7637 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7638 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7639 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7640 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7641 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7642 IA64_BUILTIN_LOCK_RELEASE_SI);
7643 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7644 IA64_BUILTIN_LOCK_RELEASE_DI);
7646 def_builtin ("__builtin_ia64_bsp",
7647 build_function_type (ptr_type_node, void_list_node),
7650 def_builtin ("__builtin_ia64_flushrs",
7651 build_function_type (void_type_node, void_list_node),
7652 IA64_BUILTIN_FLUSHRS);
7654 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7655 IA64_BUILTIN_FETCH_AND_ADD_SI);
7656 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7657 IA64_BUILTIN_FETCH_AND_SUB_SI);
7658 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7659 IA64_BUILTIN_FETCH_AND_OR_SI);
7660 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7661 IA64_BUILTIN_FETCH_AND_AND_SI);
7662 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7663 IA64_BUILTIN_FETCH_AND_XOR_SI);
7664 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
7665 IA64_BUILTIN_FETCH_AND_NAND_SI);
7667 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
7668 IA64_BUILTIN_ADD_AND_FETCH_SI);
7669 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
7670 IA64_BUILTIN_SUB_AND_FETCH_SI);
7671 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
7672 IA64_BUILTIN_OR_AND_FETCH_SI);
7673 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
7674 IA64_BUILTIN_AND_AND_FETCH_SI);
7675 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
7676 IA64_BUILTIN_XOR_AND_FETCH_SI);
7677 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
7678 IA64_BUILTIN_NAND_AND_FETCH_SI);
7680 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
7681 IA64_BUILTIN_FETCH_AND_ADD_DI);
7682 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
7683 IA64_BUILTIN_FETCH_AND_SUB_DI);
7684 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
7685 IA64_BUILTIN_FETCH_AND_OR_DI);
7686 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
7687 IA64_BUILTIN_FETCH_AND_AND_DI);
7688 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
7689 IA64_BUILTIN_FETCH_AND_XOR_DI);
7690 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
7691 IA64_BUILTIN_FETCH_AND_NAND_DI);
7693 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
7694 IA64_BUILTIN_ADD_AND_FETCH_DI);
7695 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
7696 IA64_BUILTIN_SUB_AND_FETCH_DI);
7697 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
7698 IA64_BUILTIN_OR_AND_FETCH_DI);
7699 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
7700 IA64_BUILTIN_AND_AND_FETCH_DI);
7701 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
7702 IA64_BUILTIN_XOR_AND_FETCH_DI);
7703 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
7704 IA64_BUILTIN_NAND_AND_FETCH_DI);
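/* A user-level view of two of the builtins defined above
   (illustration only; the variable and function names here are
   hypothetical).  Each such call is routed to ia64_expand_builtin
   below with the corresponding IA64_BUILTIN_ code:  */
#if 0
static int lock_word;

static int
toy_try_lock (void)
{
  /* Returns the previous contents of lock_word; 0 means the lock
     was free and we now hold it.  */
  return __sync_lock_test_and_set_si (&lock_word, 1);
}

static void
toy_unlock (void)
{
  __sync_lock_release_si (&lock_word);
}
#endif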
/* Expand fetch_and_op intrinsics.  The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       ret = tmp;
       ccv = tmp;
       tmp = tmp <op> value;
       cmpxchgsz.acq tmp = [ptr], tmp
     } while (tmp != ret)
*/
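/* A C model of the sequence above (illustration only; the helper is
   a non-atomic stand-in for cmpxchgsz.acq):  */
#if 0
/* Models cmpxchg: if *PTR equals OLDVAL, store NEWVAL; either way
   return the previous contents of *PTR.  */
static int
toy_cmpxchg_acq (volatile int *ptr, int oldval, int newval)
{
  int prev = *ptr;
  if (prev == oldval)
    *ptr = newval;
  return prev;
}

static int
toy_fetch_and_add (volatile int *ptr, int value)
{
  int ret, tmp = *ptr;
  do
    {
      ret = tmp;
      tmp = toy_cmpxchg_acq (ptr, ret, ret + value);
    }
  while (tmp != ret);
  return ret;			/* the value fetched before the add */
}
#endif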
7722 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
7723 tree arglist, rtx target)
7725 rtx ret, label, tmp, ccv, insn, mem, value;
7728 arg0 = TREE_VALUE (arglist);
7729 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7730 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7731 #ifdef POINTERS_EXTEND_UNSIGNED
7732 if (GET_MODE(mem) != Pmode)
7733 mem = convert_memory_address (Pmode, mem);
7735 value = expand_expr (arg1, NULL_RTX, mode, 0);
7737 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7738 MEM_VOLATILE_P (mem) = 1;
7740 if (target && register_operand (target, mode))
7743 ret = gen_reg_rtx (mode);
7745 emit_insn (gen_mf ());
7747 /* Special case for fetchadd instructions. */
7748 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
7751 insn = gen_fetchadd_acq_si (ret, mem, value);
7753 insn = gen_fetchadd_acq_di (ret, mem, value);
7758 tmp = gen_reg_rtx (mode);
7759 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7760 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7761 emit_move_insn (tmp, mem);
7763 label = gen_label_rtx ();
7765 emit_move_insn (ret, tmp);
7766 convert_move (ccv, tmp, /*unsignedp=*/1);
/* Perform the specific operation.  Special-case NAND: it arrives
   as one_cmpl_optab, and we expand it by complementing TMP first
   and then ANDing with VALUE.  */
7770 if (binoptab == one_cmpl_optab)
7772 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7773 binoptab = and_optab;
7775 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
7778 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
7780 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
7783 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
/* Expand op_and_fetch intrinsics.  The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       old = tmp;
       ccv = tmp;
       ret = tmp <op> value;
       cmpxchgsz.acq tmp = [ptr], ret
     } while (tmp != old)
*/
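/* (Note the contrast with fetch_and_op above: there the value fetched
   before the operation is returned, here the freshly computed result
   RET is.)  */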
7801 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
7802 tree arglist, rtx target)
7804 rtx old, label, tmp, ret, ccv, insn, mem, value;
7807 arg0 = TREE_VALUE (arglist);
7808 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7809 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7810 #ifdef POINTERS_EXTEND_UNSIGNED
7811 if (GET_MODE(mem) != Pmode)
7812 mem = convert_memory_address (Pmode, mem);
7815 value = expand_expr (arg1, NULL_RTX, mode, 0);
7817 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7818 MEM_VOLATILE_P (mem) = 1;
7820 if (target && ! register_operand (target, mode))
7823 emit_insn (gen_mf ());
7824 tmp = gen_reg_rtx (mode);
7825 old = gen_reg_rtx (mode);
7826 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7827 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7829 emit_move_insn (tmp, mem);
7831 label = gen_label_rtx ();
7833 emit_move_insn (old, tmp);
7834 convert_move (ccv, tmp, /*unsignedp=*/1);
/* Perform the specific operation.  Special-case NAND: it arrives
   as one_cmpl_optab, and we expand it by complementing TMP first
   and then ANDing with VALUE.  */
7838 if (binoptab == one_cmpl_optab)
7840 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7841 binoptab = and_optab;
7843 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
7846 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
7848 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
7851 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
/* Expand val_ and bool_compare_and_swap.  For val_ we want:

     ar.ccv = oldval
     mf
     cmpxchgsz.acq ret = [ptr], newval, ar.ccv

   For bool_ it's the same except we return ret == oldval.
*/
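/* (ar.ccv is the architectural compare value register: cmpxchg only
   stores NEWVAL when the memory word equals ar.ccv, which is why the
   expander below zero-extends OLDVAL into it before the mf.)  */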
7867 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
7868 int boolp, tree arglist, rtx target)
7870 tree arg0, arg1, arg2;
7871 rtx mem, old, new, ccv, tmp, insn;
7873 arg0 = TREE_VALUE (arglist);
7874 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7875 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
7876 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7877 old = expand_expr (arg1, NULL_RTX, mode, 0);
7878 new = expand_expr (arg2, NULL_RTX, mode, 0);
7880 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7881 MEM_VOLATILE_P (mem) = 1;
7883 if (GET_MODE (old) != mode)
7884 old = convert_to_mode (mode, old, /*unsignedp=*/1);
7885 if (GET_MODE (new) != mode)
7886 new = convert_to_mode (mode, new, /*unsignedp=*/1);
7888 if (! register_operand (old, mode))
7889 old = copy_to_mode_reg (mode, old);
7890 if (! register_operand (new, mode))
7891 new = copy_to_mode_reg (mode, new);
7893 if (! boolp && target && register_operand (target, mode))
7896 tmp = gen_reg_rtx (mode);
7898 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7899 convert_move (ccv, old, /*unsignedp=*/1);
7900 emit_insn (gen_mf ());
7902 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
7904 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
7910 target = gen_reg_rtx (rmode);
7911 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
7917 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
7920 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
7924 rtx mem, new, ret, insn;
7926 arg0 = TREE_VALUE (arglist);
7927 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7928 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7929 new = expand_expr (arg1, NULL_RTX, mode, 0);
7931 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7932 MEM_VOLATILE_P (mem) = 1;
7933 if (! register_operand (new, mode))
7934 new = copy_to_mode_reg (mode, new);
7936 if (target && register_operand (target, mode))
7939 ret = gen_reg_rtx (mode);
7942 insn = gen_xchgsi (ret, mem, new);
7944 insn = gen_xchgdi (ret, mem, new);
7950 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
7953 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
7954 rtx target ATTRIBUTE_UNUSED)
7959 arg0 = TREE_VALUE (arglist);
7960 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7962 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7963 MEM_VOLATILE_P (mem) = 1;
7965 emit_move_insn (mem, const0_rtx);
7971 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7972 enum machine_mode mode ATTRIBUTE_UNUSED,
7973 int ignore ATTRIBUTE_UNUSED)
7975 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7976 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
7977 tree arglist = TREE_OPERAND (exp, 1);
7978 enum machine_mode rmode = VOIDmode;
7982 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
7983 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
7988 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
7989 case IA64_BUILTIN_LOCK_RELEASE_SI:
7990 case IA64_BUILTIN_FETCH_AND_ADD_SI:
7991 case IA64_BUILTIN_FETCH_AND_SUB_SI:
7992 case IA64_BUILTIN_FETCH_AND_OR_SI:
7993 case IA64_BUILTIN_FETCH_AND_AND_SI:
7994 case IA64_BUILTIN_FETCH_AND_XOR_SI:
7995 case IA64_BUILTIN_FETCH_AND_NAND_SI:
7996 case IA64_BUILTIN_ADD_AND_FETCH_SI:
7997 case IA64_BUILTIN_SUB_AND_FETCH_SI:
7998 case IA64_BUILTIN_OR_AND_FETCH_SI:
7999 case IA64_BUILTIN_AND_AND_FETCH_SI:
8000 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8001 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8005 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8010 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8015 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8016 case IA64_BUILTIN_LOCK_RELEASE_DI:
8017 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8018 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8019 case IA64_BUILTIN_FETCH_AND_OR_DI:
8020 case IA64_BUILTIN_FETCH_AND_AND_DI:
8021 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8022 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8023 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8024 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8025 case IA64_BUILTIN_OR_AND_FETCH_DI:
8026 case IA64_BUILTIN_AND_AND_FETCH_DI:
8027 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8028 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8038 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8039 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8040 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8043 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8044 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8045 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8048 case IA64_BUILTIN_SYNCHRONIZE:
8049 emit_insn (gen_mf ());
8052 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8053 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8054 return ia64_expand_lock_test_and_set (mode, arglist, target);
8056 case IA64_BUILTIN_LOCK_RELEASE_SI:
8057 case IA64_BUILTIN_LOCK_RELEASE_DI:
8058 return ia64_expand_lock_release (mode, arglist, target);
8060 case IA64_BUILTIN_BSP:
8061 if (! target || ! register_operand (target, DImode))
8062 target = gen_reg_rtx (DImode);
8063 emit_insn (gen_bsp_value (target));
8064 #ifdef POINTERS_EXTEND_UNSIGNED
8065 target = convert_memory_address (ptr_mode, target);
8069 case IA64_BUILTIN_FLUSHRS:
8070 emit_insn (gen_flushrs ());
8073 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8074 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8075 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8077 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8078 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8079 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8081 case IA64_BUILTIN_FETCH_AND_OR_SI:
8082 case IA64_BUILTIN_FETCH_AND_OR_DI:
8083 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8085 case IA64_BUILTIN_FETCH_AND_AND_SI:
8086 case IA64_BUILTIN_FETCH_AND_AND_DI:
8087 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8089 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8090 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8091 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8093 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8094 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8095 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8097 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8098 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8099 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8101 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8102 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8103 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8105 case IA64_BUILTIN_OR_AND_FETCH_SI:
8106 case IA64_BUILTIN_OR_AND_FETCH_DI:
8107 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8109 case IA64_BUILTIN_AND_AND_FETCH_SI:
8110 case IA64_BUILTIN_AND_AND_FETCH_DI:
8111 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8113 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8114 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8115 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8117 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8118 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8119 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
/* On HP-UX IA64, aggregate parameters are passed stored in the
   most significant bits of the stack slot.  */
8132 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8134 /* Exception to normal case for structures/unions/etc. */
8136 if (type && AGGREGATE_TYPE_P (type)
8137 && int_size_in_bytes (type) < UNITS_PER_WORD)
8140 /* Fall back to the default. */
8141 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
8144 /* Linked list of all external functions that are to be emitted by GCC.
8145 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8146 order to avoid putting out names that are never really used. */
8148 struct extern_func_list GTY(())
8150 struct extern_func_list *next;
8154 static GTY(()) struct extern_func_list *extern_func_head;
8157 ia64_hpux_add_extern_decl (tree decl)
8159 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8162 p->next = extern_func_head;
8163 extern_func_head = p;
8166 /* Print out the list of used global functions. */
8169 ia64_hpux_file_end (void)
8171 struct extern_func_list *p;
8173 for (p = extern_func_head; p; p = p->next)
8175 tree decl = p->decl;
8176 tree id = DECL_ASSEMBLER_NAME (decl);
8181 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8183 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8185 TREE_ASM_WRITTEN (decl) = 1;
8186 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8187 fputs (TYPE_ASM_OP, asm_out_file);
8188 assemble_name (asm_out_file, name);
8189 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8193 extern_func_head = 0;
/* Set SImode div/mod functions; init_integral_libfuncs only
   initializes modes of word_mode and larger.  Rename the TFmode
   libfuncs using the HPUX conventions.  __divtf3 is used for XFmode;
   we need to keep it for backward compatibility.  */
8202 ia64_init_libfuncs (void)
8204 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8205 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8206 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8207 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8209 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8210 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8211 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8212 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8213 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8215 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8216 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8217 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8218 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8219 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8220 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8222 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8223 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8224 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8225 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8227 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8228 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8231 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8234 ia64_hpux_init_libfuncs (void)
8236 ia64_init_libfuncs ();
8238 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8239 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8240 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8242 /* ia64_expand_compare uses this. */
8243 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8245 /* These should never be used. */
8246 set_optab_libfunc (eq_optab, TFmode, 0);
8247 set_optab_libfunc (ne_optab, TFmode, 0);
8248 set_optab_libfunc (gt_optab, TFmode, 0);
8249 set_optab_libfunc (ge_optab, TFmode, 0);
8250 set_optab_libfunc (lt_optab, TFmode, 0);
8251 set_optab_libfunc (le_optab, TFmode, 0);
8254 /* Rename the division and modulus functions in VMS. */
8257 ia64_vms_init_libfuncs (void)
8259 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8260 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8261 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8262 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8263 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8264 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8265 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8266 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8269 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8270 the HPUX conventions. */
8273 ia64_sysv4_init_libfuncs (void)
8275 ia64_init_libfuncs ();
8277 /* These functions are not part of the HPUX TFmode interface. We
8278 use them instead of _U_Qfcmp, which doesn't work the way we
8280 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8281 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8282 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8283 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8284 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8285 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8287 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8288 glibc doesn't have them. */
8291 /* Switch to the section to which we should output X. The only thing
8292 special we do here is to honor small data. */
8295 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8296 unsigned HOST_WIDE_INT align)
8298 if (GET_MODE_SIZE (mode) > 0
8299 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8302 default_elf_select_rtx_section (mode, x, align);
8305 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8306 Pretend flag_pic is always set. */
8309 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8311 default_elf_select_section_1 (exp, reloc, align, true);
8315 ia64_rwreloc_unique_section (tree decl, int reloc)
8317 default_unique_section_1 (decl, reloc, true);
8321 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8322 unsigned HOST_WIDE_INT align)
8324 int save_pic = flag_pic;
8326 ia64_select_rtx_section (mode, x, align);
8327 flag_pic = save_pic;
8331 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8333 return default_section_type_flags_1 (decl, name, reloc, true);
/* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
   structure type and the address of that type should be passed
   in out0, rather than in r8.  */
8341 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
8343 tree ret_type = TREE_TYPE (fntype);
8345 /* The Itanium C++ ABI requires that out0, rather than r8, be used
8346 as the structure return address parameter, if the return value
8347 type has a non-trivial copy constructor or destructor. It is not
8348 clear if this same convention should be used for other
8349 programming languages. Until G++ 3.4, we incorrectly used r8 for
8350 these return values. */
8351 return (abi_version_at_least (2)
8353 && TYPE_MODE (ret_type) == BLKmode
8354 && TREE_ADDRESSABLE (ret_type)
8355 && strcmp (lang_hooks.name, "GNU C++") == 0);

/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */
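
/* In outline, the thunk emitted below behaves as if it executed

       this += DELTA;
       if (VCALL_OFFSET)
         this += *(HOST_WIDE_INT *) (*(char **) this + VCALL_OFFSET);
       <tail call> FUNCTION (this, ...);

   with the additions done in 64-bit registers ("this" is widened
   first for ILP32) and constants that do not fit an add immediate
   moved through a scratch register.  */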
static void
ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this, insn, funexp;
  unsigned int this_parmno;
  unsigned int this_regno;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  reset_block_changes ();

  /* Set things up as ia64_expand_prologue might.  */
  last_scratch_gr_reg = 15;

  memset (&current_frame_info, 0, sizeof (current_frame_info));
  current_frame_info.spill_cfa_off = -16;
  current_frame_info.n_input_regs = 1;
  current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Figure out whether "this" will be the first parameter (the
     typical case) or the second parameter (as happens when the
     virtual function returns certain class objects).  */
  this_parmno
    = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
       ? 1 : 0);
  this_regno = IN_REG (this_parmno);
  if (!TARGET_REG_NAMES)
    reg_names[this_regno] = ia64_reg_numbers[this_parmno];

  this = gen_rtx_REG (Pmode, this_regno);
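  /* Under the 32-bit ABI (TARGET_ILP32) "this" arrives as a 32-bit
     ptr_mode value; it must be widened to a 64-bit Pmode register
     before the arithmetic below.  A DELTA that fits the 14-bit "I"
     add-immediate class can be folded into the widening insn.  */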
  if (TARGET_ILP32)
    {
      rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
      REG_POINTER (tmp) = 1;
      if (delta && CONST_OK_FOR_I (delta))
        {
          emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
          delta = 0;
        }
      else
        emit_insn (gen_ptr_extend (this, tmp));
    }

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (!CONST_OK_FOR_I (delta))
        {
          rtx tmp = gen_rtx_REG (Pmode, 2);
          emit_move_insn (tmp, delta_rtx);
          delta_rtx = tmp;
        }
      emit_insn (gen_adddi3 (this, this, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 2);

      if (TARGET_ILP32)
        {
          rtx t = gen_rtx_REG (ptr_mode, 2);
          REG_POINTER (t) = 1;
          /* Fetch the vtable pointer, *this.  */
          emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
          if (CONST_OK_FOR_I (vcall_offset))
            {
              emit_insn (gen_ptr_extend_plus_imm (tmp, t,
                                                  vcall_offset_rtx));
              vcall_offset = 0;
            }
          else
            emit_insn (gen_ptr_extend (tmp, t));
        }
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      if (vcall_offset)
        {
          if (!CONST_OK_FOR_J (vcall_offset))
            {
              rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
              emit_move_insn (tmp2, vcall_offset_rtx);
              vcall_offset_rtx = tmp2;
            }
          emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
        }

      /* Load the adjustment from *(*this + vcall_offset).  */
      if (TARGET_ILP32)
        emit_move_insn (gen_rtx_REG (ptr_mode, 2),
                        gen_rtx_MEM (ptr_mode, tmp));
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  emit_all_insn_group_barriers (NULL);

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1, 0);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */
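
/* Returning NULL_RTX makes the middle end pass the address of the
   return slot as a hidden first argument (out0 in the caller, in0 in
   the callee); otherwise the address is passed in the fixed register
   r8.  */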

static rtx
ia64_struct_value_rtx (tree fntype,
                       int incoming ATTRIBUTE_UNUSED)
{
  if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}
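
/* Worker function for TARGET_SCALAR_MODE_SUPPORTED_P.  TFmode (IEEE
   quad precision) is only reported as supported on HPUX, where the
   TFmode run-time routines set up above are available.  */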

static bool
ia64_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode:
      return true;

    case SFmode:
    case DFmode:
    case XFmode:
      return true;

    case TFmode:
      return TARGET_HPUX;

    default:
      return false;
    }
}

#include "gt-ia64.h"