/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
   David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;

/* Define the information needed to generate branch and scc insns.  This is
   stored from the compare operation.  */
struct rtx_def * ia64_compare_op0;
struct rtx_def * ia64_compare_op1;
/* Register names for ia64_expand_prologue.  */
static const char * const ia64_reg_numbers[96] =
{ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
  "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
  "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
  "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
  "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
  "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
  "r104","r105","r106","r107","r108","r109","r110","r111",
  "r112","r113","r114","r115","r116","r117","r118","r119",
  "r120","r121","r122","r123","r124","r125","r126","r127"};
/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_input_reg_names[8] =
{ "in0",  "in1",  "in2",  "in3",  "in4",  "in5",  "in6",  "in7" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
  "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
  "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
  "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
  "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
  "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
  "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
  "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
  "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
  "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* String used with the -mfixed-range= option.  */
const char *ia64_fixed_range_string;

/* Determines whether we use adds, addl, or movl to generate our
   TLS immediate offsets.  */
int ia64_tls_size = 22;
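
/* For reference: these sizes track the IA-64 add-immediate forms.
   "adds" takes a signed 14-bit immediate, "addl" a signed 22-bit
   immediate, and "movl" a full 64-bit one, so the meaningful
   settings here are 14, 22, and 64.  */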
/* String used with the -mtls-size= option.  */
const char *ia64_tls_size_string;

/* Which cpu are we scheduling for.  */
enum processor_type ia64_tune;

/* String used with the -tune= option.  */
const char *ia64_tune_string;

/* Determines whether we run our final scheduling pass or not.  We always
   avoid the normal second scheduling pass.  */
static int ia64_flag_schedule_insns2;

/* Determines whether we run variable tracking in machine dependent
   reorganization.  */
static int ia64_flag_var_tracking;

/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */
unsigned int ia64_section_threshold;

/* The following variable is used by the DFA insn scheduler.  The value is
   TRUE if we do insn bundling instead of insn scheduling.  */
int bundling_p = 0;
/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;	/* size of the stack frame, not including
				   the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;	/* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;	/* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size;  /* size of spill area for others.  */
  HARD_REG_SET mask;		/* mask of saved registers.  */
  unsigned int gr_used_mask;	/* mask of registers in use as gr spill
				   registers or long-term scratches.  */
  int n_spilled;		/* number of spilled registers.  */
  int reg_fp;			/* register for fp.  */
  int reg_save_b0;		/* save register for b0.  */
  int reg_save_pr;		/* save register for prs.  */
  int reg_save_ar_pfs;		/* save register for ar.pfs.  */
  int reg_save_ar_unat;		/* save register for ar.unat.  */
  int reg_save_ar_lc;		/* save register for ar.lc.  */
  int reg_save_gp;		/* save register for gp.  */
  int n_input_regs;		/* number of input registers used.  */
  int n_local_regs;		/* number of local registers used.  */
  int n_output_regs;		/* number of output registers used.  */
  int n_rotate_regs;		/* number of rotating registers used.  */

  char need_regstk;		/* true if a .regstk directive needed.  */
  char initialized;		/* true if the data is finalized.  */
};

/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;
static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx, rtx);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
static int find_gr_spill (int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);

static enum machine_mode hfa_element_mode (tree, int);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				    tree, bool);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (tree, tree);
static bool ia64_rtx_costs (rtx, int, int, int *);
static void fix_range (const char *);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (tree);
static void process_epilogue (void);
static int process_set (FILE *, rtx);

static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
					 int, tree, rtx);
static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_end_prologue (FILE *);

static int ia64_issue_rate (void);
static int ia64_adjust_cost (rtx, rtx, rtx, int);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);

static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static unsigned bundle_state_hash (const void *);
static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);

static rtx get_next_important_insn (rtx, rtx);
static void bundling (FILE *, int, rtx, rtx);

static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static void ia64_file_start (void);

static void ia64_select_rtx_section (enum machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_unique_section (tree, int)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
					     unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
     ATTRIBUTE_UNUSED;

static void ia64_hpux_add_extern_decl (tree decl)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_file_end (void)
     ATTRIBUTE_UNUSED;
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;

static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
/* Table of valid machine attributes.  */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "syscall_linkage", 0, 0, false, true, true, NULL },
  { "model",           1, 1, true, false, false, ia64_handle_model_attribute },
  { NULL,              0, 0, false, false, false, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
  ia64_first_cycle_multipass_dfa_lookahead_guard

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#endif

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#endif

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg

#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT process_for_unwind_directive

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
struct gcc_target targetm = TARGET_INITIALIZER;

typedef enum
{
  ADDR_AREA_NORMAL,	/* normal address area */
  ADDR_AREA_SMALL	/* addressable by "addl" (-2MB < addr < 2MB) */
}
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
	return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}
static tree
ia64_handle_model_attribute (tree *node, tree name, tree args,
			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  ia64_addr_area addr_area = ADDR_AREA_NORMAL;
  ia64_addr_area area;
  tree arg, decl = *node;

  init_idents ();
  arg = TREE_VALUE (args);
  if (arg == small_ident1 || arg == small_ident2)
    {
      addr_area = ADDR_AREA_SMALL;
    }
  else
    {
      warning ("invalid argument of %qs attribute",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  switch (TREE_CODE (decl))
    {
    case VAR_DECL:
      if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
	   == FUNCTION_DECL)
	  && !TREE_STATIC (decl))
	{
	  error ("%Jan address area attribute cannot be specified for "
		 "local variables", decl, decl);
	  *no_add_attrs = true;
	}
      area = ia64_get_addr_area (decl);
      if (area != ADDR_AREA_NORMAL && addr_area != area)
	{
	  error ("%Jaddress area of '%s' conflicts with previous "
		 "declaration", decl, decl);
	  *no_add_attrs = true;
	}
      break;

    case FUNCTION_DECL:
      error ("%Jaddress area attribute cannot be specified for functions",
	     decl, decl);
      *no_add_attrs = true;
      break;

    default:
      warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
      break;
    }

  return NULL_TREE;
}
static void
ia64_encode_addr_area (tree decl, rtx symbol)
{
  int flags;

  flags = SYMBOL_REF_FLAGS (symbol);
  switch (ia64_get_addr_area (decl))
    {
    case ADDR_AREA_NORMAL: break;
    case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
    default: abort ();
    }
  SYMBOL_REF_FLAGS (symbol) = flags;
}
static void
ia64_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) == VAR_DECL
      && GET_CODE (DECL_RTL (decl)) == MEM
      && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
    ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (rtx dst, rtx src)
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */
  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant: either 0, 0.0, or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
}
int
addp4_optimize_ok (rtx op1, rtx op2)
{
  return (basereg_operand (op1, GET_MODE (op1)) !=
	  basereg_operand (op2, GET_MODE (op2)));
}
/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
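
/* For example: with rop = 0xff00 and rshift = 8, the shift discards
   the shifted-in zeros, leaving op = 0xff; exact_log2 (0x100) == 8,
   so the field is 8 bits wide.  With rop = 0xf0ff and rshift = 8,
   op + 1 is not a power of two and exact_log2 returns -1.  */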
/* Expand a symbolic constant load.  */

void
ia64_expand_load_address (rtx dest, rtx src)
{
  if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (src))
    abort ();
  if (GET_CODE (dest) != REG)
    abort ();

  /* ILP32 mode still loads 64-bits of data from the GOT.  This avoids
     having to pointer-extend the value afterward.  Other forms of address
     computation below are also more natural to compute as 64-bit quantities.
     If we've been given an SImode destination register, change it.  */
  if (GET_MODE (dest) != Pmode)
    dest = gen_rtx_REG (Pmode, REGNO (dest));

  if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
      return;
    }
  else if (TARGET_AUTO_PIC)
    {
      emit_insn (gen_load_gprel64 (dest, src));
      return;
    }
  else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
    {
      emit_insn (gen_load_fptr (dest, src));
      return;
    }
  else if (sdata_symbolic_operand (src, VOIDmode))
    {
      emit_insn (gen_load_gprel (dest, src));
      return;
    }

  if (GET_CODE (src) == CONST
      && GET_CODE (XEXP (src, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
      && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x3fff) != 0)
    {
      rtx sym = XEXP (XEXP (src, 0), 0);
      HOST_WIDE_INT ofs, hi, lo;

      /* Split the offset into a sign extended 14-bit low part
	 and a complementary high part.  */
      ofs = INTVAL (XEXP (XEXP (src, 0), 1));
      lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
      hi = ofs - lo;
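      /* For example: ofs = 0x3000 gives lo = ((0x3000 & 0x3fff)
	 ^ 0x2000) - 0x2000 = -0x1000 and hi = 0x4000, so the addend
	 is rebuilt as 0x4000 + (-0x1000), the low part fitting the
	 signed 14-bit immediate of an adds instruction.  */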

      ia64_expand_load_address (dest, plus_constant (sym, hi));
      emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
    }
  else
    {
      rtx tmp;

      tmp = gen_rtx_HIGH (Pmode, src);
      tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));

      tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
    }
}
static GTY(()) rtx gen_tls_tga;
static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static GTY(()) rtx thread_pointer_rtx;
static rtx
gen_thread_pointer (void)
{
  if (!thread_pointer_rtx)
    thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
  return thread_pointer_rtx;
}
static rtx
ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
{
  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
  rtx orig_op0 = op0;

  switch (tls_kind)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_const_mem (Pmode, tga_op1);

      tga_op2 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
      tga_op2 = gen_const_mem (Pmode, tga_op2);

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      if (GET_MODE (op0) != Pmode)
	op0 = tga_ret;
      emit_libcall_block (insns, op0, tga_ret, op1);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic.
	 If the call to __tls_get_addr is used only by a single symbol,
	 then we should (somehow) move the dtprel to the second arg
	 to avoid the extra add.  */
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_const_mem (Pmode, tga_op1);

      tga_op2 = const0_rtx;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_LD_BASE);
      tmp = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, tmp, tga_ret, tga_eqv);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_dtprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, tmp, op0));
	}
      else
	emit_insn (gen_add_dtprel (op0, tmp, op1));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_tprel (tmp, op1));
      tmp = gen_const_mem (Pmode, tmp);
      tmp = force_reg (Pmode, tmp);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_tprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
	}
      else
	emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
      break;

    default:
      abort ();
    }

  if (orig_op0 == op0)
    return NULL_RTX;
  if (GET_MODE (orig_op0) == Pmode)
    return op0;
  return gen_lowpart (GET_MODE (orig_op0), op0);
}
rtx
ia64_expand_move (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
    op1 = force_reg (mode, op1);

  if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
    {
      enum tls_model tls_kind;
      if (GET_CODE (op1) == SYMBOL_REF
	  && (tls_kind = SYMBOL_REF_TLS_MODEL (op1)))
	return ia64_expand_tls_address (tls_kind, op0, op1);

      if (!TARGET_NO_PIC && reload_completed)
	{
	  ia64_expand_load_address (op0, op1);
	  return NULL_RTX;
	}
    }

  return op1;
}
/* Split a move from OP1 to OP0 conditional on COND.  */

void
ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
{
  rtx insn, first = get_last_insn ();

  emit_move_insn (op0, op1);

  for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
					  PATTERN (insn));
}
/* Split a post-reload TImode or TFmode reference into two DImode
   components.  This is made extra difficult by the fact that we do
   not get any scratch registers to work with, because reload cannot
   be prevented from giving us a scratch that overlaps the register
   pair involved.  So instead, when addressing memory, we tweak the
   pointer register up and back down with POST_INCs.  Or up and not
   back down when we can get away with it.

   REVERSED is true when the loads must be done in reversed order
   (high word first) for correctness.  DEAD is true when the pointer
   dies with the second insn we generate and therefore the second
   address must not carry a postmodify.

   May return an insn which is to be emitted after the moves.  */
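
/* Illustration (not literal compiler output): a TImode load through
   a base register that stays live is split roughly as

	ld8 lo = [base], 8	// low word, base += 8
	ld8 hi = [base], -8	// high word, base restored

   and the trailing post-decrement is omitted when DEAD says the
   pointer expires with the second load.  */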
static rtx
ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
{
  rtx fixup = 0;

  switch (GET_CODE (in))
    {
    case REG:
      out[reversed] = gen_rtx_REG (DImode, REGNO (in));
      out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      /* Cannot occur reversed.  */
      if (reversed) abort ();

      if (GET_MODE (in) != TFmode)
	split_double (in, &out[0], &out[1]);
      else
	/* split_double does not understand how to split a TFmode
	   quantity into a pair of DImode constants.  */
	{
	  REAL_VALUE_TYPE r;
	  unsigned HOST_WIDE_INT p[2];
	  long l[4];  /* TFmode is 128 bits */

	  REAL_VALUE_FROM_CONST_DOUBLE (r, in);
	  real_to_target (l, &r, TFmode);

	  if (FLOAT_WORDS_BIG_ENDIAN)
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
	      p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
	    }
	  else
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
	      p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
	    }
	  out[0] = GEN_INT (p[0]);
	  out[1] = GEN_INT (p[1]);
	}
      break;

    case MEM:
      {
	rtx base = XEXP (in, 0);
	rtx offset;

	switch (GET_CODE (base))
	  {
	  case REG:
	    if (!reversed)
	      {
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
		out[1] = adjust_automodify_address
		  (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
	      }
	    else
	      {
		/* Reversal requires a pre-increment, which can only
		   be done as a separate insn.  */
		emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
		out[1] = adjust_address (in, DImode, 0);
	      }
	    break;

	  case POST_INC:
	    if (reversed || dead) abort ();
	    /* Just do the increment in two steps.  */
	    out[0] = adjust_automodify_address (in, DImode, 0, 0);
	    out[1] = adjust_automodify_address (in, DImode, 0, 8);
	    break;

	  case POST_DEC:
	    if (reversed || dead) abort ();
	    /* Add 8, subtract 24.  */
	    base = XEXP (base, 0);
	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
	    out[1] = adjust_automodify_address
	      (in, DImode,
	       gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
	       8);
	    break;

	  case POST_MODIFY:
	    if (reversed || dead) abort ();
	    /* Extract and adjust the modification.  This case is
	       trickier than the others, because we might have an
	       index register, or we might have a combined offset that
	       doesn't fit a signed 9-bit displacement field.  We can
	       assume the incoming expression is already legitimate.  */
	    offset = XEXP (base, 1);
	    base = XEXP (base, 0);

	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);

	    if (GET_CODE (XEXP (offset, 1)) == REG)
	      {
		/* Can't adjust the postmodify to match.  Emit the
		   original, then a separate addition insn.  */
		out[1] = adjust_automodify_address (in, DImode, 0, 8);
		fixup = gen_adddi3 (base, base, GEN_INT (-8));
	      }
	    else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
	      abort ();
	    else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
	      {
		/* Again the postmodify cannot be made to match, but
		   in this case it's more efficient to get rid of the
		   postmodify entirely and fix up with an add insn.  */
		out[1] = adjust_automodify_address (in, DImode, base, 8);
		fixup = gen_adddi3 (base, base,
				    GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
	      }
	    else
	      {
		/* Combined offset still fits in the displacement field.
		   (We cannot overflow it at the high end.)  */
		out[1] = adjust_automodify_address
		  (in, DImode,
		   gen_rtx_POST_MODIFY (Pmode, base,
					gen_rtx_PLUS (Pmode, base,
						      GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
		   8);
	      }
	    break;

	  default:
	    abort ();
	  }
	break;
      }

    default:
      abort ();
    }

  return fixup;
}
/* Split a TImode or TFmode move instruction after reload.
   This is used by *movtf_internal and *movti_internal.  */
void
ia64_split_tmode_move (rtx operands[])
{
  rtx in[2], out[2], insn;
  rtx fixup[2];
  bool dead = false;
  bool reversed = false;

  /* It is possible for reload to decide to overwrite a pointer with
     the value it points to.  In that case we have to do the loads in
     the appropriate order so that the pointer is not destroyed too
     early.  Also we must not generate a postmodify for that second
     load, or rws_access_regno will abort.  */
  if (GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      rtx base = XEXP (operands[1], 0);
      while (GET_CODE (base) != REG)
	base = XEXP (base, 0);

      if (REGNO (base) == REGNO (operands[0]))
	reversed = true;
      dead = true;
    }
  /* Another reason to do the moves in reversed order is if the first
     element of the target register pair is also the second element of
     the source register pair.  */
  if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    reversed = true;

  fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
  fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);

#define MAYBE_ADD_REG_INC_NOTE(INSN, EXP)				\
  if (GET_CODE (EXP) == MEM						\
      && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_INC			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_DEC))			\
    REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC,			\
					  XEXP (XEXP (EXP, 0), 0),	\
					  REG_NOTES (INSN))

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[0]);

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[1]);

  if (fixup[0])
    emit_insn (fixup[0]);
  if (fixup[1])
    emit_insn (fixup[1]);

#undef MAYBE_ADD_REG_INC_NOTE
}
/* ??? Fixing GR->FR XFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from SECONDARY_MEMORY_NEEDED or the second from
   SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:XF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */

static rtx
spill_xfmode_operand (rtx in, int force)
{
  if (GET_CODE (in) == SUBREG
      && GET_MODE (SUBREG_REG (in)) == TImode
      && GET_CODE (SUBREG_REG (in)) == REG)
    {
      rtx memt = assign_stack_temp (TImode, 16, 0);
      emit_move_insn (memt, SUBREG_REG (in));
      return adjust_address (memt, XFmode, 0);
    }
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (XFmode, 16, 0);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
/* Emit comparison instruction if necessary, returning the expression
   that holds the compare result in the proper mode.  */

static GTY(()) rtx cmptf_libfunc;

rtx
ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
{
  rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
  rtx cmp;

  /* If we have a BImode input, then we already have a compare result, and
     do not need to emit another comparison.  */
  if (GET_MODE (op0) == BImode)
    {
      if ((code == NE || code == EQ) && op1 == const0_rtx)
	cmp = op0;
      else
	abort ();
    }
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
  else if (GET_MODE (op0) == TFmode)
    {
      enum qfcmp_magic {
	QCMP_INV = 1,	/* Raise FP_INVALID on SNaN as a side effect.  */
	QCMP_UNORD = 2,
	QCMP_EQ = 4,
	QCMP_LT = 8,
	QCMP_GT = 16
      } magic;
      enum rtx_code ncode;
      rtx ret, insns;

      if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
	abort ();
      switch (code)
	{
	  /* 1 = equal, 0 = not equal.  Equality operators do
	     not raise FP_INVALID when given an SNaN operand.  */
	case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
	case NE:        magic = QCMP_EQ;                  ncode = EQ; break;
	  /* isunordered() from C99.  */
	case UNORDERED: magic = QCMP_UNORD;               ncode = NE; break;
	case ORDERED:   magic = QCMP_UNORD;               ncode = EQ; break;
	  /* Relational operators raise FP_INVALID when given
	     an SNaN operand.  */
	case LT:        magic = QCMP_LT        |QCMP_INV; ncode = NE; break;
	case LE:        magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	case GT:        magic = QCMP_GT        |QCMP_INV; ncode = NE; break;
	case GE:        magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	  /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
	     Expanders for buneq etc. would have to be added to ia64.md
	     for this to be useful.  */
	default: abort ();
	}

      start_sequence ();

      ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
				     op0, TFmode, op1, TFmode,
				     GEN_INT (magic), DImode);
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (ncode, BImode,
					      ret, const0_rtx)));

      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, cmp, cmp,
			  gen_rtx_fmt_ee (code, BImode, op0, op1));
      code = NE;
    }
  else
    {
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (code, BImode, op0, op1)));
      code = NE;
    }

  return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
}
/* Emit the appropriate sequence for a call.  */

void
ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
		  int sibcall_p)
{
  rtx insn, b0;

  addr = XEXP (addr, 0);
  addr = convert_memory_address (DImode, addr);
  b0 = gen_rtx_REG (DImode, R_BR (0));

  /* ??? Should do this for functions known to bind local too.  */
  if (TARGET_NO_PIC || TARGET_AUTO_PIC)
    {
      if (sibcall_p)
	insn = gen_sibcall_nogp (addr);
      else if (! retval)
	insn = gen_call_nogp (addr, b0);
      else
	insn = gen_call_value_nogp (retval, addr, b0);
      insn = emit_call_insn (insn);
    }
  else
    {
      if (sibcall_p)
	insn = gen_sibcall_gp (addr);
      else if (! retval)
	insn = gen_call_gp (addr, b0);
      else
	insn = gen_call_value_gp (retval, addr, b0);
      insn = emit_call_insn (insn);

      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
    }

  if (sibcall_p)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
}
void
ia64_reload_gp (void)
{
  rtx tmp;

  if (current_frame_info.reg_save_gp)
    tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
  else
    {
      HOST_WIDE_INT offset;

      offset = (current_frame_info.spill_cfa_off
		+ current_frame_info.spill_size);
      if (frame_pointer_needed)
	{
	  tmp = hard_frame_pointer_rtx;
	  offset = -offset;
	}
      else
	{
	  tmp = stack_pointer_rtx;
	  offset = current_frame_info.total_size - offset;
	}

      if (CONST_OK_FOR_I (offset))
	emit_insn (gen_adddi3 (pic_offset_table_rtx,
			       tmp, GEN_INT (offset)));
      else
	{
	  emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
	  emit_insn (gen_adddi3 (pic_offset_table_rtx,
				 pic_offset_table_rtx, tmp));
	}

      tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
    }

  emit_move_insn (pic_offset_table_rtx, tmp);
}
void
ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
		 rtx scratch_b, int noreturn_p, int sibcall_p)
{
  rtx insn;
  bool is_desc = false;

  /* If we find we're calling through a register, then we're actually
     calling through a descriptor, so load up the values.  */
  if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
    {
      rtx tmp;
      bool addr_dead_p;

      /* ??? We are currently constrained to *not* use peep2, because
	 we can legitimately change the global lifetime of the GP
	 (in the form of killing where previously live).  This is
	 because a call through a descriptor doesn't use the previous
	 value of the GP, while a direct call does, and we do not
	 commit to either form until the split here.

	 That said, this means that we lack precise life info for
	 whether ADDR is dead after this call.  This is not terribly
	 important, since we can fix things up essentially for free
	 with the POST_DEC below, but it's nice to not use it when we
	 can immediately tell it's not necessary.  */
      addr_dead_p = ((noreturn_p || sibcall_p
		      || TEST_HARD_REG_BIT (regs_invalidated_by_call,
					    REGNO (addr)))
		     && !FUNCTION_ARG_REGNO_P (REGNO (addr)));

      /* Load the code address into scratch_b.  */
      tmp = gen_rtx_POST_INC (Pmode, addr);
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (scratch_r, tmp);
      emit_move_insn (scratch_b, scratch_r);

      /* Load the GP address.  If ADDR is not dead here, then we must
	 revert the change made above via the POST_INCREMENT.  */
      if (!addr_dead_p)
	tmp = gen_rtx_POST_DEC (Pmode, addr);
      else
	tmp = addr;
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (pic_offset_table_rtx, tmp);

      is_desc = true;
      addr = scratch_b;
    }

  if (sibcall_p)
    insn = gen_sibcall_nogp (addr);
  else if (retval)
    insn = gen_call_value_nogp (retval, addr, retaddr);
  else
    insn = gen_call_nogp (addr, retaddr);
  emit_call_insn (insn);

  if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
    ia64_reload_gp ();
}
/* Begin the assembly file.  */

static void
ia64_file_start (void)
{
  default_file_start ();
  emit_safe_across_calls ();
}

void
emit_safe_across_calls (void)
{
  unsigned int rs, re;
  int out_state;

  rs = 1;
  out_state = 0;
  while (1)
    {
      while (rs < 64 && call_used_regs[PR_REG (rs)])
	rs++;
      if (rs >= 64)
	break;
      for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
	continue;
      if (out_state == 0)
	{
	  fputs ("\t.pred.safe_across_calls ", asm_out_file);
	  out_state = 1;
	}
      else
	fputc (',', asm_out_file);
      if (re == rs + 1)
	fprintf (asm_out_file, "p%u", rs);
      else
	fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
      rs = re + 1;
    }
  if (out_state)
    fputc ('\n', asm_out_file);
}
/* Helper function for ia64_compute_frame_size: find an appropriate general
   register to spill some special register to.  SPECIAL_SPILL_MASK contains
   bits in GR0 to GR31 that have already been allocated by this routine.
   TRY_LOCALS is true if we should attempt to locate a local regnum.  */

static int
find_gr_spill (int try_locals)
{
  int regno;

  /* If this is a leaf function, first try an otherwise unused
     call-clobbered register.  */
  if (current_function_is_leaf)
    {
      for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
	if (! regs_ever_live[regno]
	    && call_used_regs[regno]
	    && ! fixed_regs[regno]
	    && ! global_regs[regno]
	    && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	  {
	    current_frame_info.gr_used_mask |= 1 << regno;
	    return regno;
	  }
    }

  if (try_locals)
    {
      regno = current_frame_info.n_local_regs;
      /* If there is a frame pointer, then we can't use loc79, because
	 that is HARD_FRAME_POINTER_REGNUM.  In particular, see the
	 reg_name switching code in ia64_expand_prologue.  */
      if (regno < (80 - frame_pointer_needed))
	{
	  current_frame_info.n_local_regs = regno + 1;
	  return LOC_REG (0) + regno;
	}
    }

  /* Failed to find a general register to spill to.  Must use stack.  */
  return 0;
}
/* In order to make for nice schedules, we try to allocate every temporary
   to a different register.  We must of course stay away from call-saved,
   fixed, and global registers.  We must also stay away from registers
   allocated in current_frame_info.gr_used_mask, since those include regs
   used all through the prologue.

   Any register allocated here must be used immediately.  The idea is to
   aid scheduling, not to solve data flow problems.  */

static int last_scratch_gr_reg;

static int
next_scratch_gr_reg (void)
{
  int i, regno;

  for (i = 0; i < 32; ++i)
    {
      regno = (last_scratch_gr_reg + i + 1) & 31;
      if (call_used_regs[regno]
	  && ! fixed_regs[regno]
	  && ! global_regs[regno]
	  && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	{
	  last_scratch_gr_reg = regno;
	  return regno;
	}
    }

  /* There must be _something_ available.  */
  abort ();
}
/* Helper function for ia64_compute_frame_size, called through
   diddle_return_value.  Mark REG in current_frame_info.gr_used_mask.  */

static void
mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno = REGNO (reg);
  if (regno < 32)
    {
      unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
      for (i = 0; i < n; ++i)
	current_frame_info.gr_used_mask |= 1 << (regno + i);
    }
}
/* Returns the number of bytes offset between the frame pointer and the stack
   pointer for the current function.  SIZE is the number of bytes of space
   needed for local variables.  */

static void
ia64_compute_frame_size (HOST_WIDE_INT size)
{
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT spill_size = 0;
  HOST_WIDE_INT extra_spill_size = 0;
  HOST_WIDE_INT pretend_args_size;
  HARD_REG_SET mask;
  int n_spilled = 0;
  int spilled_gr_p = 0;
  int spilled_fr_p = 0;
  unsigned int regno;
  int i;

  if (current_frame_info.initialized)
    return;

  memset (&current_frame_info, 0, sizeof current_frame_info);
  CLEAR_HARD_REG_SET (mask);

  /* Don't allocate scratches to the return register.  */
  diddle_return_value (mark_reg_gr_used_mask, NULL);

  /* Don't allocate scratches to the EH scratch registers.  */
  if (cfun->machine->ia64_eh_epilogue_sp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
  if (cfun->machine->ia64_eh_epilogue_bsp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);

  /* Find the size of the register stack frame.  We have only 80 local
     registers, because we reserve 8 for the inputs and 8 for the
     outputs.  */

  /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
     since we'll be adjusting that down later.  */
  regno = LOC_REG (78) + ! frame_pointer_needed;
  for (; regno >= LOC_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;

  /* For functions marked with the syscall_linkage attribute, we must mark
     all eight input registers as in use, so that locals aren't visible to
     the caller.  */

  if (cfun->machine->n_varargs > 0
      || lookup_attribute ("syscall_linkage",
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    current_frame_info.n_input_regs = 8;
  else
    {
      for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
	if (regs_ever_live[regno])
	  break;
      current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
    }

  for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  i = regno - OUT_REG (0) + 1;

  /* When -p profiling, we need one output register for the mcount argument.
     Likewise for -a profiling for the bb_init_func argument.  For -ax
     profiling, we need two output registers for the two bb_init_trace_func
     arguments.  */
  if (current_function_profile)
    i = MAX (i, 1);
  current_frame_info.n_output_regs = i;

  /* ??? No rotating register support yet.  */
  current_frame_info.n_rotate_regs = 0;

  /* Discover which registers need spilling, and how much room that
     will take.  Begin with floating point and general registers,
     which will always wind up on the stack.  */

  for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 16;
	n_spilled += 1;
	spilled_fr_p = 1;
      }

  for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
	spilled_gr_p = 1;
      }

  for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
      }

  /* Now come all special registers that might get saved in other
     general registers.  */

  if (frame_pointer_needed)
    {
      current_frame_info.reg_fp = find_gr_spill (1);
      /* If we did not get a register, then we take LOC79.  This is guaranteed
	 to be free, even if regs_ever_live is already set, because this is
	 HARD_FRAME_POINTER_REGNUM.  This requires incrementing n_local_regs,
	 as we don't count loc79 above.  */
      if (current_frame_info.reg_fp == 0)
	{
	  current_frame_info.reg_fp = LOC_REG (79);
	  current_frame_info.n_local_regs++;
	}
    }

  if (! current_function_is_leaf)
    {
      /* Emit a save of BR0 if we call other functions.  Do this even
	 if this function doesn't return, as EH depends on this to be
	 able to unwind the stack.  */
      SET_HARD_REG_BIT (mask, BR_REG (0));

      current_frame_info.reg_save_b0 = find_gr_spill (1);
      if (current_frame_info.reg_save_b0 == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for ar.pfs.  */
      SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
      current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
      if (current_frame_info.reg_save_ar_pfs == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for gp.  Note that if we're calling setjmp, the stacked
	 registers are clobbered, so we fall back to the stack.  */
      current_frame_info.reg_save_gp
	= (current_function_calls_setjmp ? 0 : find_gr_spill (1));
      if (current_frame_info.reg_save_gp == 0)
	{
	  SET_HARD_REG_BIT (mask, GR_REG (1));
	  spill_size += 8;
	  n_spilled += 1;
	}
    }
  else
    {
      if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
	{
	  SET_HARD_REG_BIT (mask, BR_REG (0));
	  spill_size += 8;
	  n_spilled += 1;
	}

      if (regs_ever_live[AR_PFS_REGNUM])
	{
	  SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
	  current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
	  if (current_frame_info.reg_save_ar_pfs == 0)
	    {
	      extra_spill_size += 8;
	      n_spilled += 1;
	    }
	}
    }

  /* Unwind descriptor hackery: things are most efficient if we allocate
     consecutive GR save registers for RP, PFS, FP in that order.  However,
     it is absolutely critical that FP get the only hard register that's
     guaranteed to be free, so we allocated it first.  If all three did
     happen to be allocated hard regs, and are consecutive, rearrange them
     into the preferred order now.  */
  if (current_frame_info.reg_fp != 0
      && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
      && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
    {
      current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
      current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
      current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
    }

  /* See if we need to store the predicate register block.  */
  for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      break;
  if (regno <= PR_REG (63))
    {
      SET_HARD_REG_BIT (mask, PR_REG (0));
      current_frame_info.reg_save_pr = find_gr_spill (1);
      if (current_frame_info.reg_save_pr == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* ??? Mark them all as used so that register renaming and such
	 are free to use them.  */
      for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
	regs_ever_live[regno] = 1;
    }

  /* If we're forced to use st8.spill, we're forced to save and restore
     ar.unat as well.  The check for existing liveness allows inline asm
     to touch ar.unat.  */
  if (spilled_gr_p || cfun->machine->n_varargs
      || regs_ever_live[AR_UNAT_REGNUM])
    {
      regs_ever_live[AR_UNAT_REGNUM] = 1;
      SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
      current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_unat == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  if (regs_ever_live[AR_LC_REGNUM])
    {
      SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
      current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
      if (current_frame_info.reg_save_ar_lc == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}
    }

  /* If we have an odd number of words of pretend arguments written to
     the stack, then the FR save area will be unaligned.  We round the
     size of this area up to keep things 16 byte aligned.  */
  if (spilled_fr_p)
    pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
  else
    pretend_args_size = current_function_pretend_args_size;
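  /* For example, one stdarg word written to the stack makes
     current_function_pretend_args_size == 8; rounding it up to 16
     keeps the 16-byte FR spill slots that follow aligned.  */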

  total_size = (spill_size + extra_spill_size + size + pretend_args_size
		+ current_function_outgoing_args_size);
  total_size = IA64_STACK_ALIGN (total_size);

  /* We always use the 16-byte scratch area provided by the caller, but
     if we are a leaf function, there's no one to which we need to provide
     a scratch area.  */
  if (current_function_is_leaf)
    total_size = MAX (0, total_size - 16);

  current_frame_info.total_size = total_size;
  current_frame_info.spill_cfa_off = pretend_args_size - 16;
  current_frame_info.spill_size = spill_size;
  current_frame_info.extra_spill_size = extra_spill_size;
  COPY_HARD_REG_SET (current_frame_info.mask, mask);
  current_frame_info.n_spilled = n_spilled;
  current_frame_info.initialized = reload_completed;
}
/* Compute the initial difference between the specified pair of registers.  */

HOST_WIDE_INT
ia64_initial_elimination_offset (int from, int to)
{
  HOST_WIDE_INT offset;

  ia64_compute_frame_size (get_frame_size ());
  switch (from)
    {
    case FRAME_POINTER_REGNUM:
      if (to == HARD_FRAME_POINTER_REGNUM)
	{
	  if (current_function_is_leaf)
	    offset = -current_frame_info.total_size;
	  else
	    offset = -(current_frame_info.total_size
		       - current_function_outgoing_args_size - 16);
	}
      else if (to == STACK_POINTER_REGNUM)
	{
	  if (current_function_is_leaf)
	    offset = 0;
	  else
	    offset = 16 + current_function_outgoing_args_size;
	}
      else
	abort ();
      break;

    case ARG_POINTER_REGNUM:
      /* Arguments start above the 16 byte save area, unless stdarg
	 in which case we store through the 16 byte save area.  */
      if (to == HARD_FRAME_POINTER_REGNUM)
	offset = 16 - current_function_pretend_args_size;
      else if (to == STACK_POINTER_REGNUM)
	offset = (current_frame_info.total_size
		  + 16 - current_function_pretend_args_size);
      else
	abort ();
      break;

    default:
      abort ();
    }

  return offset;
}
/* If there are more than a trivial number of register spills, we use
   two interleaved iterators so that we can get two memory references
   per insn group.

   In order to simplify things in the prologue and epilogue expanders,
   we use helper functions to fix up the memory references after the
   fact with the appropriate offsets to a POST_MODIFY memory mode.
   The following data structure tracks the state of the two iterators
   while insns are being emitted.  */

struct spill_fill_data
{
  rtx init_after;		/* point at which to emit initializations */
  rtx init_reg[2];		/* initial base register */
  rtx iter_reg[2];		/* the iterator registers */
  rtx *prev_addr[2];		/* address of last memory use */
  rtx prev_insn[2];		/* the insn corresponding to prev_addr */
  HOST_WIDE_INT prev_off[2];	/* last offset */
  int n_iter;			/* number of iterators in use */
  int next_iter;		/* next iterator to use */
  unsigned int save_gr_used_mask;
};

static struct spill_fill_data spill_fill_data;
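
/* The iterators simply alternate: spill_restore_mem advances next_iter
   modulo n_iter, so with two iterators in use consecutive spills go
   through different base registers and can issue in the same group.  */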
static void
setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
{
  int i;

  spill_fill_data.init_after = get_last_insn ();
  spill_fill_data.init_reg[0] = init_reg;
  spill_fill_data.init_reg[1] = init_reg;
  spill_fill_data.prev_addr[0] = NULL;
  spill_fill_data.prev_addr[1] = NULL;
  spill_fill_data.prev_insn[0] = NULL;
  spill_fill_data.prev_insn[1] = NULL;
  spill_fill_data.prev_off[0] = cfa_off;
  spill_fill_data.prev_off[1] = cfa_off;
  spill_fill_data.next_iter = 0;
  spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;

  spill_fill_data.n_iter = 1 + (n_spills > 2);
  for (i = 0; i < spill_fill_data.n_iter; ++i)
    {
      int regno = next_scratch_gr_reg ();
      spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
      current_frame_info.gr_used_mask |= 1 << regno;
    }
}

static void
finish_spill_pointers (void)
{
  current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
}
static rtx
spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
  rtx disp_rtx = GEN_INT (disp);
  rtx mem;

  if (spill_fill_data.prev_addr[iter])
    {
      if (CONST_OK_FOR_N (disp))
	{
	  *spill_fill_data.prev_addr[iter]
	    = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
				   gen_rtx_PLUS (DImode,
						 spill_fill_data.iter_reg[iter],
						 disp_rtx));
	  REG_NOTES (spill_fill_data.prev_insn[iter])
	    = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
				 REG_NOTES (spill_fill_data.prev_insn[iter]));
	}
      else
	{
	  /* ??? Could use register post_modify for loads.  */
	  if (! CONST_OK_FOR_I (disp))
	    {
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }
	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.iter_reg[iter], disp_rtx));
	}
    }
  /* Micro-optimization: if we've created a frame pointer, it's at
     CFA 0, which may allow the real iterator to be initialized lower,
     slightly increasing parallelism.  Also, if there are few saves
     it may eliminate the iterator entirely.  */
  else if (disp == 0
	   && spill_fill_data.init_reg[iter] == stack_pointer_rtx
	   && frame_pointer_needed)
    {
      mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
      set_mem_alias_set (mem, get_varargs_alias_set ());
      return mem;
    }
  else
    {
      rtx seq, insn;

      if (disp == 0)
	seq = gen_movdi (spill_fill_data.iter_reg[iter],
			 spill_fill_data.init_reg[iter]);
      else
	{
	  start_sequence ();

	  if (! CONST_OK_FOR_I (disp))
	    {
	      rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
	      emit_move_insn (tmp, disp_rtx);
	      disp_rtx = tmp;
	    }

	  emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
				 spill_fill_data.init_reg[iter],
				 disp_rtx));

	  seq = get_insns ();
	  end_sequence ();
	}

      /* Careful for being the first insn in a sequence.  */
      if (spill_fill_data.init_after)
	insn = emit_insn_after (seq, spill_fill_data.init_after);
      else
	{
	  rtx first = get_insns ();
	  if (first)
	    insn = emit_insn_before (seq, first);
	  else
	    insn = emit_insn (seq);
	}
      spill_fill_data.init_after = insn;

      /* If DISP is 0, we may or may not have a further adjustment
	 afterward.  If we do, then the load/store insn may be modified
	 to be a post-modify.  If we don't, then this copy may be
	 eliminated by copyprop_hardreg_forward, which makes this
	 insn garbage, which runs afoul of the sanity check in
	 propagate_one_insn.  So mark this insn as legal to delete.  */
      REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
					   REG_NOTES (insn));
    }

  mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);

  /* ??? Not all of the spills are for varargs, but some of them are.
     The rest of the spills belong in an alias set of their own.  But
     it doesn't actually hurt to include them here.  */
  set_mem_alias_set (mem, get_varargs_alias_set ());

  spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
  spill_fill_data.prev_off[iter] = cfa_off;

  if (++iter >= spill_fill_data.n_iter)
    iter = 0;
  spill_fill_data.next_iter = iter;

  return mem;
}
static void
do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
	  rtx frame_reg)
{
  int iter = spill_fill_data.next_iter;
  rtx mem, insn;

  mem = spill_restore_mem (reg, cfa_off);
  insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;

  if (frame_reg)
    {
      rtx base;
      HOST_WIDE_INT off;

      RTX_FRAME_RELATED_P (insn) = 1;

      /* Don't even pretend that the unwind code can intuit its way
	 through a pair of interleaved post_modify iterators.  Just
	 provide the correct answer.  */

      if (frame_pointer_needed)
	{
	  base = hard_frame_pointer_rtx;
	  off = - cfa_off;
	}
      else
	{
	  base = stack_pointer_rtx;
	  off = current_frame_info.total_size - cfa_off;
	}

      REG_NOTES (insn)
	= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
			     gen_rtx_SET (VOIDmode,
					  gen_rtx_MEM (GET_MODE (reg),
						       plus_constant (base, off)),
					  frame_reg),
			     REG_NOTES (insn));
    }
}
static void
do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
{
  int iter = spill_fill_data.next_iter;
  rtx insn;

  insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
				GEN_INT (cfa_off)));
  spill_fill_data.prev_insn[iter] = insn;
}
/* Wrapper functions that discard the CONST_INT spill offset.  These
   exist so that we can give gr_spill/gr_fill the offset they need and
   use a consistent function interface.  */

static rtx
gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_movdi (dest, src);
}

static rtx
gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_fr_spill (dest, src);
}

static rtx
gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
{
  return gen_fr_restore (dest, src);
}
1998 /* Called after register allocation to add any instructions needed for the
1999 prologue. Using a prologue insn is favored compared to putting all of the
2000 instructions in output_function_prologue(), since it allows the scheduler
2001 to intermix instructions with the saves of the caller saved registers. In
2002 some cases, it might be necessary to emit a barrier instruction as the last
2003 insn to prevent such scheduling.
2005 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2006 so that the debug info generation code can handle them properly.
2008 The register save area is laid out like so:
2010 [ varargs spill area ]
2011 [ fr register spill area ]
2012 [ br register spill area ]
2013 [ ar register spill area ]
2014 [ pr register spill area ]
2015 [ gr register spill area ] */
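/* An illustrative layout (hypothetical function, exact offsets
   omitted): a varargs function that must also save f2, b0, ar.lc,
   the predicates, and r4 would populate the areas as

       [ varargs spill area ]   unnamed incoming GR argument words
       [ fr register area   ]   f2, in a 16-byte, 16-byte-aligned slot
       [ br register area   ]   b0
       [ ar register area   ]   ar.lc
       [ pr register area   ]   all 64 predicates as one 8-byte unit
       [ gr register area   ]   r4

   The real offsets are computed by ia64_compute_frame_size.  */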
2017 /* ??? Get inefficient code when the frame size is larger than can fit in an
2018 adds instruction. */
2021 ia64_expand_prologue (void)
2023 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2024 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2027 ia64_compute_frame_size (get_frame_size ());
2028 last_scratch_gr_reg = 15;
2030 /* If there is no epilogue, then we don't need some prologue insns.
2031 We need to avoid emitting the dead prologue insns, because flow
2032 will complain about them. */
2038 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2039 if ((e->flags & EDGE_FAKE) == 0
2040 && (e->flags & EDGE_FALLTHRU) != 0)
2042 epilogue_p = (e != NULL);
2047 /* Set the local, input, and output register names. We need to do this
2048 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2049 half. If we use in/loc/out register names, then we get assembler errors
2050 in crtn.S because there is no alloc insn or regstk directive in there. */
2051 if (! TARGET_REG_NAMES)
2053 int inputs = current_frame_info.n_input_regs;
2054 int locals = current_frame_info.n_local_regs;
2055 int outputs = current_frame_info.n_output_regs;
2057 for (i = 0; i < inputs; i++)
2058 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2059 for (i = 0; i < locals; i++)
2060 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2061 for (i = 0; i < outputs; i++)
2062 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2065 /* Set the frame pointer register name. The regnum is logically loc79,
2066 but of course we'll not have allocated that many locals. Rather than
2067 worrying about renumbering the existing rtxs, we adjust the name. */
2068 /* ??? This code means that we can never use one local register when
2069 there is a frame pointer. loc79 gets wasted in this case, as it is
2070 renamed to a register that will never be used. See also the try_locals
2071 code in find_gr_spill. */
2072 if (current_frame_info.reg_fp)
2074 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2075 reg_names[HARD_FRAME_POINTER_REGNUM]
2076 = reg_names[current_frame_info.reg_fp];
2077 reg_names[current_frame_info.reg_fp] = tmp;
2080 /* We don't need an alloc instruction if we've used no outputs or locals. */
2081 if (current_frame_info.n_local_regs == 0
2082 && current_frame_info.n_output_regs == 0
2083 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2084 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2086 /* If there is no alloc, but there are input registers used, then we
2087 need a .regstk directive. */
2088 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2089 ar_pfs_save_reg = NULL_RTX;
2093 current_frame_info.need_regstk = 0;
2095 if (current_frame_info.reg_save_ar_pfs)
2096 regno = current_frame_info.reg_save_ar_pfs;
2098 regno = next_scratch_gr_reg ();
2099 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2101 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2102 GEN_INT (current_frame_info.n_input_regs),
2103 GEN_INT (current_frame_info.n_local_regs),
2104 GEN_INT (current_frame_info.n_output_regs),
2105 GEN_INT (current_frame_info.n_rotate_regs)));
2106 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2109 /* Set up frame pointer, stack pointer, and spill iterators. */
2111 n_varargs = cfun->machine->n_varargs;
2112 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2113 stack_pointer_rtx, 0);
2115 if (frame_pointer_needed)
2117 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2118 RTX_FRAME_RELATED_P (insn) = 1;
2121 if (current_frame_info.total_size != 0)
2123 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2126 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2127 offset = frame_size_rtx;
2130 regno = next_scratch_gr_reg ();
2131 offset = gen_rtx_REG (DImode, regno);
2132 emit_move_insn (offset, frame_size_rtx);
2135 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2136 stack_pointer_rtx, offset));
2138 if (! frame_pointer_needed)
2140 RTX_FRAME_RELATED_P (insn) = 1;
2141 if (GET_CODE (offset) != CONST_INT)
2144 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2145 gen_rtx_SET (VOIDmode,
2147 gen_rtx_PLUS (DImode,
2154 /* ??? At this point we must generate a magic insn that appears to
2155 modify the stack pointer, the frame pointer, and all spill
2156 iterators. This would allow the most scheduling freedom. For
2157 now, just hard stop. */
2158 emit_insn (gen_blockage ());
2161 /* Must copy out ar.unat before doing any integer spills. */
2162 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2164 if (current_frame_info.reg_save_ar_unat)
2166 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2169 alt_regno = next_scratch_gr_reg ();
2170 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2171 current_frame_info.gr_used_mask |= 1 << alt_regno;
2174 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2175 insn = emit_move_insn (ar_unat_save_reg, reg);
2176 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2178 /* Even if we're not going to generate an epilogue, we still
2179 need to save the register so that EH works. */
2180 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2181 emit_insn (gen_prologue_use (ar_unat_save_reg));
2184 ar_unat_save_reg = NULL_RTX;
2186 /* Spill all varargs registers. Do this before spilling any GR registers,
2187 since we want the UNAT bits for the GR registers to override the UNAT
2188 bits from varargs, which we don't care about. */
2191 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2193 reg = gen_rtx_REG (DImode, regno);
2194 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2197 /* Locate the bottom of the register save area. */
2198 cfa_off = (current_frame_info.spill_cfa_off
2199 + current_frame_info.spill_size
2200 + current_frame_info.extra_spill_size);
2202 /* Save the predicate register block either in a register or in memory. */
2203 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2205 reg = gen_rtx_REG (DImode, PR_REG (0));
2206 if (current_frame_info.reg_save_pr != 0)
2208 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2209 insn = emit_move_insn (alt_reg, reg);
2211 /* ??? Denote pr spill/fill by a DImode move that modifies all
2212 64 hard registers. */
2213 RTX_FRAME_RELATED_P (insn) = 1;
2215 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2216 gen_rtx_SET (VOIDmode, alt_reg, reg),
2219 /* Even if we're not going to generate an epilogue, we still
2220 need to save the register so that EH works. */
2222 emit_insn (gen_prologue_use (alt_reg));
2226 alt_regno = next_scratch_gr_reg ();
2227 alt_reg = gen_rtx_REG (DImode, alt_regno);
2228 insn = emit_move_insn (alt_reg, reg);
2229 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2234 /* Handle AR regs in numerical order. All of them get special handling. */
2235 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2236 && current_frame_info.reg_save_ar_unat == 0)
2238 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2239 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2243 /* The alloc insn already copied ar.pfs into a general register. The
2244 only thing we have to do now is copy that register to a stack slot
2245 if we'd not allocated a local register for the job. */
2246 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2247 && current_frame_info.reg_save_ar_pfs == 0)
2249 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2250 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2254 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2256 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2257 if (current_frame_info.reg_save_ar_lc != 0)
2259 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2260 insn = emit_move_insn (alt_reg, reg);
2261 RTX_FRAME_RELATED_P (insn) = 1;
2263 /* Even if we're not going to generate an epilogue, we still
2264 need to save the register so that EH works. */
2266 emit_insn (gen_prologue_use (alt_reg));
2270 alt_regno = next_scratch_gr_reg ();
2271 alt_reg = gen_rtx_REG (DImode, alt_regno);
2272 emit_move_insn (alt_reg, reg);
2273 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2278 if (current_frame_info.reg_save_gp)
2280 insn = emit_move_insn (gen_rtx_REG (DImode,
2281 current_frame_info.reg_save_gp),
2282 pic_offset_table_rtx);
2283 /* We don't know for sure yet if this is actually needed, since
2284 we've not split the PIC call patterns. If all of the calls
2285 are indirect, and not followed by any uses of the gp, then
2286 this save is dead. Allow it to go away. */
2288 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2291 /* We should now be at the base of the gr/br/fr spill area. */
2292 if (cfa_off != (current_frame_info.spill_cfa_off
2293 + current_frame_info.spill_size))
2296 /* Spill all general registers. */
2297 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2298 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2300 reg = gen_rtx_REG (DImode, regno);
2301 do_spill (gen_gr_spill, reg, cfa_off, reg);
2305 /* Handle BR0 specially -- it may be getting stored permanently in
2306 some GR register. */
2307 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2309 reg = gen_rtx_REG (DImode, BR_REG (0));
2310 if (current_frame_info.reg_save_b0 != 0)
2312 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2313 insn = emit_move_insn (alt_reg, reg);
2314 RTX_FRAME_RELATED_P (insn) = 1;
2316 /* Even if we're not going to generate an epilogue, we still
2317 need to save the register so that EH works. */
2319 emit_insn (gen_prologue_use (alt_reg));
2323 alt_regno = next_scratch_gr_reg ();
2324 alt_reg = gen_rtx_REG (DImode, alt_regno);
2325 emit_move_insn (alt_reg, reg);
2326 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2331 /* Spill the rest of the BR registers. */
2332 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2333 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2335 alt_regno = next_scratch_gr_reg ();
2336 alt_reg = gen_rtx_REG (DImode, alt_regno);
2337 reg = gen_rtx_REG (DImode, regno);
2338 emit_move_insn (alt_reg, reg);
2339 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2343 /* Align the frame and spill all FR registers. */
2344 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2345 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2349 reg = gen_rtx_REG (XFmode, regno);
2350 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2354 if (cfa_off != current_frame_info.spill_cfa_off)
2357 finish_spill_pointers ();
2360 /* Called after register allocation to add any instructions needed for the
2361 epilogue. Using an epilogue insn is favored compared to putting all of the
2362 instructions in output_function_epilogue(), since it allows the scheduler
2363 to intermix instructions with the restores of the caller saved registers. In
2364 some cases, it might be necessary to emit a barrier instruction as the last
2365 insn to prevent such scheduling. */
2368 ia64_expand_epilogue (int sibcall_p)
2370 rtx insn, reg, alt_reg, ar_unat_save_reg;
2371 int regno, alt_regno, cfa_off;
2373 ia64_compute_frame_size (get_frame_size ());
2375 /* If there is a frame pointer, then we use it instead of the stack
2376 pointer, so that the stack pointer does not need to be valid when
2377 the epilogue starts. See EXIT_IGNORE_STACK. */
2378 if (frame_pointer_needed)
2379 setup_spill_pointers (current_frame_info.n_spilled,
2380 hard_frame_pointer_rtx, 0);
2382 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2383 current_frame_info.total_size);
2385 if (current_frame_info.total_size != 0)
2387 /* ??? At this point we must generate a magic insn that appears to
2388 modify the spill iterators and the frame pointer. This would
2389 allow the most scheduling freedom. For now, just hard stop. */
2390 emit_insn (gen_blockage ());
2393 /* Locate the bottom of the register save area. */
2394 cfa_off = (current_frame_info.spill_cfa_off
2395 + current_frame_info.spill_size
2396 + current_frame_info.extra_spill_size);
2398 /* Restore the predicate registers. */
2399 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2401 if (current_frame_info.reg_save_pr != 0)
2402 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2405 alt_regno = next_scratch_gr_reg ();
2406 alt_reg = gen_rtx_REG (DImode, alt_regno);
2407 do_restore (gen_movdi_x, alt_reg, cfa_off);
2410 reg = gen_rtx_REG (DImode, PR_REG (0));
2411 emit_move_insn (reg, alt_reg);
2414 /* Restore the application registers. */
2416 /* Load the saved unat from the stack, but do not restore it until
2417 after the GRs have been restored. */
2418 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2420 if (current_frame_info.reg_save_ar_unat != 0)
2422 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2425 alt_regno = next_scratch_gr_reg ();
2426 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2427 current_frame_info.gr_used_mask |= 1 << alt_regno;
2428 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2433 ar_unat_save_reg = NULL_RTX;
2435 if (current_frame_info.reg_save_ar_pfs != 0)
2437 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2438 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2439 emit_move_insn (reg, alt_reg);
2441 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2443 alt_regno = next_scratch_gr_reg ();
2444 alt_reg = gen_rtx_REG (DImode, alt_regno);
2445 do_restore (gen_movdi_x, alt_reg, cfa_off);
2447 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2448 emit_move_insn (reg, alt_reg);
2451 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2453 if (current_frame_info.reg_save_ar_lc != 0)
2454 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2457 alt_regno = next_scratch_gr_reg ();
2458 alt_reg = gen_rtx_REG (DImode, alt_regno);
2459 do_restore (gen_movdi_x, alt_reg, cfa_off);
2462 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2463 emit_move_insn (reg, alt_reg);
2466 /* We should now be at the base of the gr/br/fr spill area. */
2467 if (cfa_off != (current_frame_info.spill_cfa_off
2468 + current_frame_info.spill_size))
2471 /* The GP may be stored on the stack in the prologue, but it's
2472 never restored in the epilogue. Skip the stack slot. */
2473 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2476 /* Restore all general registers. */
2477 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2478 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2480 reg = gen_rtx_REG (DImode, regno);
2481 do_restore (gen_gr_restore, reg, cfa_off);
2485 /* Restore the branch registers. Handle B0 specially, as it may
2486 have gotten stored in some GR register. */
2487 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2489 if (current_frame_info.reg_save_b0 != 0)
2490 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2493 alt_regno = next_scratch_gr_reg ();
2494 alt_reg = gen_rtx_REG (DImode, alt_regno);
2495 do_restore (gen_movdi_x, alt_reg, cfa_off);
2498 reg = gen_rtx_REG (DImode, BR_REG (0));
2499 emit_move_insn (reg, alt_reg);
2502 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2503 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2505 alt_regno = next_scratch_gr_reg ();
2506 alt_reg = gen_rtx_REG (DImode, alt_regno);
2507 do_restore (gen_movdi_x, alt_reg, cfa_off);
2509 reg = gen_rtx_REG (DImode, regno);
2510 emit_move_insn (reg, alt_reg);
2513 /* Restore floating point registers. */
2514 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2515 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2519 reg = gen_rtx_REG (XFmode, regno);
2520 do_restore (gen_fr_restore_x, reg, cfa_off);
2524 /* Restore ar.unat for real. */
2525 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2527 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2528 emit_move_insn (reg, ar_unat_save_reg);
2531 if (cfa_off != current_frame_info.spill_cfa_off)
2534 finish_spill_pointers ();
2536 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2538 /* ??? At this point we must generate a magic insn that appears to
2539 modify the spill iterators, the stack pointer, and the frame
2540 pointer. This would allow the most scheduling freedom. For now, just hard stop. */
2542 emit_insn (gen_blockage ());
2545 if (cfun->machine->ia64_eh_epilogue_sp)
2546 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2547 else if (frame_pointer_needed)
2549 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2550 RTX_FRAME_RELATED_P (insn) = 1;
2552 else if (current_frame_info.total_size)
2554 rtx offset, frame_size_rtx;
2556 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2557 if (CONST_OK_FOR_I (current_frame_info.total_size))
2558 offset = frame_size_rtx;
2561 regno = next_scratch_gr_reg ();
2562 offset = gen_rtx_REG (DImode, regno);
2563 emit_move_insn (offset, frame_size_rtx);
2566 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2569 RTX_FRAME_RELATED_P (insn) = 1;
2570 if (GET_CODE (offset) != CONST_INT)
2573 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2574 gen_rtx_SET (VOIDmode,
2576 gen_rtx_PLUS (DImode,
2583 if (cfun->machine->ia64_eh_epilogue_bsp)
2584 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2587 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2590 int fp = GR_REG (2);
2591 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
2592 first available call-clobbered register. If there was a frame pointer
2593 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2594 so we have to make sure we're using the string "r2" when emitting
2595 the register name for the assembler. */
2596 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2597 fp = HARD_FRAME_POINTER_REGNUM;
2599 /* We must emit an alloc to force the input registers to become output
2600 registers. Otherwise, if the callee tries to pass its parameters
2601 through to another call without an intervening alloc, then these values get lost. */
2603 /* ??? We don't need to preserve all input registers. We only need to
2604 preserve those input registers used as arguments to the sibling call.
2605 It is unclear how to compute that number here. */
2606 if (current_frame_info.n_input_regs != 0)
2608 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
2609 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2610 const0_rtx, const0_rtx,
2611 n_inputs, const0_rtx));
2612 RTX_FRAME_RELATED_P (insn) = 1;
2617 /* Return 1 if br.ret can do all the work required to return from a function. */
2621 ia64_direct_return (void)
2623 if (reload_completed && ! frame_pointer_needed)
2625 ia64_compute_frame_size (get_frame_size ());
2627 return (current_frame_info.total_size == 0
2628 && current_frame_info.n_spilled == 0
2629 && current_frame_info.reg_save_b0 == 0
2630 && current_frame_info.reg_save_pr == 0
2631 && current_frame_info.reg_save_ar_pfs == 0
2632 && current_frame_info.reg_save_ar_unat == 0
2633 && current_frame_info.reg_save_ar_lc == 0);
2638 /* Return the magic cookie that we use to hold the return address
2639 during early compilation. */
2642 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
2646 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
2649 /* Split this value after reload, now that we know where the return
2650 address is saved. */
2653 ia64_split_return_addr_rtx (rtx dest)
2657 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2659 if (current_frame_info.reg_save_b0 != 0)
2660 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2666 /* Compute offset from CFA for BR0. */
2667 /* ??? Must be kept in sync with ia64_expand_prologue. */
2668 off = (current_frame_info.spill_cfa_off
2669 + current_frame_info.spill_size);
2670 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2671 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2674 /* Convert CFA offset to a register based offset. */
2675 if (frame_pointer_needed)
2676 src = hard_frame_pointer_rtx;
2679 src = stack_pointer_rtx;
2680 off += current_frame_info.total_size;
2683 /* Load address into scratch register. */
2684 if (CONST_OK_FOR_I (off))
2685 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
2688 emit_move_insn (dest, GEN_INT (off));
2689 emit_insn (gen_adddi3 (dest, src, dest));
2692 src = gen_rtx_MEM (Pmode, dest);
2696 src = gen_rtx_REG (DImode, BR_REG (0));
2698 emit_move_insn (dest, src);
2702 ia64_hard_regno_rename_ok (int from, int to)
2704 /* Don't clobber any of the registers we reserved for the prologue. */
2705 if (to == current_frame_info.reg_fp
2706 || to == current_frame_info.reg_save_b0
2707 || to == current_frame_info.reg_save_pr
2708 || to == current_frame_info.reg_save_ar_pfs
2709 || to == current_frame_info.reg_save_ar_unat
2710 || to == current_frame_info.reg_save_ar_lc)
2713 if (from == current_frame_info.reg_fp
2714 || from == current_frame_info.reg_save_b0
2715 || from == current_frame_info.reg_save_pr
2716 || from == current_frame_info.reg_save_ar_pfs
2717 || from == current_frame_info.reg_save_ar_unat
2718 || from == current_frame_info.reg_save_ar_lc)
2721 /* Don't use output registers outside the register frame. */
2722 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
2725 /* Retain even/oddness on predicate register pairs. */
2726 if (PR_REGNO_P (from) && PR_REGNO_P (to))
2727 return (from & 1) == (to & 1);
2732 /* Target hook for assembling integer objects. Handle word-sized
2733 aligned objects and detect the cases when @fptr is needed. */
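/* For example (user code, not part of this file), a static
   initializer that takes the address of a function,

       extern void handler (void);
       void (*callback) (void) = handler;

   must be emitted as "data8 @fptr(handler)" so that the linker
   materializes a proper function descriptor instead of storing a raw
   code address.  */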
2736 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
2738 if (size == POINTER_SIZE / BITS_PER_UNIT
2740 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
2741 && GET_CODE (x) == SYMBOL_REF
2742 && SYMBOL_REF_FUNCTION_P (x))
2744 if (POINTER_SIZE == 32)
2745 fputs ("\tdata4\t@fptr(", asm_out_file);
2747 fputs ("\tdata8\t@fptr(", asm_out_file);
2748 output_addr_const (asm_out_file, x);
2749 fputs (")\n", asm_out_file);
2752 return default_assemble_integer (x, size, aligned_p);
2755 /* Emit the function prologue. */
2758 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2760 int mask, grsave, grsave_prev;
2762 if (current_frame_info.need_regstk)
2763 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
2764 current_frame_info.n_input_regs,
2765 current_frame_info.n_local_regs,
2766 current_frame_info.n_output_regs,
2767 current_frame_info.n_rotate_regs);
2769 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2772 /* Emit the .prologue directive. */
2775 grsave = grsave_prev = 0;
2776 if (current_frame_info.reg_save_b0 != 0)
2779 grsave = grsave_prev = current_frame_info.reg_save_b0;
2781 if (current_frame_info.reg_save_ar_pfs != 0
2782 && (grsave_prev == 0
2783 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
2786 if (grsave_prev == 0)
2787 grsave = current_frame_info.reg_save_ar_pfs;
2788 grsave_prev = current_frame_info.reg_save_ar_pfs;
2790 if (current_frame_info.reg_fp != 0
2791 && (grsave_prev == 0
2792 || current_frame_info.reg_fp == grsave_prev + 1))
2795 if (grsave_prev == 0)
2796 grsave = HARD_FRAME_POINTER_REGNUM;
2797 grsave_prev = current_frame_info.reg_fp;
2799 if (current_frame_info.reg_save_pr != 0
2800 && (grsave_prev == 0
2801 || current_frame_info.reg_save_pr == grsave_prev + 1))
2804 if (grsave_prev == 0)
2805 grsave = current_frame_info.reg_save_pr;
2808 if (mask && TARGET_GNU_AS)
2809 fprintf (file, "\t.prologue %d, %d\n", mask,
2810 ia64_dbx_register_number (grsave));
2812 fputs ("\t.prologue\n", file);
2814 /* Emit a .spill directive, if necessary, to relocate the base of
2815 the register spill area. */
2816 if (current_frame_info.spill_cfa_off != -16)
2817 fprintf (file, "\t.spill %ld\n",
2818 (long) (current_frame_info.spill_cfa_off
2819 + current_frame_info.spill_size));
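/* A sketch of typical output, with purely illustrative operand
   values:

       .regstk 2, 3, 1, 0
       .prologue 12, 33
       .spill 16

   The mask operand of .prologue encodes which of b0, ar.pfs, the
   frame pointer, and the predicates were saved to consecutive GRs,
   and grsave names the first such GR, per the ia64 unwind
   conventions.  */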
2822 /* Emit the .body directive at the scheduled end of the prologue. */
2825 ia64_output_function_end_prologue (FILE *file)
2827 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2830 fputs ("\t.body\n", file);
2833 /* Emit the function epilogue. */
2836 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
2837 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2841 if (current_frame_info.reg_fp)
2843 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2844 reg_names[HARD_FRAME_POINTER_REGNUM]
2845 = reg_names[current_frame_info.reg_fp];
2846 reg_names[current_frame_info.reg_fp] = tmp;
2848 if (! TARGET_REG_NAMES)
2850 for (i = 0; i < current_frame_info.n_input_regs; i++)
2851 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
2852 for (i = 0; i < current_frame_info.n_local_regs; i++)
2853 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
2854 for (i = 0; i < current_frame_info.n_output_regs; i++)
2855 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
2858 current_frame_info.initialized = 0;
2862 ia64_dbx_register_number (int regno)
2864 /* In ia64_expand_prologue we quite literally renamed the frame pointer
2865 from its home at loc79 to something inside the register frame. We
2866 must perform the same renumbering here for the debug info. */
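/* For example (illustrative counts): with 2 input and 3 local
   registers, in0 maps to debug register 32, loc0 to 34 (32 + 2
   inputs), and out0 to 37 (32 + 2 inputs + 3 locals), independent of
   the hard register numbers used internally.  */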
2867 if (current_frame_info.reg_fp)
2869 if (regno == HARD_FRAME_POINTER_REGNUM)
2870 regno = current_frame_info.reg_fp;
2871 else if (regno == current_frame_info.reg_fp)
2872 regno = HARD_FRAME_POINTER_REGNUM;
2875 if (IN_REGNO_P (regno))
2876 return 32 + regno - IN_REG (0);
2877 else if (LOC_REGNO_P (regno))
2878 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
2879 else if (OUT_REGNO_P (regno))
2880 return (32 + current_frame_info.n_input_regs
2881 + current_frame_info.n_local_regs + regno - OUT_REG (0));
2887 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
2889 rtx addr_reg, eight = GEN_INT (8);
2891 /* The Intel assembler requires that the global __ia64_trampoline symbol
2892 be declared explicitly. */
2895 static bool declared_ia64_trampoline = false;
2897 if (!declared_ia64_trampoline)
2899 declared_ia64_trampoline = true;
2900 (*targetm.asm_out.globalize_label) (asm_out_file,
2901 "__ia64_trampoline");
2905 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
2906 addr = convert_memory_address (Pmode, addr);
2907 fnaddr = convert_memory_address (Pmode, fnaddr);
2908 static_chain = convert_memory_address (Pmode, static_chain);
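/* A sketch of the four Pmode words stored below, where ADDR is the
   trampoline address:

       ADDR +  0:  __ia64_trampoline   \ fake descriptor: entry point
       ADDR +  8:  ADDR + 16           / and its "gp"
       ADDR + 16:  FNADDR                target function descriptor
       ADDR + 24:  STATIC_CHAIN

   The fake gp points at the last two words, which is how the libgcc
   __ia64_trampoline stub is expected to locate the real target
   descriptor and static chain.  */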
2910 /* Load up our iterator. */
2911 addr_reg = gen_reg_rtx (Pmode);
2912 emit_move_insn (addr_reg, addr);
2914 /* The first two words are the fake descriptor:
2915 __ia64_trampoline, ADDR+16. */
2916 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2917 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
2918 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2920 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2921 copy_to_reg (plus_constant (addr, 16)));
2922 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2924 /* The third word is the target descriptor. */
2925 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
2926 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2928 /* The fourth word is the static chain. */
2929 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
2932 /* Do any needed setup for a variadic function. CUM has not been updated
2933 for the last named argument which has type TYPE and mode MODE.
2935 We generate the actual spill instructions during prologue generation. */
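/* For instance, given the hypothetical declaration

       int f (int n, ...);

   only one argument slot is named, so after skipping it
   next_cum.words is 1, we record MAX_ARGUMENT_SLOTS - 1 = 7 varargs
   slots, and the prologue later spills the seven unnamed incoming GR
   argument registers.  */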
2938 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2939 tree type, int * pretend_size,
2940 int second_time ATTRIBUTE_UNUSED)
2942 CUMULATIVE_ARGS next_cum = *cum;
2944 /* Skip the current argument. */
2945 ia64_function_arg_advance (&next_cum, mode, type, 1);
2947 if (next_cum.words < MAX_ARGUMENT_SLOTS)
2949 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
2950 *pretend_size = n * UNITS_PER_WORD;
2951 cfun->machine->n_varargs = n;
2955 /* Check whether TYPE is a homogeneous floating point aggregate. If
2956 it is, return the mode of the floating point type that appears
2957 in all leaves. If it is not, return VOIDmode.
2959 An aggregate is a homogeneous floating point aggregate if all
2960 fields/elements in it have the same floating point type (e.g.,
2961 SFmode). 128-bit quad-precision floats are excluded. */
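/* Two illustrative user types (assumptions about source code, not
   about this function):

       struct rgb { float r, g, b; };         HFA; SFmode returned
       struct mix { float f; double d; };     not an HFA; VOIDmode

   The second mixes SFmode and DFmode leaves, so no single element
   mode exists.  */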
2963 static enum machine_mode
2964 hfa_element_mode (tree type, int nested)
2966 enum machine_mode element_mode = VOIDmode;
2967 enum machine_mode mode;
2968 enum tree_code code = TREE_CODE (type);
2969 int know_element_mode = 0;
2974 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
2975 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
2976 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
2977 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
2981 /* Fortran complex types are supposed to be HFAs, so we need to handle
2982 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
2985 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
2986 && TYPE_MODE (type) != TCmode)
2987 return GET_MODE_INNER (TYPE_MODE (type));
2992 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
2993 mode if this is contained within an aggregate. */
2994 if (nested && TYPE_MODE (type) != TFmode)
2995 return TYPE_MODE (type);
3000 return hfa_element_mode (TREE_TYPE (type), 1);
3004 case QUAL_UNION_TYPE:
3005 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3007 if (TREE_CODE (t) != FIELD_DECL)
3010 mode = hfa_element_mode (TREE_TYPE (t), 1);
3011 if (know_element_mode)
3013 if (mode != element_mode)
3016 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3020 know_element_mode = 1;
3021 element_mode = mode;
3024 return element_mode;
3027 /* If we reach here, we probably have some front-end specific type
3028 that the backend doesn't know about. This can happen via the
3029 aggregate_value_p call in init_function_start. All we can do is
3030 ignore unknown tree types. */
3037 /* Return the number of words required to hold a quantity of TYPE and MODE
3038 when passed as an argument. */
3040 ia64_function_arg_words (tree type, enum machine_mode mode)
3044 if (mode == BLKmode)
3045 words = int_size_in_bytes (type);
3047 words = GET_MODE_SIZE (mode);
3049 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3052 /* Return the number of registers that should be skipped so the current
3053 argument (described by TYPE and WORDS) will be properly aligned.
3055 Integer and float arguments larger than 8 bytes start at the next
3056 even boundary. Aggregates larger than 8 bytes start at the next
3057 even boundary if the aggregate has 16 byte alignment. Note that
3058 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3059 but are still to be aligned in registers.
3061 ??? The ABI does not specify how to handle aggregates with
3062 alignment from 9 to 15 bytes, or greater than 16. We handle them
3063 all as if they had 16 byte alignment. Such aggregates can occur
3064 only if gcc extensions are used. */
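/* For example (hypothetical argument list): if cum->words is odd and
   the next argument is an aggregate with 16-byte alignment, we skip
   one slot so that it starts on an even boundary, whereas an 8-byte
   integer in the same position needs no padding.  */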
3066 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3068 if ((cum->words & 1) == 0)
3072 && TREE_CODE (type) != INTEGER_TYPE
3073 && TREE_CODE (type) != REAL_TYPE)
3074 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3079 /* Return rtx for register where argument is passed, or zero if it is passed on the stack. */
3081 /* ??? 128-bit quad-precision floats are always passed in general registers. */
3085 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3086 int named, int incoming)
3088 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3089 int words = ia64_function_arg_words (type, mode);
3090 int offset = ia64_function_arg_offset (cum, type, words);
3091 enum machine_mode hfa_mode = VOIDmode;
3093 /* If all argument slots are used, then it must go on the stack. */
3094 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3097 /* Check for and handle homogeneous FP aggregates. */
3099 hfa_mode = hfa_element_mode (type, 0);
3101 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3102 and unprototyped hfas are passed specially. */
3103 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3107 int fp_regs = cum->fp_regs;
3108 int int_regs = cum->words + offset;
3109 int hfa_size = GET_MODE_SIZE (hfa_mode);
3113 /* If prototyped, pass it in FR regs then GR regs.
3114 If not prototyped, pass it in both FR and GR regs.
3116 If this is an SFmode aggregate, then it is possible to run out of
3117 FR regs while GR regs are still left. In that case, we pass the
3118 remaining part in the GR regs. */
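/* A concrete illustration (hypothetical, assuming no prior arguments):
   an unprototyped HFA of three floats comes back from this function as
   a PARALLEL holding SFmode pieces in the first three FR argument
   registers plus DImode copies of the whole 12-byte aggregate in the
   first two GR argument slots.  */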
3120 /* Fill the FP regs. We do this always. We stop if we reach the end
3121 of the argument, the last FP register, or the last argument slot. */
3123 byte_size = ((mode == BLKmode)
3124 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3125 args_byte_size = int_regs * UNITS_PER_WORD;
3127 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3128 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3130 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3131 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3135 args_byte_size += hfa_size;
3139 /* If no prototype, then the whole thing must go in GR regs. */
3140 if (! cum->prototype)
3142 /* If this is an SFmode aggregate, then we might have some left over
3143 that needs to go in GR regs. */
3144 else if (byte_size != offset)
3145 int_regs += offset / UNITS_PER_WORD;
3147 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3149 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3151 enum machine_mode gr_mode = DImode;
3152 unsigned int gr_size;
3154 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3155 then this goes in a GR reg left adjusted/little endian, right
3156 adjusted/big endian. */
3157 /* ??? Currently this is handled wrong, because 4-byte hunks are
3158 always right adjusted/little endian. */
3161 /* If we have an even 4 byte hunk because the aggregate is a
3162 multiple of 4 bytes in size, then this goes in a GR reg right
3163 adjusted/little endian. */
3164 else if (byte_size - offset == 4)
3167 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3168 gen_rtx_REG (gr_mode, (basereg
3172 gr_size = GET_MODE_SIZE (gr_mode);
3174 if (gr_size == UNITS_PER_WORD
3175 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3177 else if (gr_size > UNITS_PER_WORD)
3178 int_regs += gr_size / UNITS_PER_WORD;
3180 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3183 /* Integral and aggregates go in general registers. If we have run out of
3184 FR registers, then FP values must also go in general registers. This can
3185 happen when we have a SFmode HFA. */
3186 else if (mode == TFmode || mode == TCmode
3187 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3189 int byte_size = ((mode == BLKmode)
3190 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3191 if (BYTES_BIG_ENDIAN
3192 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3193 && byte_size < UNITS_PER_WORD
3196 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3197 gen_rtx_REG (DImode,
3198 (basereg + cum->words
3201 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3204 return gen_rtx_REG (mode, basereg + cum->words + offset);
3208 /* If there is a prototype, then FP values go in a FR register when
3209 named, and in a GR register when unnamed. */
3210 else if (cum->prototype)
3213 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3214 /* In big-endian mode, an anonymous SFmode value must be represented
3215 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3216 the value into the high half of the general register. */
3217 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3218 return gen_rtx_PARALLEL (mode,
3220 gen_rtx_EXPR_LIST (VOIDmode,
3221 gen_rtx_REG (DImode, basereg + cum->words + offset),
3224 return gen_rtx_REG (mode, basereg + cum->words + offset);
3226 /* If there is no prototype, then FP values go in both FR and GR registers. */
3230 /* See comment above. */
3231 enum machine_mode inner_mode =
3232 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3234 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3235 gen_rtx_REG (mode, (FR_ARG_FIRST
3238 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3239 gen_rtx_REG (inner_mode,
3240 (basereg + cum->words
3244 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3248 /* Return number of words, at the beginning of the argument, that must be
3249 put in registers. 0 if the argument is entirely in registers or entirely on the stack. */
3253 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3254 tree type, int named ATTRIBUTE_UNUSED)
3256 int words = ia64_function_arg_words (type, mode);
3257 int offset = ia64_function_arg_offset (cum, type, words);
3259 /* If all argument slots are used, then it must go on the stack. */
3260 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3263 /* It doesn't matter whether the argument goes in FR or GR regs. If
3264 it fits within the 8 argument slots, then it goes entirely in
3265 registers. If it extends past the last argument slot, then the rest
3266 goes on the stack. */
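/* Example with made-up numbers: if cum->words is 6 and there is no
   alignment padding, a four-word argument overflows the 8 slots, so
   8 - 6 = 2 words travel in registers and the remaining two go on
   the stack.  */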
3268 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3271 return MAX_ARGUMENT_SLOTS - cum->words - offset;
3274 /* Update CUM to point after this argument. This is patterned after
3275 ia64_function_arg. */
3278 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3279 tree type, int named)
3281 int words = ia64_function_arg_words (type, mode);
3282 int offset = ia64_function_arg_offset (cum, type, words);
3283 enum machine_mode hfa_mode = VOIDmode;
3285 /* If all arg slots are already full, then there is nothing to do. */
3286 if (cum->words >= MAX_ARGUMENT_SLOTS)
3289 cum->words += words + offset;
3291 /* Check for and handle homogeneous FP aggregates. */
3293 hfa_mode = hfa_element_mode (type, 0);
3295 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3296 and unprototyped hfas are passed specially. */
3297 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3299 int fp_regs = cum->fp_regs;
3300 /* This is the original value of cum->words + offset. */
3301 int int_regs = cum->words - words;
3302 int hfa_size = GET_MODE_SIZE (hfa_mode);
3306 /* If prototyped, pass it in FR regs then GR regs.
3307 If not prototyped, pass it in both FR and GR regs.
3309 If this is an SFmode aggregate, then it is possible to run out of
3310 FR regs while GR regs are still left. In that case, we pass the
3311 remaining part in the GR regs. */
3313 /* Fill the FP regs. We do this always. We stop if we reach the end
3314 of the argument, the last FP register, or the last argument slot. */
3316 byte_size = ((mode == BLKmode)
3317 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3318 args_byte_size = int_regs * UNITS_PER_WORD;
3320 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3321 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3324 args_byte_size += hfa_size;
3328 cum->fp_regs = fp_regs;
3331 /* Integral and aggregates go in general registers. So do TFmode FP values.
3332 If we have run out of FR registers, then other FP values must also go in
3333 general registers. This can happen when we have a SFmode HFA. */
3334 else if (mode == TFmode || mode == TCmode
3335 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3336 cum->int_regs = cum->words;
3338 /* If there is a prototype, then FP values go in a FR register when
3339 named, and in a GR register when unnamed. */
3340 else if (cum->prototype)
3343 cum->int_regs = cum->words;
3345 /* ??? Complex types should not reach here. */
3346 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3348 /* If there is no prototype, then FP values go in both FR and GR registers. */
3352 /* ??? Complex types should not reach here. */
3353 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3354 cum->int_regs = cum->words;
3358 /* Arguments with alignment larger than 8 bytes start at the next even
3359 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
3360 even though their normal alignment is 8 bytes. See ia64_function_arg. */
3363 ia64_function_arg_boundary (enum machine_mode mode, tree type)
3366 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
3367 return PARM_BOUNDARY * 2;
3371 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
3372 return PARM_BOUNDARY * 2;
3374 return PARM_BOUNDARY;
3377 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
3378 return PARM_BOUNDARY * 2;
3380 return PARM_BOUNDARY;
3383 /* Variable sized types are passed by reference. */
3384 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3387 ia64_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3388 enum machine_mode mode ATTRIBUTE_UNUSED,
3389 tree type, bool named ATTRIBUTE_UNUSED)
3391 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3394 /* True if it is OK to do sibling call optimization for the specified
3395 call expression EXP. DECL will be the called function, or NULL if
3396 this is an indirect call. */
3398 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3400 /* We can't perform a sibcall if the current function has the syscall_linkage attribute. */
3402 if (lookup_attribute ("syscall_linkage",
3403 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
3406 /* We must always return with our current GP. This means we can
3407 only sibcall to functions defined in the current module. */
3408 return decl && (*targetm.binds_local_p) (decl);
3412 /* Implement va_arg. */
3415 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3417 /* Variable sized types are passed by reference. */
3418 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
3420 tree ptrtype = build_pointer_type (type);
3421 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3422 return build_va_arg_indirect_ref (addr);
3425 /* Aggregate arguments with alignment larger than 8 bytes start at
3426 the next even boundary. Integer and floating point arguments
3427 do so if they are larger than 8 bytes, whether or not they are
3428 also aligned larger than 8 bytes. */
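/* With UNITS_PER_WORD == 8, the computation below amounts to
   valist = (valist + 15) & -16, i.e. rounding the argument pointer
   up to the next 16-byte boundary before the fetch.  */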
3429 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3430 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3432 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3433 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
3434 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3435 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
3436 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3437 gimplify_and_add (t, pre_p);
3440 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3443 /* Return 1 if the function return value is returned in memory. Return 0 if it is in a register. */
3447 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3449 enum machine_mode mode;
3450 enum machine_mode hfa_mode;
3451 HOST_WIDE_INT byte_size;
3453 mode = TYPE_MODE (valtype);
3454 byte_size = GET_MODE_SIZE (mode);
3455 if (mode == BLKmode)
3457 byte_size = int_size_in_bytes (valtype);
3462 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
3464 hfa_mode = hfa_element_mode (valtype, 0);
3465 if (hfa_mode != VOIDmode)
3467 int hfa_size = GET_MODE_SIZE (hfa_mode);
3469 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3474 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3480 /* Return rtx for register that holds the function return value. */
3483 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3485 enum machine_mode mode;
3486 enum machine_mode hfa_mode;
3488 mode = TYPE_MODE (valtype);
3489 hfa_mode = hfa_element_mode (valtype, 0);
3491 if (hfa_mode != VOIDmode)
3499 hfa_size = GET_MODE_SIZE (hfa_mode);
3500 byte_size = ((mode == BLKmode)
3501 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3503 for (i = 0; offset < byte_size; i++)
3505 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3506 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3510 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3512 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
3513 return gen_rtx_REG (mode, FR_ARG_FIRST);
3516 if (BYTES_BIG_ENDIAN
3517 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3525 bytesize = int_size_in_bytes (valtype);
3526 for (i = 0; offset < bytesize; i++)
3528 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3529 gen_rtx_REG (DImode,
3532 offset += UNITS_PER_WORD;
3534 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3537 return gen_rtx_REG (mode, GR_RET_FIRST);
3541 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3542 We need to emit DTP-relative relocations. */
3545 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3549 fputs ("\tdata8.ua\t@dtprel(", file);
3550 output_addr_const (file, x);
3554 /* Print a memory address as an operand to reference that memory location. */
3556 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3557 also call this from ia64_print_operand for memory addresses. */
3560 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3561 rtx address ATTRIBUTE_UNUSED)
3565 /* Print an operand to an assembler instruction.
3566 C Swap and print a comparison operator.
3567 D Print an FP comparison operator.
3568 E Print 32 - constant, for SImode shifts as extract.
3569 e Print 64 - constant, for DImode rotates.
3570 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3571 a floating point register emitted normally.
3572 I Invert a predicate register by adding 1.
3573 J Select the proper predicate register for a condition.
3574 j Select the inverse predicate register for a condition.
3575 O Append .acq for volatile load.
3576 P Postincrement of a MEM.
3577 Q Append .rel for volatile store.
3578 S Shift amount for shladd instruction.
3579 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3580 for Intel assembler.
3581 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3582 for Intel assembler.
3583 r Print register name, or constant 0 as r0. HP compatibility for the Linux kernel. */
3586 ia64_print_operand (FILE * file, rtx x, int code)
3593 /* Handled below. */
3598 enum rtx_code c = swap_condition (GET_CODE (x));
3599 fputs (GET_RTX_NAME (c), file);
3604 switch (GET_CODE (x))
3616 str = GET_RTX_NAME (GET_CODE (x));
3623 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3627 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3631 if (x == CONST0_RTX (GET_MODE (x)))
3632 str = reg_names [FR_REG (0)];
3633 else if (x == CONST1_RTX (GET_MODE (x)))
3634 str = reg_names [FR_REG (1)];
3635 else if (GET_CODE (x) == REG)
3636 str = reg_names [REGNO (x)];
3643 fputs (reg_names [REGNO (x) + 1], file);
3649 unsigned int regno = REGNO (XEXP (x, 0));
3650 if (GET_CODE (x) == EQ)
3654 fputs (reg_names [regno], file);
3659 if (MEM_VOLATILE_P (x))
3660 fputs (".acq", file);
3665 HOST_WIDE_INT value;
3667 switch (GET_CODE (XEXP (x, 0)))
3673 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
3674 if (GET_CODE (x) == CONST_INT)
3676 else if (GET_CODE (x) == REG)
3678 fprintf (file, ", %s", reg_names[REGNO (x)]);
3686 value = GET_MODE_SIZE (GET_MODE (x));
3690 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
3694 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
3699 if (MEM_VOLATILE_P (x))
3700 fputs (".rel", file);
3704 fprintf (file, "%d", exact_log2 (INTVAL (x)));
3708 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3710 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
3716 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3718 const char *prefix = "0x";
3719 if (INTVAL (x) & 0x80000000)
3721 fprintf (file, "0xffffffff");
3724 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
3730 /* If this operand is the constant zero, write it as register zero.
3731 Any register, zero, or CONST_INT value is OK here. */
3732 if (GET_CODE (x) == REG)
3733 fputs (reg_names[REGNO (x)], file);
3734 else if (x == CONST0_RTX (GET_MODE (x)))
3736 else if (GET_CODE (x) == CONST_INT)
3737 output_addr_const (file, x);
3739 output_operand_lossage ("invalid %%r value");
3746 /* For conditional branches, returns or calls, substitute
3747 sptk, dptk, dpnt, or spnt for %s. */
3748 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
3751 int pred_val = INTVAL (XEXP (x, 0));
3753 /* Guess top and bottom 10% statically predicted. */
3754 if (pred_val < REG_BR_PROB_BASE / 50)
3756 else if (pred_val < REG_BR_PROB_BASE / 2)
3758 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
3763 else if (GET_CODE (current_output_insn) == CALL_INSN)
3768 fputs (which, file);
3773 x = current_insn_predicate;
3776 unsigned int regno = REGNO (XEXP (x, 0));
3777 if (GET_CODE (x) == EQ)
3779 fprintf (file, "(%s) ", reg_names [regno]);
3784 output_operand_lossage ("ia64_print_operand: unknown code");
3788 switch (GET_CODE (x))
3790 /* This happens for the spill/restore instructions. */
3795 /* ... fall through ... */
3798 fputs (reg_names [REGNO (x)], file);
3803 rtx addr = XEXP (x, 0);
3804 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3805 addr = XEXP (addr, 0);
3806 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
3811 output_addr_const (file, x);
3818 /* Compute a (partial) cost for rtx X. Return true if the complete
3819 cost has been computed, and false if subexpressions should be
3820 scanned. In either case, *TOTAL contains the cost result. */
3821 /* ??? This is incomplete. */
3824 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
3832 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
3835 if (CONST_OK_FOR_I (INTVAL (x)))
3837 else if (CONST_OK_FOR_J (INTVAL (x)))
3840 *total = COSTS_N_INSNS (1);
3843 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
3846 *total = COSTS_N_INSNS (1);
3851 *total = COSTS_N_INSNS (1);
3857 *total = COSTS_N_INSNS (3);
3861 /* For multiplies wider than HImode, we have to go to the FPU,
3862 which normally involves copies. Plus there's the latency
3863 of the multiply itself, and the latency of the instructions to
3864 transfer integer regs to FP regs. */
3865 /* ??? Check for FP mode. */
3866 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
3867 *total = COSTS_N_INSNS (10);
3869 *total = COSTS_N_INSNS (2);
3877 *total = COSTS_N_INSNS (1);
3884 /* We make divide expensive, so that divide-by-constant will be
3885 optimized to a multiply. */
3886 *total = COSTS_N_INSNS (60);
3894 /* Calculate the cost of moving data from a register in class FROM to
3895 one in class TO, using MODE. */
3898 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
3901 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
3902 if (to == ADDL_REGS)
3904 if (from == ADDL_REGS)
3907 /* All costs are symmetric, so reduce cases by putting the
3908 lower-numbered class as the destination. */
3911 enum reg_class tmp = to;
3912 to = from, from = tmp;
3915 /* Moving from FR<->GR in XFmode must be more expensive than 2,
3916 so that we get secondary memory reloads. Between FR_REGS,
3917 we have to make this at least as expensive as MEMORY_MOVE_COST
3918 to avoid spectacularly poor register class preferencing. */
3921 if (to != GR_REGS || from != GR_REGS)
3922 return MEMORY_MOVE_COST (mode, to, 0);
3930 /* Moving between PR registers takes two insns. */
3931 if (from == PR_REGS)
3933 /* Moving between PR and anything but GR is impossible. */
3934 if (from != GR_REGS)
3935 return MEMORY_MOVE_COST (mode, to, 0);
3939 /* Moving between BR and anything but GR is impossible. */
3940 if (from != GR_REGS && from != GR_AND_BR_REGS)
3941 return MEMORY_MOVE_COST (mode, to, 0);
3946 /* Moving between AR and anything but GR is impossible. */
3947 if (from != GR_REGS)
3948 return MEMORY_MOVE_COST (mode, to, 0);
3953 case GR_AND_FR_REGS:
3954 case GR_AND_BR_REGS:
3965 /* This function returns the register class required for a secondary
3966 register when copying between one of the registers in CLASS and X,
3967 using MODE. A return value of NO_REGS means that no secondary register is required. */
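/* For instance, a CONST_INT destined for an FR register cannot be
   loaded directly; the FR_REGS handling below asks for GR_REGS so
   that reload moves the constant through a general register first.  */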
3971 ia64_secondary_reload_class (enum reg_class class,
3972 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3976 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3977 regno = true_regnum (x);
3984 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
3985 interaction. We end up with two pseudos with overlapping lifetimes
3986 both of which are equiv to the same constant, and both of which need
3987 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
3988 changes depending on the path length, which means the qty_first_reg
3989 check in make_regs_eqv can give different answers at different times.
3990 At some point I'll probably need a reload_indi pattern to handle
3993 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
3994 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
3995 non-general registers for good measure. */
3996 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
3999 /* This is needed if a pseudo used as a call_operand gets spilled to a stack slot. */
4001 if (GET_CODE (x) == MEM)
4006 /* Need to go through general registers to get to other class regs. */
4007 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4010 /* This can happen when a paradoxical subreg is an operand to the
4012 /* ??? This shouldn't be necessary after instruction scheduling is
4013 enabled, because paradoxical subregs are not accepted by
4014 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4015 stop the paradoxical subreg stupidity in the *_operand functions
4017 if (GET_CODE (x) == MEM
4018 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4019 || GET_MODE (x) == QImode))
4022 /* This can happen because of the ior/and/etc patterns that accept FP
4023 registers as operands. If the third operand is a constant, then it
4024 needs to be reloaded into a FP register. */
4025 if (GET_CODE (x) == CONST_INT)
4028 /* This can happen because of register elimination in a muldi3 insn.
4029 E.g. `26107 * (unsigned long)&u'. */
4030 if (GET_CODE (x) == PLUS)
4035 /* ??? This happens if we cse/gcse a BImode value across a call,
4036 and the function has a nonlocal goto. This is because global
4037 does not allocate call crossing pseudos to hard registers when
4038 current_function_has_nonlocal_goto is true. This is relatively
4039 common for C++ programs that use exceptions. To reproduce,
4040 return NO_REGS and compile libstdc++. */
4041 if (GET_CODE (x) == MEM)
4044 /* This can happen when we take a BImode subreg of a DImode value,
4045 and that DImode value winds up in some non-GR register. */
4046 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4058 /* Emit text to declare externally defined variables and functions, because
4059 the Intel assembler does not support undefined externals. */
4062 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4064 int save_referenced;
4066 /* GNU as does not need anything here, but the HP linker does need
4067 something for external functions. */
4071 || TREE_CODE (decl) != FUNCTION_DECL
4072 || strstr (name, "__builtin_") == name))
4075 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4076 the linker when we do this, so we need to be careful not to do this for
4077 builtin functions which have no library equivalent. Unfortunately, we
4078 can't tell here whether or not a function will actually be called by
4079 expand_expr, so we pull in library functions even if we may not need them. */
4081 if (! strcmp (name, "__builtin_next_arg")
4082 || ! strcmp (name, "alloca")
4083 || ! strcmp (name, "__builtin_constant_p")
4084 || ! strcmp (name, "__builtin_args_info"))
4088 ia64_hpux_add_extern_decl (decl);
4091 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and restore it. */
4093 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4094 if (TREE_CODE (decl) == FUNCTION_DECL)
4095 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4096 (*targetm.asm_out.globalize_label) (file, name);
4097 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4101 /* Parse the -mfixed-range= option string. */
4104 fix_range (const char *const_str)
4107 char *str, *dash, *comma;
4109 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4110 REG2 are either register names or register numbers. The effect
4111 of this option is to mark the registers in the range from REG1 to
4112 REG2 as ``fixed'' so they won't be used by the compiler. This is
4113 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
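/* For example (an illustrative note, not part of the parser below):

-mfixed-range=f32-f127 keeps the compiler away from the
upper floating-point registers, and several comma-separated
ranges may be given, e.g. -mfixed-range=f12-f15,f32-f127.
Each register in a range is marked both fixed and call-used. */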
4115 i = strlen (const_str);
4116 str = (char *) alloca (i + 1);
4117 memcpy (str, const_str, i + 1);
4121 dash = strchr (str, '-');
4124 warning ("value of -mfixed-range must have form REG1-REG2");
4129 comma = strchr (dash + 1, ',');
4133 first = decode_reg_name (str);
4136 warning ("unknown register name: %s", str);
4140 last = decode_reg_name (dash + 1);
4143 warning ("unknown register name: %s", dash + 1);
4151 warning ("%s-%s is an empty range", str, dash + 1);
4155 for (i = first; i <= last; ++i)
4156 fixed_regs[i] = call_used_regs[i] = 1;
4166 static struct machine_function *
4167 ia64_init_machine_status (void)
4169 return ggc_alloc_cleared (sizeof (struct machine_function));
4172 /* Handle TARGET_OPTIONS switches. */
4175 ia64_override_options (void)
4179 const char *const name; /* processor name or nickname. */
4180 const enum processor_type processor;
4182 const processor_alias_table[] =
4184 {"itanium", PROCESSOR_ITANIUM},
4185 {"itanium1", PROCESSOR_ITANIUM},
4186 {"merced", PROCESSOR_ITANIUM},
4187 {"itanium2", PROCESSOR_ITANIUM2},
4188 {"mckinley", PROCESSOR_ITANIUM2},
4191 int const pta_size = ARRAY_SIZE (processor_alias_table);
4194 if (TARGET_AUTO_PIC)
4195 target_flags |= MASK_CONST_GP;
4197 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4199 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4200 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4202 warning ("cannot optimize floating point division for both latency and throughput");
4203 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4207 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4208 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4210 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4214 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4216 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4217 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4219 warning ("cannot optimize integer division for both latency and throughput");
4220 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4224 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4225 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4227 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4231 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4233 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4234 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4236 warning ("cannot optimize square root for both latency and throughput");
4237 target_flags &= ~MASK_INLINE_SQRT_THR;
4241 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4242 target_flags &= ~MASK_INLINE_SQRT_LAT;
4244 target_flags &= ~MASK_INLINE_SQRT_THR;
4248 if (TARGET_INLINE_SQRT_LAT)
4250 warning ("not yet implemented: latency-optimized inline square root");
4251 target_flags &= ~MASK_INLINE_SQRT_LAT;
4254 if (ia64_fixed_range_string)
4255 fix_range (ia64_fixed_range_string);
4257 if (ia64_tls_size_string)
4260 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4261 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4262 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4264 ia64_tls_size = tmp;
4267 if (!ia64_tune_string)
4268 ia64_tune_string = "itanium2";
4270 for (i = 0; i < pta_size; i++)
4271 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4273 ia64_tune = processor_alias_table[i].processor;
4278 error ("bad value (%s) for -mtune= switch", ia64_tune_string);
4280 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4281 flag_schedule_insns_after_reload = 0;
4283 /* Variable tracking should be run after all optimizations which change order
4284 of insns. It also needs a valid CFG. */
4285 ia64_flag_var_tracking = flag_var_tracking;
4286 flag_var_tracking = 0;
4288 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4290 init_machine_status = ia64_init_machine_status;
4293 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4294 static enum attr_type ia64_safe_type (rtx);
4296 static enum attr_itanium_class
4297 ia64_safe_itanium_class (rtx insn)
4299 if (recog_memoized (insn) >= 0)
4300 return get_attr_itanium_class (insn);
4302 return ITANIUM_CLASS_UNKNOWN;
4305 static enum attr_type
4306 ia64_safe_type (rtx insn)
4308 if (recog_memoized (insn) >= 0)
4309 return get_attr_type (insn);
4311 return TYPE_UNKNOWN;
4314 /* The following collection of routines emit instruction group stop bits as
4315 necessary to avoid dependencies. */
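/* In ia64 assembly a stop bit appears as `;;'. For example (an
illustrative sketch, not code emitted verbatim):

add r14 = r15, r16 ;; // stop bit ends the instruction group
ld8 r17 = [r14] // RAW dependency on r14 is now legal

Without the stop bit, reading r14 in the same group as the write
would violate the architecture's dependency rules. */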
4317 /* Need to track some additional registers as far as serialization is
4318 concerned so we can properly handle br.call and br.ret. We could
4319 make these registers visible to gcc, but since these registers are
4320 never explicitly used in gcc generated code, it seems wasteful to
4321 do so (plus it would make the call and return patterns needlessly
4323 #define REG_RP (BR_REG (0))
4324 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4325 /* This is used for volatile asms which may require a stop bit immediately
4326 before and after them. */
4327 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4328 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4329 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
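/* Each of the 64 AR.UNAT bits is tracked separately so that, for
example, a st8.spill to stack offset 24 only touches bit
(24 >> 3) & 63 == 3 and need not conflict with a spill to a
different slot in the same group (an illustrative note; see the
UNSPEC_GR_SPILL handling in rtx_needs_barrier below). */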
4331 /* For each register, we keep track of how it has been written in the
4332 current instruction group.
4334 If a register is written unconditionally (no qualifying predicate),
4335 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4337 If a register is written if its qualifying predicate P is true, we
4338 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4339 may be written again by the complement of P (P^1) and when this happens,
4340 WRITE_COUNT gets set to 2.
4342 The result of this is that whenever an insn attempts to write a register
4343 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4345 If a predicate register is written by a floating-point insn, we set
4346 WRITTEN_BY_FP to true.
4348 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4349 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
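/* For example (an illustrative sketch of the rules above):

cmp.eq p6, p7 = r8, r9 ;; // p6, p7 written unconditionally
(p6) mov r10 = 1 // r10 written under p6: WRITE_COUNT = 1
(p7) mov r10 = 2 // complement of p6: WRITE_COUNT becomes 2
mov r10 = 3 // r10's WRITE_COUNT is already 2, so a
// stop bit must be issued first. */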
4351 struct reg_write_state
4353 unsigned int write_count : 2;
4354 unsigned int first_pred : 16;
4355 unsigned int written_by_fp : 1;
4356 unsigned int written_by_and : 1;
4357 unsigned int written_by_or : 1;
4360 /* Cumulative info for the current instruction group. */
4361 struct reg_write_state rws_sum[NUM_REGS];
4362 /* Info for the current instruction. This gets copied to rws_sum after a
4363 stop bit is emitted. */
4364 struct reg_write_state rws_insn[NUM_REGS];
4366 /* Indicates whether this is the first instruction after a stop bit,
4367 in which case we don't need another stop bit. Without this, we hit
4368 the abort in ia64_variable_issue when scheduling an alloc. */
4369 static int first_instruction;
4371 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4372 RTL for one instruction. */
4375 unsigned int is_write : 1; /* Is register being written? */
4376 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4377 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4378 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4379 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4380 unsigned int is_sibcall : 1; /* Is this a sibling call rather than a normal call? */
4383 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4384 static int rws_access_regno (int, struct reg_flags, int);
4385 static int rws_access_reg (rtx, struct reg_flags, int);
4386 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4387 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4388 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4389 static void init_insn_group_barriers (void);
4390 static int group_barrier_needed_p (rtx);
4391 static int safe_group_barrier_needed_p (rtx);
4393 /* Update *RWS for REGNO, which is being written by the current instruction,
4394 with predicate PRED, and associated register flags in FLAGS. */
4397 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4400 rws[regno].write_count++;
4402 rws[regno].write_count = 2;
4403 rws[regno].written_by_fp |= flags.is_fp;
4404 /* ??? Not tracking and/or across differing predicates. */
4405 rws[regno].written_by_and = flags.is_and;
4406 rws[regno].written_by_or = flags.is_or;
4407 rws[regno].first_pred = pred;
4410 /* Handle an access to register REGNO of type FLAGS using predicate register
4411 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4412 a dependency with an earlier instruction in the same group. */
4415 rws_access_regno (int regno, struct reg_flags flags, int pred)
4417 int need_barrier = 0;
4419 if (regno >= NUM_REGS)
4422 if (! PR_REGNO_P (regno))
4423 flags.is_and = flags.is_or = 0;
4429 /* One insn writes same reg multiple times? */
4430 if (rws_insn[regno].write_count > 0)
4433 /* Update info for current instruction. */
4434 rws_update (rws_insn, regno, flags, pred);
4435 write_count = rws_sum[regno].write_count;
4437 switch (write_count)
4440 /* The register has not been written yet. */
4441 rws_update (rws_sum, regno, flags, pred);
4445 /* The register has been written via a predicate. If this is
4446 not a complementary predicate, then we need a barrier. */
4447 /* ??? This assumes that P and P+1 are always complementary
4448 predicates for P even. */
4449 if (flags.is_and && rws_sum[regno].written_by_and)
4451 else if (flags.is_or && rws_sum[regno].written_by_or)
4453 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4455 rws_update (rws_sum, regno, flags, pred);
4459 /* The register has been unconditionally written already. We
4461 if (flags.is_and && rws_sum[regno].written_by_and)
4463 else if (flags.is_or && rws_sum[regno].written_by_or)
4467 rws_sum[regno].written_by_and = flags.is_and;
4468 rws_sum[regno].written_by_or = flags.is_or;
4477 if (flags.is_branch)
4479 /* Branches have several RAW exceptions that allow us to avoid
4482 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4483 /* RAW dependencies on branch regs are permissible as long
4484 as the writer is a non-branch instruction. Since we
4485 never generate code that uses a branch register written
4486 by a branch instruction, handling this case is
4490 if (REGNO_REG_CLASS (regno) == PR_REGS
4491 && ! rws_sum[regno].written_by_fp)
4492 /* The predicates of a branch are available within the
4493 same insn group as long as the predicate was written by
4494 something other than a floating-point instruction. */
4498 if (flags.is_and && rws_sum[regno].written_by_and)
4500 if (flags.is_or && rws_sum[regno].written_by_or)
4503 switch (rws_sum[regno].write_count)
4506 /* The register has not been written yet. */
4510 /* The register has been written via a predicate. If this is
4511 not a complementary predicate, then we need a barrier. */
4512 /* ??? This assumes that P and P+1 are always complementary
4513 predicates for P even. */
4514 if ((rws_sum[regno].first_pred ^ 1) != pred)
4519 /* The register has been unconditionally written already. We
4529 return need_barrier;
4533 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4535 int regno = REGNO (reg);
4536 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4539 return rws_access_regno (regno, flags, pred);
4542 int need_barrier = 0;
4544 need_barrier |= rws_access_regno (regno + n, flags, pred);
4545 return need_barrier;
4549 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4550 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4553 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4555 rtx src = SET_SRC (x);
4559 switch (GET_CODE (src))
4565 if (SET_DEST (x) == pc_rtx)
4566 /* X is a conditional branch. */
4570 int is_complemented = 0;
4572 /* X is a conditional move. */
4573 rtx cond = XEXP (src, 0);
4574 if (GET_CODE (cond) == EQ)
4575 is_complemented = 1;
4576 cond = XEXP (cond, 0);
4577 if (GET_CODE (cond) != REG
4578 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4581 if (XEXP (src, 1) == SET_DEST (x)
4582 || XEXP (src, 2) == SET_DEST (x))
4584 /* X is a conditional move that conditionally writes the
4587 /* We need another complement in this case. */
4588 if (XEXP (src, 1) == SET_DEST (x))
4589 is_complemented = ! is_complemented;
4591 *ppred = REGNO (cond);
4592 if (is_complemented)
4596 /* ??? If this is a conditional write to the dest, then this
4597 instruction does not actually read one source. This probably
4598 doesn't matter, because that source is also the dest. */
4599 /* ??? Multiple writes to predicate registers are allowed
4600 if they are all AND type compares, or if they are all OR
4601 type compares. We do not generate such instructions
4604 /* ... fall through ... */
4607 if (COMPARISON_P (src)
4608 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
4609 /* Set pflags->is_fp to 1 so that we know we're dealing
4610 with a floating point comparison when processing the
4611 destination of the SET. */
4614 /* Discover if this is a parallel comparison. We only handle
4615 and.orcm and or.andcm at present, since we must retain a
4616 strict inverse on the predicate pair. */
4617 else if (GET_CODE (src) == AND)
4619 else if (GET_CODE (src) == IOR)
4626 /* Subroutine of rtx_needs_barrier; this function determines whether the
4627 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
4628 are as in rtx_needs_barrier. COND is an rtx that holds the condition
4632 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
4634 int need_barrier = 0;
4636 rtx src = SET_SRC (x);
4638 if (GET_CODE (src) == CALL)
4639 /* We don't need to worry about the result registers that
4640 get written by subroutine call. */
4641 return rtx_needs_barrier (src, flags, pred);
4642 else if (SET_DEST (x) == pc_rtx)
4644 /* X is a conditional branch. */
4645 /* ??? This seems redundant, as the caller sets this bit for
4647 flags.is_branch = 1;
4648 return rtx_needs_barrier (src, flags, pred);
4651 need_barrier = rtx_needs_barrier (src, flags, pred);
4653 /* This instruction unconditionally uses a predicate register. */
4655 need_barrier |= rws_access_reg (cond, flags, 0);
4658 if (GET_CODE (dst) == ZERO_EXTRACT)
4660 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
4661 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
4662 dst = XEXP (dst, 0);
4664 return need_barrier;
4667 /* Handle an access to rtx X of type FLAGS using predicate register
4668 PRED. Return 1 if this access creates a dependency with an earlier
4669 instruction in the same group. */
4672 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
4675 int is_complemented = 0;
4676 int need_barrier = 0;
4677 const char *format_ptr;
4678 struct reg_flags new_flags;
4686 switch (GET_CODE (x))
4689 update_set_flags (x, &new_flags, &pred, &cond);
4690 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
4691 if (GET_CODE (SET_SRC (x)) != CALL)
4693 new_flags.is_write = 1;
4694 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
4699 new_flags.is_write = 0;
4700 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4702 /* Avoid multiple register writes, in case this is a pattern with
4703 multiple CALL rtx. This avoids an abort in rws_access_reg. */
4704 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
4706 new_flags.is_write = 1;
4707 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
4708 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
4709 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4714 /* X is a predicated instruction. */
4716 cond = COND_EXEC_TEST (x);
4719 need_barrier = rtx_needs_barrier (cond, flags, 0);
4721 if (GET_CODE (cond) == EQ)
4722 is_complemented = 1;
4723 cond = XEXP (cond, 0);
4724 if (GET_CODE (cond) != REG
4725 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
4727 pred = REGNO (cond);
4728 if (is_complemented)
4731 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
4732 return need_barrier;
4736 /* Clobber & use are for earlier compiler-phases only. */
4741 /* We always emit stop bits for traditional asms. We emit stop bits
4742 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
4743 if (GET_CODE (x) != ASM_OPERANDS
4744 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
4746 /* Avoid writing the register multiple times if we have multiple
4747 asm outputs. This avoids an abort in rws_access_reg. */
4748 if (! rws_insn[REG_VOLATILE].write_count)
4750 new_flags.is_write = 1;
4751 rws_access_regno (REG_VOLATILE, new_flags, pred);
4756 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
4757 We cannot just fall through here since then we would be confused
4758 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
4759 a traditional asm, unlike its normal usage. */
4761 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
4762 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
4767 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4769 rtx pat = XVECEXP (x, 0, i);
4770 if (GET_CODE (pat) == SET)
4772 update_set_flags (pat, &new_flags, &pred, &cond);
4773 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
4775 else if (GET_CODE (pat) == USE
4776 || GET_CODE (pat) == CALL
4777 || GET_CODE (pat) == ASM_OPERANDS)
4778 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4779 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
4782 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
4784 rtx pat = XVECEXP (x, 0, i);
4785 if (GET_CODE (pat) == SET)
4787 if (GET_CODE (SET_SRC (pat)) != CALL)
4789 new_flags.is_write = 1;
4790 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
4794 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
4795 need_barrier |= rtx_needs_barrier (pat, flags, pred);
4803 if (REGNO (x) == AR_UNAT_REGNUM)
4805 for (i = 0; i < 64; ++i)
4806 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
4809 need_barrier = rws_access_reg (x, flags, pred);
4813 /* Find the regs used in memory address computation. */
4814 new_flags.is_write = 0;
4815 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4818 case CONST_INT: case CONST_DOUBLE:
4819 case SYMBOL_REF: case LABEL_REF: case CONST:
4822 /* Operators with side-effects. */
4823 case POST_INC: case POST_DEC:
4824 if (GET_CODE (XEXP (x, 0)) != REG)
4827 new_flags.is_write = 0;
4828 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4829 new_flags.is_write = 1;
4830 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4834 if (GET_CODE (XEXP (x, 0)) != REG)
4837 new_flags.is_write = 0;
4838 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
4839 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4840 new_flags.is_write = 1;
4841 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
4844 /* Handle common unary and binary ops for efficiency. */
4845 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
4846 case MOD: case UDIV: case UMOD: case AND: case IOR:
4847 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
4848 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
4849 case NE: case EQ: case GE: case GT: case LE:
4850 case LT: case GEU: case GTU: case LEU: case LTU:
4851 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
4852 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
4855 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
4856 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
4857 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
4858 case SQRT: case FFS: case POPCOUNT:
4859 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
4863 switch (XINT (x, 1))
4865 case UNSPEC_LTOFF_DTPMOD:
4866 case UNSPEC_LTOFF_DTPREL:
4868 case UNSPEC_LTOFF_TPREL:
4870 case UNSPEC_PRED_REL_MUTEX:
4871 case UNSPEC_PIC_CALL:
4873 case UNSPEC_FETCHADD_ACQ:
4874 case UNSPEC_BSP_VALUE:
4875 case UNSPEC_FLUSHRS:
4876 case UNSPEC_BUNDLE_SELECTOR:
4879 case UNSPEC_GR_SPILL:
4880 case UNSPEC_GR_RESTORE:
4882 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
4883 HOST_WIDE_INT bit = (offset >> 3) & 63;
4885 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4886 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
4887 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
4892 case UNSPEC_FR_SPILL:
4893 case UNSPEC_FR_RESTORE:
4894 case UNSPEC_GETF_EXP:
4895 case UNSPEC_SETF_EXP:
4897 case UNSPEC_FR_SQRT_RECIP_APPROX:
4898 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4901 case UNSPEC_FR_RECIP_APPROX:
4903 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
4904 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4907 case UNSPEC_CMPXCHG_ACQ:
4908 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
4909 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
4917 case UNSPEC_VOLATILE:
4918 switch (XINT (x, 1))
4921 /* Alloc must always be the first instruction of a group.
4922 We force this by always returning true. */
4923 /* ??? We might get better scheduling if we explicitly check for
4924 input/local/output register dependencies, and modify the
4925 scheduler so that alloc is always reordered to the start of
4926 the current group. We could then eliminate all of the
4927 first_instruction code. */
4928 rws_access_regno (AR_PFS_REGNUM, flags, pred);
4930 new_flags.is_write = 1;
4931 rws_access_regno (REG_AR_CFM, new_flags, pred);
4934 case UNSPECV_SET_BSP:
4938 case UNSPECV_BLOCKAGE:
4939 case UNSPECV_INSN_GROUP_BARRIER:
4941 case UNSPECV_PSAC_ALL:
4942 case UNSPECV_PSAC_NORMAL:
4951 new_flags.is_write = 0;
4952 need_barrier = rws_access_regno (REG_RP, flags, pred);
4953 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
4955 new_flags.is_write = 1;
4956 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
4957 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
4961 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
4962 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
4963 switch (format_ptr[i])
4965 case '0': /* unused field */
4966 case 'i': /* integer */
4967 case 'n': /* note */
4968 case 'w': /* wide integer */
4969 case 's': /* pointer to string */
4970 case 'S': /* optional pointer to string */
4974 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
4979 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
4980 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
4989 return need_barrier;
4992 /* Clear out the state for group_barrier_needed_p at the start of a
4993 sequence of insns. */
4996 init_insn_group_barriers (void)
4998 memset (rws_sum, 0, sizeof (rws_sum));
4999 first_instruction = 1;
5002 /* Given the current state, recorded by previous calls to this function,
5003 determine whether a group barrier (a stop bit) is necessary before INSN.
5004 Return nonzero if so. */
5007 group_barrier_needed_p (rtx insn)
5010 int need_barrier = 0;
5011 struct reg_flags flags;
5013 memset (&flags, 0, sizeof (flags));
5014 switch (GET_CODE (insn))
5020 /* A barrier doesn't imply an instruction group boundary. */
5024 memset (rws_insn, 0, sizeof (rws_insn));
5028 flags.is_branch = 1;
5029 flags.is_sibcall = SIBLING_CALL_P (insn);
5030 memset (rws_insn, 0, sizeof (rws_insn));
5032 /* Don't bundle a call following another call. */
5033 if ((pat = prev_active_insn (insn))
5034 && GET_CODE (pat) == CALL_INSN)
5040 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5044 flags.is_branch = 1;
5046 /* Don't bundle a jump following a call. */
5047 if ((pat = prev_active_insn (insn))
5048 && GET_CODE (pat) == CALL_INSN)
5056 if (GET_CODE (PATTERN (insn)) == USE
5057 || GET_CODE (PATTERN (insn)) == CLOBBER)
5058 /* Don't care about USE and CLOBBER "insns"---those are used to
5059 indicate to the optimizer that it shouldn't get rid of
5060 certain operations. */
5063 pat = PATTERN (insn);
5065 /* Ug. Hack hacks hacked elsewhere. */
5066 switch (recog_memoized (insn))
5068 /* We play dependency tricks with the epilogue in order
5069 to get proper schedules. Undo this for dv analysis. */
5070 case CODE_FOR_epilogue_deallocate_stack:
5071 case CODE_FOR_prologue_allocate_stack:
5072 pat = XVECEXP (pat, 0, 0);
5075 /* The pattern we use for br.cloop confuses the code above.
5076 The second element of the vector is representative. */
5077 case CODE_FOR_doloop_end_internal:
5078 pat = XVECEXP (pat, 0, 1);
5081 /* Doesn't generate code. */
5082 case CODE_FOR_pred_rel_mutex:
5083 case CODE_FOR_prologue_use:
5090 memset (rws_insn, 0, sizeof (rws_insn));
5091 need_barrier = rtx_needs_barrier (pat, flags, 0);
5093 /* Check to see if the previous instruction was a volatile
5096 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5103 if (first_instruction && INSN_P (insn)
5104 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5105 && GET_CODE (PATTERN (insn)) != USE
5106 && GET_CODE (PATTERN (insn)) != CLOBBER)
5109 first_instruction = 0;
5112 return need_barrier;
5115 /* Like group_barrier_needed_p, but do not clobber the current state. */
5118 safe_group_barrier_needed_p (rtx insn)
5120 struct reg_write_state rws_saved[NUM_REGS];
5121 int saved_first_instruction;
5124 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5125 saved_first_instruction = first_instruction;
5127 t = group_barrier_needed_p (insn);
5129 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5130 first_instruction = saved_first_instruction;
5135 /* Scan the current function and insert stop bits as necessary to
5136 eliminate dependencies. This function assumes that a final
5137 instruction scheduling pass has been run which has already
5138 inserted most of the necessary stop bits. This function only
5139 inserts new ones at basic block boundaries, since these are
5140 invisible to the scheduler. */
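/* A sketch of the situation this pass handles: the scheduler
finalizes each block separately, so a write in one block and a
dependent read reached through a label may still fall into the
same instruction group at the join point; in that case a stop
bit is emitted before the label (descriptive note). */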
5143 emit_insn_group_barriers (FILE *dump)
5147 int insns_since_last_label = 0;
5149 init_insn_group_barriers ();
5151 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5153 if (GET_CODE (insn) == CODE_LABEL)
5155 if (insns_since_last_label)
5157 insns_since_last_label = 0;
5159 else if (GET_CODE (insn) == NOTE
5160 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5162 if (insns_since_last_label)
5164 insns_since_last_label = 0;
5166 else if (GET_CODE (insn) == INSN
5167 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5168 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5170 init_insn_group_barriers ();
5173 else if (INSN_P (insn))
5175 insns_since_last_label = 1;
5177 if (group_barrier_needed_p (insn))
5182 fprintf (dump, "Emitting stop before label %d\n",
5183 INSN_UID (last_label));
5184 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5187 init_insn_group_barriers ();
5195 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5196 This function has to emit all necessary group barriers. */
5199 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5203 init_insn_group_barriers ();
5205 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5207 if (GET_CODE (insn) == BARRIER)
5209 rtx last = prev_active_insn (insn);
5213 if (GET_CODE (last) == JUMP_INSN
5214 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5215 last = prev_active_insn (last);
5216 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5217 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5219 init_insn_group_barriers ();
5221 else if (INSN_P (insn))
5223 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5224 init_insn_group_barriers ();
5225 else if (group_barrier_needed_p (insn))
5227 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5228 init_insn_group_barriers ();
5229 group_barrier_needed_p (insn);
5236 static int errata_find_address_regs (rtx *, void *);
5237 static void errata_emit_nops (rtx);
5238 static void fixup_errata (void);
5240 /* This structure is used to track some details about the previous insn
5241 groups so we can determine if it may be necessary to insert NOPs to
5242 work around hardware errata. */
5245 HARD_REG_SET p_reg_set;
5246 HARD_REG_SET gr_reg_conditionally_set;
5249 /* Index into the last_group array. */
5250 static int group_idx;
5252 /* Called through for_each_rtx; determines if a hard register that was
5253 conditionally set in the previous group is used as an address register.
5254 It ensures that for_each_rtx returns 1 in that case. */
5256 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5259 if (GET_CODE (x) != MEM)
5262 if (GET_CODE (x) == POST_MODIFY)
5264 if (GET_CODE (x) == REG)
5266 struct group *prev_group = last_group + (group_idx ^ 1);
5267 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5275 /* Called for each insn; this function keeps track of the state in
5276 last_group and emits additional NOPs if necessary to work around
5277 an Itanium A/B step erratum. */
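/* A sketch of the kind of sequence the A/B step workaround targets
(illustrative, not emitted verbatim):

cmp.eq p6, p7 = r8, r9 ;; // predicate written in one group
(p6) mov r14 = r20 ;; // GR conditionally set in the next
ld8 r15 = [r14] // r14 then used as an address

When such a use is found, a stop bit, a nop and another stop bit
are inserted before the load. */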
5279 errata_emit_nops (rtx insn)
5281 struct group *this_group = last_group + group_idx;
5282 struct group *prev_group = last_group + (group_idx ^ 1);
5283 rtx pat = PATTERN (insn);
5284 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5285 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5286 enum attr_type type;
5289 if (GET_CODE (real_pat) == USE
5290 || GET_CODE (real_pat) == CLOBBER
5291 || GET_CODE (real_pat) == ASM_INPUT
5292 || GET_CODE (real_pat) == ADDR_VEC
5293 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5294 || asm_noperands (PATTERN (insn)) >= 0)
5297 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5300 if (GET_CODE (set) == PARALLEL)
5303 set = XVECEXP (real_pat, 0, 0);
5304 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5305 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5306 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5313 if (set && GET_CODE (set) != SET)
5316 type = get_attr_type (insn);
5319 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5320 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5322 if ((type == TYPE_M || type == TYPE_A) && cond && set
5323 && REG_P (SET_DEST (set))
5324 && GET_CODE (SET_SRC (set)) != PLUS
5325 && GET_CODE (SET_SRC (set)) != MINUS
5326 && (GET_CODE (SET_SRC (set)) != ASHIFT
5327 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5328 && (GET_CODE (SET_SRC (set)) != MEM
5329 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5330 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5332 if (!COMPARISON_P (cond)
5333 || !REG_P (XEXP (cond, 0)))
5336 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5337 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5339 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5341 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5342 emit_insn_before (gen_nop (), insn);
5343 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5345 memset (last_group, 0, sizeof last_group);
5349 /* Emit extra nops if they are required to work around hardware errata. */
5356 if (! TARGET_B_STEP)
5360 memset (last_group, 0, sizeof last_group);
5362 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5367 if (ia64_safe_type (insn) == TYPE_S)
5370 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5373 errata_emit_nops (insn);
5378 /* Instruction scheduling support. */
5380 #define NR_BUNDLES 10
5382 /* A list of names of all available bundles. */
5384 static const char *bundle_name [NR_BUNDLES] =
5390 #if NR_BUNDLES == 10
5400 /* Nonzero if we should insert stop bits into the schedule. */
5402 int ia64_final_schedule = 0;
5404 /* Codes of the corresponding queried units: */
5406 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5407 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5409 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5410 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5412 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5414 /* The following variable value is an insn group barrier. */
5416 static rtx dfa_stop_insn;
5418 /* The following variable value is the last issued insn. */
5420 static rtx last_scheduled_insn;
5422 /* The following variable value is size of the DFA state. */
5424 static size_t dfa_state_size;
5426 /* The following variable value is pointer to a DFA state used as
5427 temporary variable. */
5429 static state_t temp_dfa_state = NULL;
5431 /* The following variable value is DFA state after issuing the last
5434 static state_t prev_cycle_state = NULL;
5436 /* The following array element values are TRUE if the corresponding
5437 insn requires stop bits to be added before it. */
5439 static char *stops_p;
5441 /* The following variable is used to set up the array mentioned above. */
5443 static int stop_before_p = 0;
5445 /* The following variable value is the length of the arrays `clocks' and
5448 static int clocks_length;
5450 /* The following array element values are cycles on which the
5451 corresponding insn will be issued. The array is used only for
5456 /* The following array element values are the numbers of cycles that
5457 should be added to improve insn scheduling for MM insns on Itanium1. */
5459 static int *add_cycles;
5461 static rtx ia64_single_set (rtx);
5462 static void ia64_emit_insn_before (rtx, rtx);
5464 /* Map a bundle number to its pseudo-op. */
5467 get_bundle_name (int b)
5469 return bundle_name[b];
5473 /* Return the maximum number of instructions a cpu can issue. */
5476 ia64_issue_rate (void)
5481 /* Helper function - like single_set, but look inside COND_EXEC. */
5484 ia64_single_set (rtx insn)
5486 rtx x = PATTERN (insn), ret;
5487 if (GET_CODE (x) == COND_EXEC)
5488 x = COND_EXEC_CODE (x);
5489 if (GET_CODE (x) == SET)
5492 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
5493 Although they are not a classical single set, the second set is there just
5494 to protect it from moving past FP-relative stack accesses. */
5495 switch (recog_memoized (insn))
5497 case CODE_FOR_prologue_allocate_stack:
5498 case CODE_FOR_epilogue_deallocate_stack:
5499 ret = XVECEXP (x, 0, 0);
5503 ret = single_set_2 (insn, x);
5510 /* Adjust the cost of a scheduling dependency. Return the new cost of
5511 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5514 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5516 enum attr_itanium_class dep_class;
5517 enum attr_itanium_class insn_class;
5519 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5522 insn_class = ia64_safe_itanium_class (insn);
5523 dep_class = ia64_safe_itanium_class (dep_insn);
5524 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5525 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
5531 /* Like emit_insn_before, but skip cycle_display notes.
5532 ??? When cycle display notes are implemented, update this. */
5535 ia64_emit_insn_before (rtx insn, rtx before)
5537 emit_insn_before (insn, before);
5540 /* The following function marks insns that produce addresses for load
5541 and store insns. Such insns will be placed into M slots because this
5542 decreases latency for Itanium1 (see function
5543 `ia64_produce_address_p' and the DFA descriptions). */
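/* E.g. (an illustrative sketch):

add r14 = r15, r16 // address producer, class IALU
st8 [r14] = r17 // consumer store

Placing the add in an M slot lets the store-address bypass
shorten the add -> st8 latency on Itanium1. */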
5546 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5548 rtx insn, link, next, next_tail;
5550 next_tail = NEXT_INSN (tail);
5551 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5554 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5556 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5558 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5560 next = XEXP (link, 0);
5561 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5562 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5563 && ia64_st_address_bypass_p (insn, next))
5565 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5566 || ia64_safe_itanium_class (next)
5567 == ITANIUM_CLASS_FLD)
5568 && ia64_ld_address_bypass_p (insn, next))
5571 insn->call = link != 0;
5575 /* We're beginning a new block. Initialize data structures as necessary. */
5578 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5579 int sched_verbose ATTRIBUTE_UNUSED,
5580 int max_ready ATTRIBUTE_UNUSED)
5582 #ifdef ENABLE_CHECKING
5585 if (reload_completed)
5586 for (insn = NEXT_INSN (current_sched_info->prev_head);
5587 insn != current_sched_info->next_tail;
5588 insn = NEXT_INSN (insn))
5589 if (SCHED_GROUP_P (insn))
5592 last_scheduled_insn = NULL_RTX;
5593 init_insn_group_barriers ();
5596 /* We are about to begin issuing insns for this clock cycle.
5597 Override the default sort algorithm to better slot instructions. */
5600 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5601 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5605 int n_ready = *pn_ready;
5606 rtx *e_ready = ready + n_ready;
5610 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5612 if (reorder_type == 0)
5614 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5616 for (insnp = ready; insnp < e_ready; insnp++)
5617 if (insnp < e_ready)
5620 enum attr_type t = ia64_safe_type (insn);
5621 if (t == TYPE_UNKNOWN)
5623 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5624 || asm_noperands (PATTERN (insn)) >= 0)
5626 rtx lowest = ready[n_asms];
5627 ready[n_asms] = insn;
5633 rtx highest = ready[n_ready - 1];
5634 ready[n_ready - 1] = insn;
5641 if (n_asms < n_ready)
5643 /* Some normal insns to process. Skip the asms. */
5647 else if (n_ready > 0)
5651 if (ia64_final_schedule)
5654 int nr_need_stop = 0;
5656 for (insnp = ready; insnp < e_ready; insnp++)
5657 if (safe_group_barrier_needed_p (*insnp))
5660 if (reorder_type == 1 && n_ready == nr_need_stop)
5662 if (reorder_type == 0)
5665 /* Move down everything that needs a stop bit, preserving
5667 while (insnp-- > ready + deleted)
5668 while (insnp >= ready + deleted)
5671 if (! safe_group_barrier_needed_p (insn))
5673 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5684 /* We are about to begin issuing insns for this clock cycle. Override
5685 the default sort algorithm to better slot instructions. */
5688 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
5691 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
5692 pn_ready, clock_var, 0);
5695 /* Like ia64_sched_reorder, but called after issuing each insn.
5696 Override the default sort algorithm to better slot instructions. */
5699 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
5700 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
5701 int *pn_ready, int clock_var)
5703 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
5704 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
5705 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
5709 /* We are about to issue INSN. Return the number of insns left on the
5710 ready queue that can be issued this cycle. */
5713 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
5714 int sched_verbose ATTRIBUTE_UNUSED,
5715 rtx insn ATTRIBUTE_UNUSED,
5716 int can_issue_more ATTRIBUTE_UNUSED)
5718 last_scheduled_insn = insn;
5719 memcpy (prev_cycle_state, curr_state, dfa_state_size);
5720 if (reload_completed)
5722 if (group_barrier_needed_p (insn))
5724 if (GET_CODE (insn) == CALL_INSN)
5725 init_insn_group_barriers ();
5726 stops_p [INSN_UID (insn)] = stop_before_p;
5732 /* We are choosing an insn from the ready queue. Return nonzero if INSN
5736 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
5738 if (insn == NULL_RTX || !INSN_P (insn))
5740 return (!reload_completed
5741 || !safe_group_barrier_needed_p (insn));
5744 /* The following variable value is a pseudo-insn used by the DFA insn
5745 scheduler to change the DFA state when the simulated clock is
5748 static rtx dfa_pre_cycle_insn;
5750 /* We are about to begin issuing INSN. Return nonzero if we cannot
5751 issue it on the given cycle CLOCK and return zero if we should not sort
5752 the ready queue on the next clock start. */
5755 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
5756 int clock, int *sort_p)
5758 int setup_clocks_p = FALSE;
5760 if (insn == NULL_RTX || !INSN_P (insn))
5762 if ((reload_completed && safe_group_barrier_needed_p (insn))
5763 || (last_scheduled_insn
5764 && (GET_CODE (last_scheduled_insn) == CALL_INSN
5765 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5766 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
5768 init_insn_group_barriers ();
5769 if (verbose && dump)
5770 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
5771 last_clock == clock ? " + cycle advance" : "");
5773 if (last_clock == clock)
5775 state_transition (curr_state, dfa_stop_insn);
5776 if (TARGET_EARLY_STOP_BITS)
5777 *sort_p = (last_scheduled_insn == NULL_RTX
5778 || GET_CODE (last_scheduled_insn) != CALL_INSN);
5783 else if (reload_completed)
5784 setup_clocks_p = TRUE;
5785 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
5786 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
5787 state_reset (curr_state);
5790 memcpy (curr_state, prev_cycle_state, dfa_state_size);
5791 state_transition (curr_state, dfa_stop_insn);
5792 state_transition (curr_state, dfa_pre_cycle_insn);
5793 state_transition (curr_state, NULL);
5796 else if (reload_completed)
5797 setup_clocks_p = TRUE;
5798 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
5799 && GET_CODE (PATTERN (insn)) != ASM_INPUT
5800 && asm_noperands (PATTERN (insn)) < 0)
5802 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
5804 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
5809 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
5810 if (REG_NOTE_KIND (link) == 0)
5812 enum attr_itanium_class dep_class;
5813 rtx dep_insn = XEXP (link, 0);
5815 dep_class = ia64_safe_itanium_class (dep_insn);
5816 if ((dep_class == ITANIUM_CLASS_MMMUL
5817 || dep_class == ITANIUM_CLASS_MMSHF)
5818 && last_clock - clocks [INSN_UID (dep_insn)] < 4
5820 || last_clock - clocks [INSN_UID (dep_insn)] < d))
5821 d = last_clock - clocks [INSN_UID (dep_insn)];
5824 add_cycles [INSN_UID (insn)] = 3 - d;
5832 /* The following page contains abstract data `bundle states' which are
5833 used for bundling insns (inserting nops and template generation). */
5835 /* The following describes the state of insn bundling. */
5839 /* Unique bundle state number to identify them in the debugging
5842 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
5843 /* number of nops before and after the insn */
5844 short before_nops_num, after_nops_num;
5845 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
5847 int cost; /* cost of the state in cycles */
5848 int accumulated_insns_num; /* number of all previous insns including
5849 nops; an L-type insn counts as 2 insns */
5850 int branch_deviation; /* deviation of previous branches from 3rd slots */
5851 struct bundle_state *next; /* next state with the same insn_num */
5852 struct bundle_state *originator; /* originator (previous insn state) */
5853 /* All bundle states are in the following chain. */
5854 struct bundle_state *allocated_states_chain;
5855 /* The DFA State after issuing the insn and the nops. */
5859 /* The following maps an insn number to the corresponding bundle state. */
5861 static struct bundle_state **index_to_bundle_states;
5863 /* The unique number of next bundle state. */
5865 static int bundle_states_num;
5867 /* All allocated bundle states are in the following chain. */
5869 static struct bundle_state *allocated_bundle_states_chain;
5871 /* All allocated but not used bundle states are in the following
5874 static struct bundle_state *free_bundle_state_chain;
5877 /* The following function returns a free bundle state. */
5879 static struct bundle_state *
5880 get_free_bundle_state (void)
5882 struct bundle_state *result;
5884 if (free_bundle_state_chain != NULL)
5886 result = free_bundle_state_chain;
5887 free_bundle_state_chain = result->next;
5891 result = xmalloc (sizeof (struct bundle_state));
5892 result->dfa_state = xmalloc (dfa_state_size);
5893 result->allocated_states_chain = allocated_bundle_states_chain;
5894 allocated_bundle_states_chain = result;
5896 result->unique_num = bundle_states_num++;
5901 /* The following function frees the given bundle state. */
5904 free_bundle_state (struct bundle_state *state)
5906 state->next = free_bundle_state_chain;
5907 free_bundle_state_chain = state;
5910 /* Start work with abstract data `bundle states'. */
5913 initiate_bundle_states (void)
5915 bundle_states_num = 0;
5916 free_bundle_state_chain = NULL;
5917 allocated_bundle_states_chain = NULL;
5920 /* Finish work with abstract data `bundle states'. */
5923 finish_bundle_states (void)
5925 struct bundle_state *curr_state, *next_state;
5927 for (curr_state = allocated_bundle_states_chain;
5929 curr_state = next_state)
5931 next_state = curr_state->allocated_states_chain;
5932 free (curr_state->dfa_state);
5937 /* Hash table of the bundle states. The key is dfa_state and insn_num
5938 of the bundle states. */
5940 static htab_t bundle_state_table;
5942 /* The function returns the hash of BUNDLE_STATE. */
5945 bundle_state_hash (const void *bundle_state)
5947 const struct bundle_state *state = (struct bundle_state *) bundle_state;
5950 for (result = i = 0; i < dfa_state_size; i++)
5951 result += (((unsigned char *) state->dfa_state) [i]
5952 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
5953 return result + state->insn_num;
5956 /* The function returns nonzero if the bundle state keys are equal. */
5959 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
5961 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
5962 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
5964 return (state1->insn_num == state2->insn_num
5965 && memcmp (state1->dfa_state, state2->dfa_state,
5966 dfa_state_size) == 0);
5969 /* The function inserts the BUNDLE_STATE into the hash table. The
5970 function returns nonzero if the bundle has been inserted into the
5971 table. The table contains the best bundle state with a given key. */
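/* Descriptive note: states hashing to the same (dfa_state, insn_num)
key are merged, preferring lower cost, then fewer accumulated
insns, then smaller branch deviation, per the comparison below. */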
5974 insert_bundle_state (struct bundle_state *bundle_state)
5978 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
5979 if (*entry_ptr == NULL)
5981 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
5982 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
5983 *entry_ptr = (void *) bundle_state;
5986 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
5987 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
5988 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
5989 > bundle_state->accumulated_insns_num
5990 || (((struct bundle_state *)
5991 *entry_ptr)->accumulated_insns_num
5992 == bundle_state->accumulated_insns_num
5993 && ((struct bundle_state *)
5994 *entry_ptr)->branch_deviation
5995 > bundle_state->branch_deviation))))
5998 struct bundle_state temp;
6000 temp = *(struct bundle_state *) *entry_ptr;
6001 *(struct bundle_state *) *entry_ptr = *bundle_state;
6002 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6003 *bundle_state = temp;
6008 /* Start work with the hash table. */
6011 initiate_bundle_state_table (void)
6013 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6017 /* Finish work with the hash table. */
6020 finish_bundle_state_table (void)
6022 htab_delete (bundle_state_table);
6027 /* The following variable is an insn `nop' used to check bundle states
6028 with different numbers of inserted nops. */
6030 static rtx ia64_nop;
6032 /* The following function tries to issue NOPS_NUM nops for the current
6033 state without advancing the processor cycle. If this fails, the
6034 function returns FALSE and frees the current state. */
6037 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6041 for (i = 0; i < nops_num; i++)
6042 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6044 free_bundle_state (curr_state);
6050 /* The following function tries to issue INSN for the current
6051 state without advancing the processor cycle. If this fails, the
6052 function returns FALSE and frees the current state. */
6055 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6057 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6059 free_bundle_state (curr_state);
6065 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6066 starting with ORIGINATOR without advancing the processor cycle. If
6067 TRY_BUNDLE_END_P is TRUE, the function also/only (if
6068 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6069 If it was successful, the function creates a new bundle state and
6070 inserts it into the hash table and into `index_to_bundle_states'. */
6073 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6074 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6076 struct bundle_state *curr_state;
6078 curr_state = get_free_bundle_state ();
6079 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6080 curr_state->insn = insn;
6081 curr_state->insn_num = originator->insn_num + 1;
6082 curr_state->cost = originator->cost;
6083 curr_state->originator = originator;
6084 curr_state->before_nops_num = before_nops_num;
6085 curr_state->after_nops_num = 0;
6086 curr_state->accumulated_insns_num
6087 = originator->accumulated_insns_num + before_nops_num;
6088 curr_state->branch_deviation = originator->branch_deviation;
6089 if (insn == NULL_RTX)
6091 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6093 if (GET_MODE (insn) == TImode)
6095 if (!try_issue_nops (curr_state, before_nops_num))
6097 if (!try_issue_insn (curr_state, insn))
6099 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6100 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6101 && curr_state->accumulated_insns_num % 3 != 0)
6103 free_bundle_state (curr_state);
6107 else if (GET_MODE (insn) != TImode)
6109 if (!try_issue_nops (curr_state, before_nops_num))
6111 if (!try_issue_insn (curr_state, insn))
6113 curr_state->accumulated_insns_num++;
6114 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6115 || asm_noperands (PATTERN (insn)) >= 0)
6117 if (ia64_safe_type (insn) == TYPE_L)
6118 curr_state->accumulated_insns_num++;
6122 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6123 state_transition (curr_state->dfa_state, NULL);
6125 if (!try_issue_nops (curr_state, before_nops_num))
6127 if (!try_issue_insn (curr_state, insn))
6129 curr_state->accumulated_insns_num++;
6130 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6131 || asm_noperands (PATTERN (insn)) >= 0)
6133 /* Finish bundle containing asm insn. */
6134 curr_state->after_nops_num
6135 = 3 - curr_state->accumulated_insns_num % 3;
6136 curr_state->accumulated_insns_num
6137 += 3 - curr_state->accumulated_insns_num % 3;
6139 else if (ia64_safe_type (insn) == TYPE_L)
6140 curr_state->accumulated_insns_num++;
6142 if (ia64_safe_type (insn) == TYPE_B)
6143 curr_state->branch_deviation
6144 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6145 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6147 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6150 struct bundle_state *curr_state1;
6151 struct bundle_state *allocated_states_chain;
6153 curr_state1 = get_free_bundle_state ();
6154 dfa_state = curr_state1->dfa_state;
6155 allocated_states_chain = curr_state1->allocated_states_chain;
6156 *curr_state1 = *curr_state;
6157 curr_state1->dfa_state = dfa_state;
6158 curr_state1->allocated_states_chain = allocated_states_chain;
6159 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6161 curr_state = curr_state1;
6163 if (!try_issue_nops (curr_state,
6164 3 - curr_state->accumulated_insns_num % 3))
6166 curr_state->after_nops_num
6167 = 3 - curr_state->accumulated_insns_num % 3;
6168 curr_state->accumulated_insns_num
6169 += 3 - curr_state->accumulated_insns_num % 3;
6171 if (!insert_bundle_state (curr_state))
6172 free_bundle_state (curr_state);
6176 /* The following function returns the position in the two-window bundle
6180 get_max_pos (state_t state)
6182 if (cpu_unit_reservation_p (state, pos_6))
6184 else if (cpu_unit_reservation_p (state, pos_5))
6186 else if (cpu_unit_reservation_p (state, pos_4))
6188 else if (cpu_unit_reservation_p (state, pos_3))
6190 else if (cpu_unit_reservation_p (state, pos_2))
6192 else if (cpu_unit_reservation_p (state, pos_1))
6198 /* The function returns the code of a possible template for a given
6199 position and state. The function should be called only with position
6200 values equal to 3 or 6. */
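/* Descriptive note: position 3 means only the first bundle of the
two-bundle window is filled, so the _0xxx_ reservations are
queried; position 6 means both bundles are filled and the second
bundle's _1xxx_ reservations are queried instead. */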
6203 get_template (state_t state, int pos)
6208 if (cpu_unit_reservation_p (state, _0mii_))
6210 else if (cpu_unit_reservation_p (state, _0mmi_))
6212 else if (cpu_unit_reservation_p (state, _0mfi_))
6214 else if (cpu_unit_reservation_p (state, _0mmf_))
6216 else if (cpu_unit_reservation_p (state, _0bbb_))
6218 else if (cpu_unit_reservation_p (state, _0mbb_))
6220 else if (cpu_unit_reservation_p (state, _0mib_))
6222 else if (cpu_unit_reservation_p (state, _0mmb_))
6224 else if (cpu_unit_reservation_p (state, _0mfb_))
6226 else if (cpu_unit_reservation_p (state, _0mlx_))
6231 if (cpu_unit_reservation_p (state, _1mii_))
6233 else if (cpu_unit_reservation_p (state, _1mmi_))
6235 else if (cpu_unit_reservation_p (state, _1mfi_))
6237 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6239 else if (cpu_unit_reservation_p (state, _1bbb_))
6241 else if (cpu_unit_reservation_p (state, _1mbb_))
6243 else if (cpu_unit_reservation_p (state, _1mib_))
6245 else if (cpu_unit_reservation_p (state, _1mmb_))
6247 else if (cpu_unit_reservation_p (state, _1mfb_))
6249 else if (cpu_unit_reservation_p (state, _1mlx_))
6258 /* The following function returns the next insn important for insn
6259 bundling that follows INSN and comes before TAIL. */
6262 get_next_important_insn (rtx insn, rtx tail)
6264 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6266 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6267 && GET_CODE (PATTERN (insn)) != USE
6268 && GET_CODE (PATTERN (insn)) != CLOBBER)
6273 /* The following function does insn bundling. Bundling means
6274 inserting templates and nop insns to fit insn groups into permitted
6275 templates. Instruction scheduling uses NDFA (non-deterministic
6276 finite automata) encoding informations about the templates and the
6277 inserted nops. Nondeterminism of the automata permits follows
6278 all possible insn sequences very fast.
6280 Unfortunately it is not possible to get information about inserting
6281 nop insns and used templates from the automata states. The
6282 automata only says that we can issue an insn possibly inserting
6283 some nops before it and using some template. Therefore insn
6284 bundling in this function is implemented by using DFA
6285 (deterministic finite automata). We follows all possible insn
6286 sequences by inserting 0-2 nops (that is what the NDFA describe for
6287 insn scheduling) before/after each insn being bundled. We know the
6288 start of simulated processor cycle from insn scheduling (insn
6289 starting a new cycle has TImode).
6291 Simple implementation of insn bundling would create enormous
6292 number of possible insn sequences satisfying information about new
6293 cycle ticks taken from the insn scheduling. To make the algorithm
6294 practical we use dynamic programming. Each decision (about
6295 inserting nops and implicitly about previous decisions) is described
6296 by structure bundle_state (see above). If we generate the same
6297 bundle state (key is automaton state after issuing the insns and
6298 nops for it), we reuse already generated one. As consequence we
6299 reject some decisions which cannot improve the solution and
6300 reduce memory for the algorithm.
6302 When we reach the end of an EBB (extended basic block), we choose
6303 the best sequence and then, moving back through the EBB, insert
6304 templates for the best alternative.  The templates are taken by
6305 querying the automaton state for each insn in the chosen bundle states.
6307 So the algorithm makes two (forward and backward) passes through the
6308 EBB.  There is an additional forward pass through the EBB for the
6309 Itanium1 processor.  This pass inserts more nops to make the dependency
6310 between a producer insn and MMMUL/MMSHF insns at least 4 cycles long.  */
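/* A compact sketch of the dynamic programming described above (hedged
   pseudo-code; hash_lookup and hash_insert are hypothetical stand-ins
   for the bundle state table used by the real implementation):

     for each important insn I, in order (forward pass):
       for each state S chained in index_to_bundle_states[I - 1]:
         for nops in {0, 1, 2}:
           S2 = result of issuing nops and I from S (advances the DFA);
           old = hash_lookup (table, key = S2->dfa_state);
           if (old == NULL or old->cost > S2->cost)
             hash_insert (table, S2), chain S2 into index_to_bundle_states[I];
*/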
6313 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6315 struct bundle_state *curr_state, *next_state, *best_state;
6316 rtx insn, next_insn;
6318 int i, bundle_end_p, only_bundle_end_p, asm_p;
6319 int pos = 0, max_pos, template0, template1;
6322 enum attr_type type;
6325 /* Count insns in the EBB. */
6326 for (insn = NEXT_INSN (prev_head_insn);
6327 insn && insn != tail;
6328 insn = NEXT_INSN (insn))
6334 dfa_clean_insn_cache ();
6335 initiate_bundle_state_table ();
6336 index_to_bundle_states = xmalloc ((insn_num + 2)
6337 * sizeof (struct bundle_state *));
6338 /* First (forward) pass -- generation of bundle states. */
6339 curr_state = get_free_bundle_state ();
6340 curr_state->insn = NULL;
6341 curr_state->before_nops_num = 0;
6342 curr_state->after_nops_num = 0;
6343 curr_state->insn_num = 0;
6344 curr_state->cost = 0;
6345 curr_state->accumulated_insns_num = 0;
6346 curr_state->branch_deviation = 0;
6347 curr_state->next = NULL;
6348 curr_state->originator = NULL;
6349 state_reset (curr_state->dfa_state);
6350 index_to_bundle_states [0] = curr_state;
6352 /* Shift the cycle mark if it is placed on an insn which could be ignored.  */
6353 for (insn = NEXT_INSN (prev_head_insn);
6355 insn = NEXT_INSN (insn))
6357 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6358 || GET_CODE (PATTERN (insn)) == USE
6359 || GET_CODE (PATTERN (insn)) == CLOBBER)
6360 && GET_MODE (insn) == TImode)
6362 PUT_MODE (insn, VOIDmode);
6363 for (next_insn = NEXT_INSN (insn);
6365 next_insn = NEXT_INSN (next_insn))
6366 if (INSN_P (next_insn)
6367 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6368 && GET_CODE (PATTERN (next_insn)) != USE
6369 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6371 PUT_MODE (next_insn, TImode);
6375 /* Forward pass: generation of bundle states.  */
6376 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6381 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6382 || GET_CODE (PATTERN (insn)) == USE
6383 || GET_CODE (PATTERN (insn)) == CLOBBER)
6385 type = ia64_safe_type (insn);
6386 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6388 index_to_bundle_states [insn_num] = NULL;
6389 for (curr_state = index_to_bundle_states [insn_num - 1];
6391 curr_state = next_state)
6393 pos = curr_state->accumulated_insns_num % 3;
6394 next_state = curr_state->next;
6395 /* We must fill up the current bundle in order to start a
6396 subsequent asm insn in a new bundle.  An asm insn is always
6397 placed in a separate bundle.  */
6399 = (next_insn != NULL_RTX
6400 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6401 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6402 /* We may fill up the current bundle if it is the cycle end
6403 without a group barrier. */
6405 = (only_bundle_end_p || next_insn == NULL_RTX
6406 || (GET_MODE (next_insn) == TImode
6407 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6408 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6410 /* We need to insert 2 nops for cases like M_MII. To
6411 guarantee issuing all insns on the same cycle for
6412 Itanium 1, we need to issue 2 nops after the first M
6413 insn (MnnMII where n is a nop insn). */
6414 || ((type == TYPE_M || type == TYPE_A)
6415 && ia64_tune == PROCESSOR_ITANIUM
6416 && !bundle_end_p && pos == 1))
6417 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6419 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6421 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6424 if (index_to_bundle_states [insn_num] == NULL)
6426 for (curr_state = index_to_bundle_states [insn_num];
6428 curr_state = curr_state->next)
6429 if (verbose >= 2 && dump)
6431 /* This structure is taken from the generated code of the
6432 pipeline hazard recognizer (see file insn-attrtab.c).
6433 Please don't forget to change the structure if a new
6434 automaton is added to the .md file.  */
6437 unsigned short one_automaton_state;
6438 unsigned short oneb_automaton_state;
6439 unsigned short two_automaton_state;
6440 unsigned short twob_automaton_state;
6445 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6446 curr_state->unique_num,
6447 (curr_state->originator == NULL
6448 ? -1 : curr_state->originator->unique_num),
6450 curr_state->before_nops_num, curr_state->after_nops_num,
6451 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6452 (ia64_tune == PROCESSOR_ITANIUM
6453 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6454 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6458 if (index_to_bundle_states [insn_num] == NULL)
6459 /* We should find a solution because the 2nd insn scheduling has found one.  */
6462 /* Find a state corresponding to the best insn sequence. */
6464 for (curr_state = index_to_bundle_states [insn_num];
6466 curr_state = curr_state->next)
6467 /* We consider only states whose last bundle is completely filled.
6468 First we prefer insn sequences with minimal cost, then those with
6469 the fewest inserted nops, and finally those with branch insns
6470 placed in the 3rd slots.  */
6471 if (curr_state->accumulated_insns_num % 3 == 0
6472 && (best_state == NULL || best_state->cost > curr_state->cost
6473 || (best_state->cost == curr_state->cost
6474 && (curr_state->accumulated_insns_num
6475 < best_state->accumulated_insns_num
6476 || (curr_state->accumulated_insns_num
6477 == best_state->accumulated_insns_num
6478 && curr_state->branch_deviation
6479 < best_state->branch_deviation)))))
6480 best_state = curr_state;
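  /* In other words, among states whose last bundle is completely filled
     (accumulated_insns_num % 3 == 0), the choice above is lexicographic
     on (cost, accumulated_insns_num, branch_deviation), smallest first;
     fewer accumulated insns means fewer inserted nops.  */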
6481 /* Second (backward) pass: adding nops and templates. */
6482 insn_num = best_state->before_nops_num;
6483 template0 = template1 = -1;
6484 for (curr_state = best_state;
6485 curr_state->originator != NULL;
6486 curr_state = curr_state->originator)
6488 insn = curr_state->insn;
6489 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6490 || asm_noperands (PATTERN (insn)) >= 0);
6492 if (verbose >= 2 && dump)
6496 unsigned short one_automaton_state;
6497 unsigned short oneb_automaton_state;
6498 unsigned short two_automaton_state;
6499 unsigned short twob_automaton_state;
6504 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6505 curr_state->unique_num,
6506 (curr_state->originator == NULL
6507 ? -1 : curr_state->originator->unique_num),
6509 curr_state->before_nops_num, curr_state->after_nops_num,
6510 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6511 (ia64_tune == PROCESSOR_ITANIUM
6512 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6513 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6516 /* Find the position in the current bundle window.  The window can
6517 contain at most two bundles.  A two-bundle window means that
6518 the processor will make two bundle rotations.  */
6519 max_pos = get_max_pos (curr_state->dfa_state);
6521 /* The following (negative template number) means that the
6522 processor did one bundle rotation. */
6523 || (max_pos == 3 && template0 < 0))
6525 /* We are at the end of the window -- find template(s) for
6529 template0 = get_template (curr_state->dfa_state, 3);
6532 template1 = get_template (curr_state->dfa_state, 3);
6533 template0 = get_template (curr_state->dfa_state, 6);
6536 if (max_pos > 3 && template1 < 0)
6537 /* This may happen when we have a stop inside a bundle.  */
6541 template1 = get_template (curr_state->dfa_state, 3);
6545 /* Emit nops after the current insn. */
6546 for (i = 0; i < curr_state->after_nops_num; i++)
6549 emit_insn_after (nop, insn);
6555 /* We are at the start of a bundle: emit the template
6556 (it should be defined). */
6559 b = gen_bundle_selector (GEN_INT (template0));
6560 ia64_emit_insn_before (b, nop);
6561 /* If we have a two-bundle window, we make one bundle
6562 rotation.  Otherwise template0 will be undefined
6563 (a negative value).  */
6564 template0 = template1;
6568 /* Move the position backward in the window.  A group barrier has
6569 no slot.  An asm insn takes a whole bundle.  */
6570 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6571 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6572 && asm_noperands (PATTERN (insn)) < 0)
6574 /* A long insn takes 2 slots.  */
6575 if (ia64_safe_type (insn) == TYPE_L)
6580 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6581 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6582 && asm_noperands (PATTERN (insn)) < 0)
6584 /* The current insn is at the bundle start: emit the template.  */
6588 b = gen_bundle_selector (GEN_INT (template0));
6589 ia64_emit_insn_before (b, insn);
6590 b = PREV_INSN (insn);
6592 /* See comment above in analogous place for emitting nops
6594 template0 = template1;
6597 /* Emit nops before the current insn.  */
6598 for (i = 0; i < curr_state->before_nops_num; i++)
6601 ia64_emit_insn_before (nop, insn);
6602 nop = PREV_INSN (insn);
6609 /* See comment above in analogous place for emitting nops
6613 b = gen_bundle_selector (GEN_INT (template0));
6614 ia64_emit_insn_before (b, insn);
6615 b = PREV_INSN (insn);
6617 template0 = template1;
6622 if (ia64_tune == PROCESSOR_ITANIUM)
6623 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
6624 Itanium1 has a strange design: if the distance between an insn
6625 and a dependent MM-insn is less than 4 cycles, we suffer an extra
6626 6-cycle stall.  So we make the distance equal to 4 cycles if it
6628 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6633 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6634 || GET_CODE (PATTERN (insn)) == USE
6635 || GET_CODE (PATTERN (insn)) == CLOBBER)
6637 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6638 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
6639 /* We found an MM-insn which needs additional cycles.  */
6645 /* Now we are searching for the template of the bundle in
6646 which the MM-insn is placed and for the position of the
6647 insn in the bundle (0, 1, 2).  We also check whether
6648 there is a stop before the insn.  */
6649 last = prev_active_insn (insn);
6650 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6652 last = prev_active_insn (last);
6654 for (;; last = prev_active_insn (last))
6655 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6657 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
6659 /* The insn is in an MLX bundle.  Change the template
6660 to MFI because we will add nops before the
6661 insn.  This simplifies the subsequent code a lot.  */
6663 = gen_bundle_selector (const2_rtx); /* -> MFI */
6666 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
6667 && (ia64_safe_itanium_class (last)
6668 != ITANIUM_CLASS_IGNORE))
6670 /* Some correctness checks: the stop is not at the
6671 bundle start, there are no more than 3 insns in the bundle,
6672 and the MM-insn is not at the start of a bundle with
6674 if ((pred_stop_p && n == 0) || n > 2
6675 || (template0 == 9 && n != 0))
6677 /* Put nops after the insn in the bundle. */
6678 for (j = 3 - n; j > 0; j --)
6679 ia64_emit_insn_before (gen_nop (), insn);
6680 /* This takes into account that we will add N more nops
6681 before the insn later -- please see the code below.  */
6682 add_cycles [INSN_UID (insn)]--;
6683 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6684 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6687 add_cycles [INSN_UID (insn)]--;
6688 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
6690 /* Insert "MII;" template. */
6691 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
6693 ia64_emit_insn_before (gen_nop (), insn);
6694 ia64_emit_insn_before (gen_nop (), insn);
6697 /* To decrease code size, we use the "MI;I;" template.  */
6699 ia64_emit_insn_before
6700 (gen_insn_group_barrier (GEN_INT (3)), insn);
6703 ia64_emit_insn_before (gen_nop (), insn);
6704 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6707 /* Put the MM-insn in the same slot of a bundle with the
6708 same template as the original one. */
6709 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
6711 /* To put the insn in the same slot, add the necessary number of nops.  */
6713 for (j = n; j > 0; j --)
6714 ia64_emit_insn_before (gen_nop (), insn);
6715 /* Put the stop if the original bundle had it. */
6717 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6721 free (index_to_bundle_states);
6722 finish_bundle_state_table ();
6724 dfa_clean_insn_cache ();
6727 /* The following function is called at the end of scheduling BB or
6728 EBB. After reload, it inserts stop bits and does insn bundling. */
6731 ia64_sched_finish (FILE *dump, int sched_verbose)
6734 fprintf (dump, "// Finishing schedule.\n");
6735 if (!reload_completed)
6737 if (reload_completed)
6739 final_emit_insn_group_barriers (dump);
6740 bundling (dump, sched_verbose, current_sched_info->prev_head,
6741 current_sched_info->next_tail);
6742 if (sched_verbose && dump)
6743 fprintf (dump, "// finishing %d-%d\n",
6744 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
6745 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
6751 /* The following function inserts stop bits in scheduled BB or EBB. */
6754 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6757 int need_barrier_p = 0;
6758 rtx prev_insn = NULL_RTX;
6760 init_insn_group_barriers ();
6762 for (insn = NEXT_INSN (current_sched_info->prev_head);
6763 insn != current_sched_info->next_tail;
6764 insn = NEXT_INSN (insn))
6766 if (GET_CODE (insn) == BARRIER)
6768 rtx last = prev_active_insn (insn);
6772 if (GET_CODE (last) == JUMP_INSN
6773 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6774 last = prev_active_insn (last);
6775 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6776 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6778 init_insn_group_barriers ();
6780 prev_insn = NULL_RTX;
6782 else if (INSN_P (insn))
6784 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6786 init_insn_group_barriers ();
6788 prev_insn = NULL_RTX;
6790 else if (need_barrier_p || group_barrier_needed_p (insn))
6792 if (TARGET_EARLY_STOP_BITS)
6797 last != current_sched_info->prev_head;
6798 last = PREV_INSN (last))
6799 if (INSN_P (last) && GET_MODE (last) == TImode
6800 && stops_p [INSN_UID (last)])
6802 if (last == current_sched_info->prev_head)
6804 last = prev_active_insn (last);
6806 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
6807 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
6809 init_insn_group_barriers ();
6810 for (last = NEXT_INSN (last);
6812 last = NEXT_INSN (last))
6814 group_barrier_needed_p (last);
6818 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
6820 init_insn_group_barriers ();
6822 group_barrier_needed_p (insn);
6823 prev_insn = NULL_RTX;
6825 else if (recog_memoized (insn) >= 0)
6827 need_barrier_p = (GET_CODE (insn) == CALL_INSN
6828 || GET_CODE (PATTERN (insn)) == ASM_INPUT
6829 || asm_noperands (PATTERN (insn)) >= 0);
6836 /* If the following function returns TRUE, we will use the DFA
6840 ia64_first_cycle_multipass_dfa_lookahead (void)
6842 return (reload_completed ? 6 : 4);
6845 /* The following function initiates variable `dfa_pre_cycle_insn'. */
6848 ia64_init_dfa_pre_cycle_insn (void)
6850 if (temp_dfa_state == NULL)
6852 dfa_state_size = state_size ();
6853 temp_dfa_state = xmalloc (dfa_state_size);
6854 prev_cycle_state = xmalloc (dfa_state_size);
6856 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
6857 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
6858 recog_memoized (dfa_pre_cycle_insn);
6859 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
6860 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
6861 recog_memoized (dfa_stop_insn);
6864 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
6865 used by the DFA insn scheduler. */
6868 ia64_dfa_pre_cycle_insn (void)
6870 return dfa_pre_cycle_insn;
6873 /* The following function returns TRUE if PRODUCER (of type ilog or
6874 ld) produces an address for CONSUMER (of type st or stf).  */
6877 ia64_st_address_bypass_p (rtx producer, rtx consumer)
6881 if (producer == NULL_RTX || consumer == NULL_RTX)
6883 dest = ia64_single_set (producer);
6884 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6885 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6887 if (GET_CODE (reg) == SUBREG)
6888 reg = SUBREG_REG (reg);
6889 dest = ia64_single_set (consumer);
6890 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
6891 || GET_CODE (mem) != MEM)
6893 return reg_mentioned_p (reg, mem);
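/* Hedged example of what the predicate above accepts:

     producer:  (set (reg r14) (plus (reg r32) (const_int 8)))
     consumer:  (set (mem (reg r14)) (reg r35))

   The producer's destination register appears in the consumer's store
   address, so the st_address bypass applies.  */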
6896 /* The following function returns TRUE if PRODUCER (of type ilog or
6897 ld) produces an address for CONSUMER (of type ld or fld).  */
6900 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
6902 rtx dest, src, reg, mem;
6904 if (producer == NULL_RTX || consumer == NULL_RTX)
6906 dest = ia64_single_set (producer);
6907 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
6908 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
6910 if (GET_CODE (reg) == SUBREG)
6911 reg = SUBREG_REG (reg);
6912 src = ia64_single_set (consumer);
6913 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
6915 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
6916 mem = XVECEXP (mem, 0, 0);
6917 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
6918 mem = XEXP (mem, 0);
6920 /* Note that LO_SUM is used for GOT loads. */
6921 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
6924 return reg_mentioned_p (reg, mem);
6927 /* The following function returns TRUE if INSN produces an address
6928 for a load/store insn.  We will place such insns into an M slot
6929 because that decreases their latency time.  */
6932 ia64_produce_address_p (rtx insn)
6938 /* Emit pseudo-ops for the assembler to describe predicate relations.
6939 At present this assumes that we only consider predicate pairs to
6940 be mutex, and that the assembler can deduce proper values from
6941 straight-line code. */
6944 emit_predicate_relation_info (void)
6948 FOR_EACH_BB_REVERSE (bb)
6951 rtx head = BB_HEAD (bb);
6953 /* We only need such notes at code labels. */
6954 if (GET_CODE (head) != CODE_LABEL)
6956 if (GET_CODE (NEXT_INSN (head)) == NOTE
6957 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
6958 head = NEXT_INSN (head);
6960 for (r = PR_REG (0); r < PR_REG (64); r += 2)
6961 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
6963 rtx p = gen_rtx_REG (BImode, r);
6964 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
6965 if (head == BB_END (bb))
6971 /* Look for conditional calls that do not return, and protect predicate
6972 relations around them. Otherwise the assembler will assume the call
6973 returns, and complain about uses of call-clobbered predicates after the call.  */
6975 FOR_EACH_BB_REVERSE (bb)
6977 rtx insn = BB_HEAD (bb);
6981 if (GET_CODE (insn) == CALL_INSN
6982 && GET_CODE (PATTERN (insn)) == COND_EXEC
6983 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
6985 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
6986 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
6987 if (BB_HEAD (bb) == insn)
6989 if (BB_END (bb) == insn)
6993 if (insn == BB_END (bb))
6995 insn = NEXT_INSN (insn);
7000 /* Perform machine dependent operations on the rtl chain INSNS. */
7005 /* We are freeing block_for_insn in the toplev to keep compatibility
7006 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7007 compute_bb_for_insn ();
7009 /* If optimizing, we'll have split before scheduling. */
7011 split_all_insns (0);
7013 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7014 non-optimizing bootstrap. */
7015 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7017 if (ia64_flag_schedule_insns2)
7019 timevar_push (TV_SCHED2);
7020 ia64_final_schedule = 1;
7022 initiate_bundle_states ();
7023 ia64_nop = make_insn_raw (gen_nop ());
7024 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7025 recog_memoized (ia64_nop);
7026 clocks_length = get_max_uid () + 1;
7027 stops_p = xcalloc (1, clocks_length);
7028 if (ia64_tune == PROCESSOR_ITANIUM)
7030 clocks = xcalloc (clocks_length, sizeof (int));
7031 add_cycles = xcalloc (clocks_length, sizeof (int));
7033 if (ia64_tune == PROCESSOR_ITANIUM2)
7035 pos_1 = get_cpu_unit_code ("2_1");
7036 pos_2 = get_cpu_unit_code ("2_2");
7037 pos_3 = get_cpu_unit_code ("2_3");
7038 pos_4 = get_cpu_unit_code ("2_4");
7039 pos_5 = get_cpu_unit_code ("2_5");
7040 pos_6 = get_cpu_unit_code ("2_6");
7041 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7042 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7043 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7044 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7045 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7046 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7047 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7048 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7049 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7050 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7051 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7052 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7053 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7054 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7055 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7056 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7057 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7058 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7059 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7060 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7064 pos_1 = get_cpu_unit_code ("1_1");
7065 pos_2 = get_cpu_unit_code ("1_2");
7066 pos_3 = get_cpu_unit_code ("1_3");
7067 pos_4 = get_cpu_unit_code ("1_4");
7068 pos_5 = get_cpu_unit_code ("1_5");
7069 pos_6 = get_cpu_unit_code ("1_6");
7070 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7071 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7072 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7073 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7074 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7075 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7076 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7077 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7078 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7079 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7080 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7081 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7082 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7083 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7084 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7085 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7086 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7087 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7088 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7089 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7091 schedule_ebbs (dump_file);
7092 finish_bundle_states ();
7093 if (ia64_tune == PROCESSOR_ITANIUM)
7099 emit_insn_group_barriers (dump_file);
7101 ia64_final_schedule = 0;
7102 timevar_pop (TV_SCHED2);
7105 emit_all_insn_group_barriers (dump_file);
7107 /* A call must not be the last instruction in a function, so that the
7108 return address stays within the function and unwinding works
7109 properly.  Note that IA-64 differs from dwarf2 on this point.  */
7110 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7115 insn = get_last_insn ();
7116 if (! INSN_P (insn))
7117 insn = prev_active_insn (insn);
7118 /* Skip over insns that expand to nothing. */
7119 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7121 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7122 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7124 insn = prev_active_insn (insn);
7126 if (GET_CODE (insn) == CALL_INSN)
7129 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7130 emit_insn (gen_break_f ());
7131 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7136 emit_predicate_relation_info ();
7138 if (ia64_flag_var_tracking)
7140 timevar_push (TV_VAR_TRACKING);
7141 variable_tracking_main ();
7142 timevar_pop (TV_VAR_TRACKING);
7146 /* Return true if REGNO is used by the epilogue. */
7149 ia64_epilogue_uses (int regno)
7154 /* With a call to a function in another module, we will write a new
7155 value to "gp". After returning from such a call, we need to make
7156 sure the function restores the original gp-value, even if the
7157 function itself does not use the gp anymore. */
7158 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7160 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7161 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7162 /* For functions defined with the syscall_linkage attribute, all
7163 input registers are marked as live at all function exits. This
7164 prevents the register allocator from using the input registers,
7165 which in turn makes it possible to restart a system call after
7166 an interrupt without having to save/restore the input registers.
7167 This also prevents kernel data from leaking to application code. */
7168 return lookup_attribute ("syscall_linkage",
7169 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7172 /* Conditional return patterns can't represent the use of `b0' as
7173 the return address, so we force the value live this way. */
7177 /* Likewise for ar.pfs, which is used by br.ret. */
7185 /* Return true if REGNO is used by the frame unwinder. */
7188 ia64_eh_uses (int regno)
7190 if (! reload_completed)
7193 if (current_frame_info.reg_save_b0
7194 && regno == current_frame_info.reg_save_b0)
7196 if (current_frame_info.reg_save_pr
7197 && regno == current_frame_info.reg_save_pr)
7199 if (current_frame_info.reg_save_ar_pfs
7200 && regno == current_frame_info.reg_save_ar_pfs)
7202 if (current_frame_info.reg_save_ar_unat
7203 && regno == current_frame_info.reg_save_ar_unat)
7205 if (current_frame_info.reg_save_ar_lc
7206 && regno == current_frame_info.reg_save_ar_lc)
7212 /* Return true if this goes in small data/bss. */
7214 /* ??? We could also support our own long data here, generating
7215 movl/add/ld8 instead of addl,ld8/ld8.  This makes the code bigger,
7216 but should make the code faster because there is one less load.  This
7217 also includes incomplete types which can't go in sdata/sbss.  */
7220 ia64_in_small_data_p (tree exp)
7222 if (TARGET_NO_SDATA)
7225 /* We want to merge strings, so we never consider them small data. */
7226 if (TREE_CODE (exp) == STRING_CST)
7229 /* Functions are never small data. */
7230 if (TREE_CODE (exp) == FUNCTION_DECL)
7233 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7235 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7236 if (strcmp (section, ".sdata") == 0
7237 || strcmp (section, ".sbss") == 0)
7242 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7244 /* If this is an incomplete type with size 0, then we can't put it
7245 in sdata because it might be too big when completed. */
7246 if (size > 0 && size <= ia64_section_threshold)
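/* Hedged example: with a small-data threshold of 8 bytes, a 4-byte
   "int" variable lands in sdata/sbss, while a 16-byte structure or an
   incomplete type of size 0 does not.  */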
7253 /* Output assembly directives for prologue regions. */
7255 /* True if the current basic block is the last one in the function.  */
7257 static bool last_block;
7259 /* True if we need a copy_state command at the start of the next block. */
7261 static bool need_copy_state;
7263 /* The function emits unwind directives for the start of an epilogue. */
7266 process_epilogue (void)
7268 /* If this isn't the last block of the function, then we need to label the
7269 current state, and copy it back in at the start of the next block. */
7273 fprintf (asm_out_file, "\t.label_state 1\n");
7274 need_copy_state = true;
7277 fprintf (asm_out_file, "\t.restore sp\n");
7280 /* This function processes a SET pattern looking for specific patterns
7281 which result in emitting an assembly directive required for unwinding. */
7284 process_set (FILE *asm_out_file, rtx pat)
7286 rtx src = SET_SRC (pat);
7287 rtx dest = SET_DEST (pat);
7288 int src_regno, dest_regno;
7290 /* Look for the ALLOC insn. */
7291 if (GET_CODE (src) == UNSPEC_VOLATILE
7292 && XINT (src, 1) == UNSPECV_ALLOC
7293 && GET_CODE (dest) == REG)
7295 dest_regno = REGNO (dest);
7297 /* If this is the final destination for ar.pfs, then this must
7298 be the alloc in the prologue. */
7299 if (dest_regno == current_frame_info.reg_save_ar_pfs)
7300 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7301 ia64_dbx_register_number (dest_regno));
7304 /* This must be an alloc before a sibcall. We must drop the
7305 old frame info. The easiest way to drop the old frame
7306 info is to ensure we had a ".restore sp" directive
7307 followed by a new prologue. If the procedure doesn't
7308 have a memory-stack frame, we'll issue a dummy ".restore
7310 if (current_frame_info.total_size == 0)
7311 /* If we haven't done process_epilogue () yet, do it now.  */
7312 process_epilogue ();
7313 fprintf (asm_out_file, "\t.prologue\n");
7318 /* Look for SP = .... */
7319 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7321 if (GET_CODE (src) == PLUS)
7323 rtx op0 = XEXP (src, 0);
7324 rtx op1 = XEXP (src, 1);
7325 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7327 if (INTVAL (op1) < 0)
7328 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7331 process_epilogue ();
7336 else if (GET_CODE (src) == REG
7337 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7338 process_epilogue ();
7345 /* Register move we need to look at. */
7346 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7348 src_regno = REGNO (src);
7349 dest_regno = REGNO (dest);
7354 /* Saving return address pointer. */
7355 if (dest_regno != current_frame_info.reg_save_b0)
7357 fprintf (asm_out_file, "\t.save rp, r%d\n",
7358 ia64_dbx_register_number (dest_regno));
7362 if (dest_regno != current_frame_info.reg_save_pr)
7364 fprintf (asm_out_file, "\t.save pr, r%d\n",
7365 ia64_dbx_register_number (dest_regno));
7368 case AR_UNAT_REGNUM:
7369 if (dest_regno != current_frame_info.reg_save_ar_unat)
7371 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7372 ia64_dbx_register_number (dest_regno));
7376 if (dest_regno != current_frame_info.reg_save_ar_lc)
7378 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7379 ia64_dbx_register_number (dest_regno));
7382 case STACK_POINTER_REGNUM:
7383 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7384 || ! frame_pointer_needed)
7386 fprintf (asm_out_file, "\t.vframe r%d\n",
7387 ia64_dbx_register_number (dest_regno));
7391 /* Everything else should indicate being stored to memory. */
7396 /* Memory store we need to look at. */
7397 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7403 if (GET_CODE (XEXP (dest, 0)) == REG)
7405 base = XEXP (dest, 0);
7408 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7409 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7411 base = XEXP (XEXP (dest, 0), 0);
7412 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7417 if (base == hard_frame_pointer_rtx)
7419 saveop = ".savepsp";
7422 else if (base == stack_pointer_rtx)
7427 src_regno = REGNO (src);
7431 if (current_frame_info.reg_save_b0 != 0)
7433 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7437 if (current_frame_info.reg_save_pr != 0)
7439 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7443 if (current_frame_info.reg_save_ar_lc != 0)
7445 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7449 if (current_frame_info.reg_save_ar_pfs != 0)
7451 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7454 case AR_UNAT_REGNUM:
7455 if (current_frame_info.reg_save_ar_unat != 0)
7457 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7464 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7465 1 << (src_regno - GR_REG (4)));
7473 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7474 1 << (src_regno - BR_REG (1)));
7481 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7482 1 << (src_regno - FR_REG (2)));
7485 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7486 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7487 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7488 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7489 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7490 1 << (src_regno - FR_REG (12)));
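	  /* Hedged examples of the masks above: storing r5 emits
	     ".save.g 0x2" (bit 1, counting from r4), and storing f17
	     emits ".save.gf 0x0, 0x20" (bit 5, counting from f12).  */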
7502 /* This function looks at a single insn and emits any directives
7503 required to unwind this insn. */
7505 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7507 if (flag_unwind_tables
7508 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7512 if (GET_CODE (insn) == NOTE
7513 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7515 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7517 /* Restore unwind state from immediately before the epilogue. */
7518 if (need_copy_state)
7520 fprintf (asm_out_file, "\t.body\n");
7521 fprintf (asm_out_file, "\t.copy_state 1\n");
7522 need_copy_state = false;
7526 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7529 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7531 pat = XEXP (pat, 0);
7533 pat = PATTERN (insn);
7535 switch (GET_CODE (pat))
7538 process_set (asm_out_file, pat);
7544 int limit = XVECLEN (pat, 0);
7545 for (par_index = 0; par_index < limit; par_index++)
7547 rtx x = XVECEXP (pat, 0, par_index);
7548 if (GET_CODE (x) == SET)
7549 process_set (asm_out_file, x);
7562 ia64_init_builtins (void)
7564 tree psi_type_node = build_pointer_type (integer_type_node);
7565 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7567 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7568 tree si_ftype_psi_si_si
7569 = build_function_type_list (integer_type_node,
7570 psi_type_node, integer_type_node,
7571 integer_type_node, NULL_TREE);
7573 /* __sync_val_compare_and_swap_di */
7574 tree di_ftype_pdi_di_di
7575 = build_function_type_list (long_integer_type_node,
7576 pdi_type_node, long_integer_type_node,
7577 long_integer_type_node, NULL_TREE);
7578 /* __sync_bool_compare_and_swap_di */
7579 tree si_ftype_pdi_di_di
7580 = build_function_type_list (integer_type_node,
7581 pdi_type_node, long_integer_type_node,
7582 long_integer_type_node, NULL_TREE);
7583 /* __sync_synchronize */
7584 tree void_ftype_void
7585 = build_function_type (void_type_node, void_list_node);
7587 /* __sync_lock_test_and_set_si */
7588 tree si_ftype_psi_si
7589 = build_function_type_list (integer_type_node,
7590 psi_type_node, integer_type_node, NULL_TREE);
7592 /* __sync_lock_test_and_set_di */
7593 tree di_ftype_pdi_di
7594 = build_function_type_list (long_integer_type_node,
7595 pdi_type_node, long_integer_type_node,
7598 /* __sync_lock_release_si */
7600 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7602 /* __sync_lock_release_di */
7604 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
7609 /* The __fpreg type. */
7610 fpreg_type = make_node (REAL_TYPE);
7611 /* ??? The back end should know to load/save __fpreg variables using
7612 the ldf.fill and stf.spill instructions. */
7613 TYPE_PRECISION (fpreg_type) = 80;
7614 layout_type (fpreg_type);
7615 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
7617 /* The __float80 type. */
7618 float80_type = make_node (REAL_TYPE);
7619 TYPE_PRECISION (float80_type) = 80;
7620 layout_type (float80_type);
7621 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
7623 /* The __float128 type. */
7626 tree float128_type = make_node (REAL_TYPE);
7627 TYPE_PRECISION (float128_type) = 128;
7628 layout_type (float128_type);
7629 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
7632 /* Under HPUX, this is a synonym for "long double". */
7633 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
7636 #define def_builtin(name, type, code) \
7637 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
7640 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7641 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7642 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7643 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7644 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7645 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7646 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7647 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7649 def_builtin ("__sync_synchronize", void_ftype_void,
7650 IA64_BUILTIN_SYNCHRONIZE);
7652 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7653 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7654 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7655 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7656 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7657 IA64_BUILTIN_LOCK_RELEASE_SI);
7658 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7659 IA64_BUILTIN_LOCK_RELEASE_DI);
7661 def_builtin ("__builtin_ia64_bsp",
7662 build_function_type (ptr_type_node, void_list_node),
7665 def_builtin ("__builtin_ia64_flushrs",
7666 build_function_type (void_type_node, void_list_node),
7667 IA64_BUILTIN_FLUSHRS);
7669 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7670 IA64_BUILTIN_FETCH_AND_ADD_SI);
7671 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7672 IA64_BUILTIN_FETCH_AND_SUB_SI);
7673 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7674 IA64_BUILTIN_FETCH_AND_OR_SI);
7675 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7676 IA64_BUILTIN_FETCH_AND_AND_SI);
7677 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7678 IA64_BUILTIN_FETCH_AND_XOR_SI);
7679 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
7680 IA64_BUILTIN_FETCH_AND_NAND_SI);
7682 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
7683 IA64_BUILTIN_ADD_AND_FETCH_SI);
7684 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
7685 IA64_BUILTIN_SUB_AND_FETCH_SI);
7686 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
7687 IA64_BUILTIN_OR_AND_FETCH_SI);
7688 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
7689 IA64_BUILTIN_AND_AND_FETCH_SI);
7690 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
7691 IA64_BUILTIN_XOR_AND_FETCH_SI);
7692 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
7693 IA64_BUILTIN_NAND_AND_FETCH_SI);
7695 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
7696 IA64_BUILTIN_FETCH_AND_ADD_DI);
7697 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
7698 IA64_BUILTIN_FETCH_AND_SUB_DI);
7699 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
7700 IA64_BUILTIN_FETCH_AND_OR_DI);
7701 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
7702 IA64_BUILTIN_FETCH_AND_AND_DI);
7703 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
7704 IA64_BUILTIN_FETCH_AND_XOR_DI);
7705 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
7706 IA64_BUILTIN_FETCH_AND_NAND_DI);
7708 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
7709 IA64_BUILTIN_ADD_AND_FETCH_DI);
7710 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
7711 IA64_BUILTIN_SUB_AND_FETCH_DI);
7712 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
7713 IA64_BUILTIN_OR_AND_FETCH_DI);
7714 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
7715 IA64_BUILTIN_AND_AND_FETCH_DI);
7716 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
7717 IA64_BUILTIN_XOR_AND_FETCH_DI);
7718 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
7719 IA64_BUILTIN_NAND_AND_FETCH_DI);
7724 /* Expand fetch_and_op intrinsics.  The basic code sequence is:
7725 
7726      mf
7727      tmp = [ptr];
7728      do {
7729        ret = tmp;
7730        ar.ccv = tmp;
7731        tmp <op>= value;
7732        cmpxchgsz.acq tmp = [ptr], tmp
7733      } while (tmp != ret)  */
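/* A C-level sketch of the same retry loop, expressed with the SImode
   builtin registered later in this file (a hedged illustration of the
   semantics, not the RTL actually emitted):

     static int
     fetch_and_add_si (int *ptr, int value)
     {
       int tmp = *ptr, ret;
       do
         {
           ret = tmp;
           tmp = __sync_val_compare_and_swap_si (ptr, ret, ret + value);
         }
       while (tmp != ret);
       return ret;
     }

   The returned value is the contents observed before the addition.  */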
7737 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
7738 tree arglist, rtx target)
7740 rtx ret, label, tmp, ccv, insn, mem, value;
7743 arg0 = TREE_VALUE (arglist);
7744 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7745 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7746 #ifdef POINTERS_EXTEND_UNSIGNED
7747 if (GET_MODE(mem) != Pmode)
7748 mem = convert_memory_address (Pmode, mem);
7750 value = expand_expr (arg1, NULL_RTX, mode, 0);
7752 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7753 MEM_VOLATILE_P (mem) = 1;
7755 if (target && register_operand (target, mode))
7758 ret = gen_reg_rtx (mode);
7760 emit_insn (gen_mf ());
7762 /* Special case for fetchadd instructions. */
7763 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
7766 insn = gen_fetchadd_acq_si (ret, mem, value);
7768 insn = gen_fetchadd_acq_di (ret, mem, value);
7773 tmp = gen_reg_rtx (mode);
7774 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7775 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7776 emit_move_insn (tmp, mem);
7778 label = gen_label_rtx ();
7780 emit_move_insn (ret, tmp);
7781 convert_move (ccv, tmp, /*unsignedp=*/1);
7783 /* Perform the specific operation. Special case NAND by noticing
7784 one_cmpl_optab instead. */
7785 if (binoptab == one_cmpl_optab)
7787 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7788 binoptab = and_optab;
7790 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
7793 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
7795 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
7798 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
7803 /* Expand op_and_fetch intrinsics.  The basic code sequence is:
7804 
7805      mf
7806      tmp = [ptr];
7807      do {
7808        old = tmp;
7809        ar.ccv = tmp;
7810        ret = tmp <op> value;
7811        cmpxchgsz.acq tmp = [ptr], ret
7812      } while (tmp != old)  */
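/* The only difference from fetch_and_op above: the cmpxchg stores the
   freshly computed value, and the intrinsic returns that new value
   rather than the old contents.  */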
7816 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
7817 tree arglist, rtx target)
7819 rtx old, label, tmp, ret, ccv, insn, mem, value;
7822 arg0 = TREE_VALUE (arglist);
7823 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7824 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
7825 #ifdef POINTERS_EXTEND_UNSIGNED
7826 if (GET_MODE(mem) != Pmode)
7827 mem = convert_memory_address (Pmode, mem);
7830 value = expand_expr (arg1, NULL_RTX, mode, 0);
7832 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
7833 MEM_VOLATILE_P (mem) = 1;
7835 if (target && ! register_operand (target, mode))
7838 emit_insn (gen_mf ());
7839 tmp = gen_reg_rtx (mode);
7840 old = gen_reg_rtx (mode);
7841 /* ar.ccv must always be loaded with a zero-extended DImode value. */
7842 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7844 emit_move_insn (tmp, mem);
7846 label = gen_label_rtx ();
7848 emit_move_insn (old, tmp);
7849 convert_move (ccv, tmp, /*unsignedp=*/1);
7851 /* Perform the specific operation. Special case NAND by noticing
7852 one_cmpl_optab instead. */
7853 if (binoptab == one_cmpl_optab)
7855 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
7856 binoptab = and_optab;
7858 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
7861 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
7863 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
7866 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
7871 /* Expand val_ and bool_compare_and_swap.  For val_ we want:
7872 
7873      ar.ccv = oldval
7874      mf
7875      cmpxchgsz.acq ret = [ptr], newval, ar.ccv
7876      return ret
7877 
7878    For bool_ it's the same except that we return ret == oldval.  */
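/* Hedged usage illustration with the builtins registered below
   (word, expected and desired are hypothetical names):

     old = __sync_val_compare_and_swap_si (&word, expected, desired);
       ... old is the value observed; the store happened iff
           old == expected ...
     ok = __sync_bool_compare_and_swap_si (&word, expected, desired);
       ... same operation, but returns the comparison result ...
*/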
7882 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
7883 int boolp, tree arglist, rtx target)
7885 tree arg0, arg1, arg2;
7886 rtx mem, old, new, ccv, tmp, insn;
7888 arg0 = TREE_VALUE (arglist);
7889 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7890 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
7891 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7892 old = expand_expr (arg1, NULL_RTX, mode, 0);
7893 new = expand_expr (arg2, NULL_RTX, mode, 0);
7895 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7896 MEM_VOLATILE_P (mem) = 1;
7898 if (GET_MODE (old) != mode)
7899 old = convert_to_mode (mode, old, /*unsignedp=*/1);
7900 if (GET_MODE (new) != mode)
7901 new = convert_to_mode (mode, new, /*unsignedp=*/1);
7903 if (! register_operand (old, mode))
7904 old = copy_to_mode_reg (mode, old);
7905 if (! register_operand (new, mode))
7906 new = copy_to_mode_reg (mode, new);
7908 if (! boolp && target && register_operand (target, mode))
7911 tmp = gen_reg_rtx (mode);
7913 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
7914 convert_move (ccv, old, /*unsignedp=*/1);
7915 emit_insn (gen_mf ());
7917 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
7919 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
7925 target = gen_reg_rtx (rmode);
7926 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
7932 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
7935 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
7939 rtx mem, new, ret, insn;
7941 arg0 = TREE_VALUE (arglist);
7942 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
7943 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7944 new = expand_expr (arg1, NULL_RTX, mode, 0);
7946 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7947 MEM_VOLATILE_P (mem) = 1;
7948 if (! register_operand (new, mode))
7949 new = copy_to_mode_reg (mode, new);
7951 if (target && register_operand (target, mode))
7954 ret = gen_reg_rtx (mode);
7957 insn = gen_xchgsi (ret, mem, new);
7959 insn = gen_xchgdi (ret, mem, new);
7965 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
7968 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
7969 rtx target ATTRIBUTE_UNUSED)
7974 arg0 = TREE_VALUE (arglist);
7975 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
7977 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
7978 MEM_VOLATILE_P (mem) = 1;
7980 emit_move_insn (mem, const0_rtx);
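  /* I.e. __sync_lock_release_si (&lock) becomes a single release store
     of zero (st4.rel for SImode, st8.rel for DImode); no fence or
     retry loop is required.  */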
7986 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
7987 enum machine_mode mode ATTRIBUTE_UNUSED,
7988 int ignore ATTRIBUTE_UNUSED)
7990 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
7991 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
7992 tree arglist = TREE_OPERAND (exp, 1);
7993 enum machine_mode rmode = VOIDmode;
7997 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
7998 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8003 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8004 case IA64_BUILTIN_LOCK_RELEASE_SI:
8005 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8006 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8007 case IA64_BUILTIN_FETCH_AND_OR_SI:
8008 case IA64_BUILTIN_FETCH_AND_AND_SI:
8009 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8010 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8011 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8012 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8013 case IA64_BUILTIN_OR_AND_FETCH_SI:
8014 case IA64_BUILTIN_AND_AND_FETCH_SI:
8015 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8016 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8020 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8025 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8030 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8031 case IA64_BUILTIN_LOCK_RELEASE_DI:
8032 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8033 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8034 case IA64_BUILTIN_FETCH_AND_OR_DI:
8035 case IA64_BUILTIN_FETCH_AND_AND_DI:
8036 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8037 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8038 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8039 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8040 case IA64_BUILTIN_OR_AND_FETCH_DI:
8041 case IA64_BUILTIN_AND_AND_FETCH_DI:
8042 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8043 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8053 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8054 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8055 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8058 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8059 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8060 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8063 case IA64_BUILTIN_SYNCHRONIZE:
8064 emit_insn (gen_mf ());
8067 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8068 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8069 return ia64_expand_lock_test_and_set (mode, arglist, target);
8071 case IA64_BUILTIN_LOCK_RELEASE_SI:
8072 case IA64_BUILTIN_LOCK_RELEASE_DI:
8073 return ia64_expand_lock_release (mode, arglist, target);
8075 case IA64_BUILTIN_BSP:
8076 if (! target || ! register_operand (target, DImode))
8077 target = gen_reg_rtx (DImode);
8078 emit_insn (gen_bsp_value (target));
8079 #ifdef POINTERS_EXTEND_UNSIGNED
8080 target = convert_memory_address (ptr_mode, target);
8084 case IA64_BUILTIN_FLUSHRS:
8085 emit_insn (gen_flushrs ());
8088 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8089 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8090 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8092 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8093 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8094 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8096 case IA64_BUILTIN_FETCH_AND_OR_SI:
8097 case IA64_BUILTIN_FETCH_AND_OR_DI:
8098 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8100 case IA64_BUILTIN_FETCH_AND_AND_SI:
8101 case IA64_BUILTIN_FETCH_AND_AND_DI:
8102 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8104 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8105 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8106 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8108 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8109 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8110 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);
8112 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8113 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8114 return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);
8116 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8117 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8118 return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);
8120 case IA64_BUILTIN_OR_AND_FETCH_SI:
8121 case IA64_BUILTIN_OR_AND_FETCH_DI:
8122 return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);
8124 case IA64_BUILTIN_AND_AND_FETCH_SI:
8125 case IA64_BUILTIN_AND_AND_FETCH_DI:
8126 return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);
8128 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8129 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8130 return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);
8132 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8133 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8134 return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);
8143 /* On HP-UX IA64, aggregate parameters are passed in the
8144 most significant bits of the stack slot.  */
8147 ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
8149 /* Exception to normal case for structures/unions/etc. */
8151 if (type && AGGREGATE_TYPE_P (type)
8152 && int_size_in_bytes (type) < UNITS_PER_WORD)
8155 /* Fall back to the default. */
8156 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
8159 /* Linked list of all external functions that are to be emitted by GCC.
8160 We output the name if and only if TREE_SYMBOL_REFERENCED is set in
8161 order to avoid putting out names that are never really used. */
8163 struct extern_func_list GTY(())
8165 struct extern_func_list *next;
8169 static GTY(()) struct extern_func_list *extern_func_head;
8172 ia64_hpux_add_extern_decl (tree decl)
8174 struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
8177 p->next = extern_func_head;
8178 extern_func_head = p;
8181 /* Print out the list of used global functions. */
8184 ia64_hpux_file_end (void)
8186 struct extern_func_list *p;
8188 for (p = extern_func_head; p; p = p->next)
8190 tree decl = p->decl;
8191 tree id = DECL_ASSEMBLER_NAME (decl);
8196 if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
8198 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
8200 TREE_ASM_WRITTEN (decl) = 1;
8201 (*targetm.asm_out.globalize_label) (asm_out_file, name);
8202 fputs (TYPE_ASM_OP, asm_out_file);
8203 assemble_name (asm_out_file, name);
8204 fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
8208 extern_func_head = 0;
8211 /* Set the SImode div/mod functions; init_integral_libfuncs only
8212 initializes modes of word_mode and larger.  Rename the TFmode libfuncs
8213 using the HPUX conventions.  __divtf3 is used for XFmode; we need to
8214 keep it for backward compatibility.  */
8217 ia64_init_libfuncs (void)
8219 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
8220 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
8221 set_optab_libfunc (smod_optab, SImode, "__modsi3");
8222 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
8224 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
8225 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
8226 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
8227 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
8228 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
8230 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
8231 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
8232 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
8233 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
8234 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
8235 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
8237 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
8238 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
8239 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
8240 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
8242 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
8243 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
8246 /* Rename all the TFmode libfuncs using the HPUX conventions. */
8249 ia64_hpux_init_libfuncs (void)
8251 ia64_init_libfuncs ();
8253 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
8254 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
8255 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
8257 /* ia64_expand_compare uses this. */
8258 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
8260 /* These should never be used. */
8261 set_optab_libfunc (eq_optab, TFmode, 0);
8262 set_optab_libfunc (ne_optab, TFmode, 0);
8263 set_optab_libfunc (gt_optab, TFmode, 0);
8264 set_optab_libfunc (ge_optab, TFmode, 0);
8265 set_optab_libfunc (lt_optab, TFmode, 0);
8266 set_optab_libfunc (le_optab, TFmode, 0);
8269 /* Rename the division and modulus functions in VMS. */
8272 ia64_vms_init_libfuncs (void)
8274 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
8275 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
8276 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
8277 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
8278 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
8279 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
8280 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
8281 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
8284 /* Rename the TFmode libfuncs available from soft-fp in glibc using
8285 the HPUX conventions. */
8288 ia64_sysv4_init_libfuncs (void)
8290 ia64_init_libfuncs ();
8292 /* These functions are not part of the HPUX TFmode interface. We
8293 use them instead of _U_Qfcmp, which doesn't work the way we
8295 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
8296 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
8297 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
8298 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
8299 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
8300 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
8302 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
8303 glibc doesn't have them. */
8306 /* Switch to the section to which we should output X. The only thing
8307 special we do here is to honor small data. */
8310 ia64_select_rtx_section (enum machine_mode mode, rtx x,
8311 unsigned HOST_WIDE_INT align)
8313 if (GET_MODE_SIZE (mode) > 0
8314 && GET_MODE_SIZE (mode) <= ia64_section_threshold)
8317 default_elf_select_rtx_section (mode, x, align);
8320 /* It is illegal to have relocations in shared segments on AIX and HPUX.
8321 Pretend flag_pic is always set. */
8324 ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
8326 default_elf_select_section_1 (exp, reloc, align, true);
8330 ia64_rwreloc_unique_section (tree decl, int reloc)
8332 default_unique_section_1 (decl, reloc, true);
8336 ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
8337 unsigned HOST_WIDE_INT align)
8339 int save_pic = flag_pic;
8341 ia64_select_rtx_section (mode, x, align);
8342 flag_pic = save_pic;
8346 ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
8348 return default_section_type_flags_1 (decl, name, reloc, true);

/* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
   structure type and the address of that return value should be passed
   in out0, rather than in r8.  */

static bool
ia64_struct_retval_addr_is_first_parm_p (tree fntype)
{
  tree ret_type = TREE_TYPE (fntype);

  /* The Itanium C++ ABI requires that out0, rather than r8, be used
     as the structure return address parameter, if the return value
     type has a non-trivial copy constructor or destructor.  It is not
     clear if this same convention should be used for other
     programming languages.  Until G++ 3.4, we incorrectly used r8 for
     these return values.  */
  return (abi_version_at_least (2)
          && ret_type
          && TYPE_MODE (ret_type) == BLKmode
          && TREE_ADDRESSABLE (ret_type)
          && strcmp (lang_hooks.name, "GNU C++") == 0);
}
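
/* For example, given the C++ declarations

     struct S { S (const S &); ~S (); int i; };
     S f (void);

   the return type of f is TREE_ADDRESSABLE with BLKmode, so the
   predicate above is true and the caller passes the return buffer's
   address in out0, leaving r8 free; a plain POD aggregate of the same
   size still uses r8.  (Illustrative example only.)  */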

/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */
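
/* In pseudo-code, the thunk body generated below is:

     this += DELTA;
     if (VCALL_OFFSET)
       this += *(*this + VCALL_OFFSET);
     tail call FUNCTION;

   (Summary only; the RTL below also copes with ILP32 pointer
   extension and with offsets too large for an add immediate.)  */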

static void
ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this, insn, funexp;
  unsigned int this_parmno;
  unsigned int this_regno;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  reset_block_changes ();

  /* Set things up as ia64_expand_prologue might.  */
  last_scratch_gr_reg = 15;

  memset (&current_frame_info, 0, sizeof (current_frame_info));
  current_frame_info.spill_cfa_off = -16;
  current_frame_info.n_input_regs = 1;
  current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Figure out whether "this" will be the first parameter (the
     typical case) or the second parameter (as happens when the
     virtual function returns certain class objects).  */
  this_parmno
    = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
       ? 1 : 0);
  this_regno = IN_REG (this_parmno);
  if (!TARGET_REG_NAMES)
    reg_names[this_regno] = ia64_reg_numbers[this_parmno];

  this = gen_rtx_REG (Pmode, this_regno);
  if (TARGET_ILP32)
    {
      rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
      REG_POINTER (tmp) = 1;
      if (delta && CONST_OK_FOR_I (delta))
        {
          emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
          delta = 0;
        }
      else
        emit_insn (gen_ptr_extend (this, tmp));
    }
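
  /* (Under ILP32 the incoming "this" is a 32-bit pointer that must be
     widened to the 64-bit Pmode first, which the addp4-based
     ptr_extend patterns do; when DELTA also fits the 14-bit "I"
     immediate constraint, widening and addition are fused into a
     single ptr_extend_plus_imm insn and DELTA is zeroed so it is not
     applied a second time below.)  */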

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (!CONST_OK_FOR_I (delta))
        {
          rtx tmp = gen_rtx_REG (Pmode, 2);
          emit_move_insn (tmp, delta_rtx);
          delta_rtx = tmp;
        }
      emit_insn (gen_adddi3 (this, this, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 2);

      if (TARGET_ILP32)
        {
          rtx t = gen_rtx_REG (ptr_mode, 2);
          REG_POINTER (t) = 1;
          emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
          if (CONST_OK_FOR_I (vcall_offset))
            {
              emit_insn (gen_ptr_extend_plus_imm (tmp, t,
                                                  vcall_offset_rtx));
              vcall_offset = 0;
            }
          else
            emit_insn (gen_ptr_extend (tmp, t));
        }
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      if (vcall_offset)
        {
          if (!CONST_OK_FOR_J (vcall_offset))
            {
              rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
              emit_move_insn (tmp2, vcall_offset_rtx);
              vcall_offset_rtx = tmp2;
            }
          emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
        }

      if (TARGET_ILP32)
        emit_move_insn (gen_rtx_REG (ptr_mode, 2),
                        gen_rtx_MEM (ptr_mode, tmp));
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      emit_insn (gen_adddi3 (this, this, tmp));
    }
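
  /* (The block above computes *(*this + vcall_offset): load the
     vtable pointer from *this, add VCALL_OFFSET, going through a
     scratch register whenever the offset does not fit the 22-bit "J"
     addl immediate, then load the adjustment word from the vtable and
     add it to "this".)  */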

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  emit_barrier ();
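
  /* (Splitting normally runs only after reload, so reload_completed
     is forced on here to let try_split break the sibcall pattern into
     its final post-reload shape, e.g. separating any gp handling from
     the branch itself.)  */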

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1, 0);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
ia64_struct_value_rtx (tree fntype,
                       int incoming ATTRIBUTE_UNUSED)
{
  if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}
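
/* Returning NULL_RTX tells the middle end to pass the return buffer's
   address as a hidden first argument, which lands in out0 per the
   Itanium C++ ABI case tested above; otherwise the address lives in
   the fixed register r8.  (Explanatory note on the
   TARGET_STRUCT_VALUE_RTX contract.)  */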

static bool
ia64_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode:
      return true;

    case SFmode:
    case DFmode:
    case XFmode:
      return true;

    case TFmode:
      return TARGET_HPUX;

    default:
      return false;
    }
}
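
/* (Note: TFmode, the 128-bit quad type that HP-UX uses for long
   double, is reported as a supported scalar mode only when
   TARGET_HPUX; other ia64 targets keep XFmode, the 80-bit extended
   type, as their widest floating mode, though the TFmode libfuncs set
   up earlier remain available for explicit uses such as
   __attribute__ ((mode (TF))).  Explanatory note; see the HPUX and
   SysV4 libfunc setup above.)  */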

#include "gt-ia64.h"