/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
		  David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;

/* Define the information needed to generate branch and scc insns.  This is
   stored from the compare operation.  */
struct rtx_def * ia64_compare_op0;
struct rtx_def * ia64_compare_op1;

/* Register names for ia64_expand_prologue.  */
static const char * const ia64_reg_numbers[96] =
{ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
  "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
  "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
  "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
  "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
  "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
  "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
  "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
  "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
  "r104","r105","r106","r107","r108","r109","r110","r111",
  "r112","r113","r114","r115","r116","r117","r118","r119",
  "r120","r121","r122","r123","r124","r125","r126","r127"};
/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_input_reg_names[8] =
{ "in0",  "in1",  "in2",  "in3",  "in4",  "in5",  "in6",  "in7" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
  "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
  "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
  "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
  "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
  "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
  "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
  "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
  "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
  "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* String used with the -mfixed-range= option.  */
const char *ia64_fixed_range_string;

/* Determines whether we use adds, addl, or movl to generate our
   TLS immediate offsets.  */
int ia64_tls_size = 22;
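/* For illustration (mapping assumed from the -mtls-size values 14, 22,
   and 64): an offset that fits in 14 bits can be applied with "adds",
   a 22-bit offset needs "addl", and a full 64-bit offset needs "movl";
   the default of 22 therefore selects addl-sized immediates.  */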
/* String used with the -mtls-size= option.  */
const char *ia64_tls_size_string;

/* Which cpu are we scheduling for.  */
enum processor_type ia64_tune;

/* String used with the -tune= option.  */
const char *ia64_tune_string;

/* Determines whether we run our final scheduling pass or not.  We always
   avoid the normal second scheduling pass.  */
static int ia64_flag_schedule_insns2;

/* Determines whether we run variable tracking in machine dependent
   reorg.  */
static int ia64_flag_var_tracking;

/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */
unsigned int ia64_section_threshold;

/* The following variable is used by the DFA insn scheduler.  The value is
   TRUE if we do insn bundling instead of insn scheduling.  */
int bundling_p = 0;
/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;	/* size of the stack frame, not including
				   the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;	/* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;	/* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size;  /* size of spill area for others.  */
  HARD_REG_SET mask;		/* mask of saved registers.  */
  unsigned int gr_used_mask;	/* mask of registers in use as gr spill
				   registers or long-term scratches.  */
  int n_spilled;		/* number of spilled registers.  */
  int reg_fp;			/* register for fp.  */
  int reg_save_b0;		/* save register for b0.  */
  int reg_save_pr;		/* save register for prs.  */
  int reg_save_ar_pfs;		/* save register for ar.pfs.  */
  int reg_save_ar_unat;		/* save register for ar.unat.  */
  int reg_save_ar_lc;		/* save register for ar.lc.  */
  int reg_save_gp;		/* save register for gp.  */
  int n_input_regs;		/* number of input registers used.  */
  int n_local_regs;		/* number of local registers used.  */
  int n_output_regs;		/* number of output registers used.  */
  int n_rotate_regs;		/* number of rotating registers used.  */

  char need_regstk;		/* true if a .regstk directive needed.  */
  char initialized;		/* true if the data is finalized.  */
};

/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;
static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx, rtx);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
static int find_gr_spill (int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);

static enum machine_mode hfa_element_mode (tree, int);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (tree, tree);
static bool ia64_rtx_costs (rtx, int, int, int *);
static void fix_range (const char *);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (tree);
static void process_epilogue (void);
static int process_set (FILE *, rtx);

static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
					 tree, rtx);
static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_end_prologue (FILE *);

static int ia64_issue_rate (void);
static int ia64_adjust_cost (rtx, rtx, rtx, int);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);

static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static unsigned bundle_state_hash (const void *);
static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);

static rtx get_next_important_insn (rtx, rtx);
static void bundling (FILE *, int, rtx, rtx);

static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static void ia64_file_start (void);

static void ia64_select_rtx_section (enum machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_unique_section (tree, int)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
					     unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
     ATTRIBUTE_UNUSED;

static void ia64_hpux_add_extern_decl (tree decl)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_file_end (void)
     ATTRIBUTE_UNUSED;
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;

static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
/* Table of valid machine attributes.  */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "syscall_linkage", 0, 0, false, true, true, NULL },
  { "model",           1, 1, true, false, false, ia64_handle_model_attribute },
  { NULL,              0, 0, false, false, false, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook

#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE hook_int_void_1

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
  ia64_first_cycle_multipass_dfa_lookahead_guard

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#endif

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#endif

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg

struct gcc_target targetm = TARGET_INITIALIZER;
/* Return 1 if OP is a valid operand for the MEM of a CALL insn.  */

int
call_operand (rtx op, enum machine_mode mode)
{
  if (mode != GET_MODE (op) && mode != VOIDmode)
    return 0;

  return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG
	  || (GET_CODE (op) == SUBREG && GET_CODE (XEXP (op, 0)) == REG));
}
/* Return 1 if OP refers to a symbol in the sdata section.  */

int
sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case CONST:
      if (GET_CODE (XEXP (op, 0)) != PLUS
	  || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
	return 0;
      op = XEXP (XEXP (op, 0), 0);
      /* FALLTHRU */

    case SYMBOL_REF:
      if (CONSTANT_POOL_ADDRESS_P (op))
	return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
      else
	return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);

    default:
      break;
    }

  return 0;
}

int
small_addr_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return SYMBOL_REF_SMALL_ADDR_P (op);
}
/* Return 1 if OP refers to a symbol, and is appropriate for a GOT load.  */

int
got_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) != PLUS)
	return 0;
      if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
	return 0;
      op = XEXP (op, 1);
      if (GET_CODE (op) != CONST_INT)
	return 0;

      return 1;

      /* Ok if we're not using GOT entries at all.  */
      if (TARGET_NO_PIC || TARGET_AUTO_PIC)
	return 1;

      /* "Ok" while emitting rtl, since otherwise we won't be provided
	 with the entire offset during emission, which makes it very
	 hard to split the offset into high and low parts.  */
      if (rtx_equal_function_value_matters)
	return 1;

      /* Force the low 14 bits of the constant to zero so that we do not
	 use up so many GOT entries.  */
      return (INTVAL (op) & 0x3fff) == 0;

    case SYMBOL_REF:
      if (SYMBOL_REF_SMALL_ADDR_P (op))
	return 0;
    case LABEL_REF:
      return 1;

    default:
      break;
    }
  return 0;
}

/* Return 1 if OP refers to a symbol.  */

int
symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;

    default:
      break;
    }
  return 0;
}
/* Return tls_model if OP refers to a TLS symbol.  */

int
tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;
  return SYMBOL_REF_TLS_MODEL (op);
}

/* Return 1 if OP refers to a function.  */

int
function_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (op))
    return 1;
  else
    return 0;
}
/* Return 1 if OP is setjmp or a similar function.  */

/* ??? This is an unsatisfying solution.  Should rethink.  */

int
setjmp_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  const char *name;
  int retval = 0;

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  name = XSTR (op, 0);

  /* The following code is borrowed from special_function_p in calls.c.  */

  /* Disregard prefix _, __ or __x.  */
  if (name[0] == '_')
    {
      if (name[1] == '_' && name[2] == 'x')
	name += 3;
      else if (name[1] == '_')
	name += 2;
      else
	name += 1;
    }

  if (name[0] == 's')
    {
      retval
	= ((name[1] == 'e'
	    && (! strcmp (name, "setjmp")
		|| ! strcmp (name, "setjmp_syscall")))
	   || (name[1] == 'i'
	       && ! strcmp (name, "sigsetjmp"))
	   || (name[1] == 'a'
	       && ! strcmp (name, "savectx")));
    }
  else if ((name[0] == 'q' && name[1] == 's'
	    && ! strcmp (name, "qsetjmp"))
	   || (name[0] == 'v' && name[1] == 'f'
	       && ! strcmp (name, "vfork")))
    retval = 1;

  return retval;
}
/* Return 1 if OP is a general operand, excluding tls symbolic operands.  */

int
move_operand (rtx op, enum machine_mode mode)
{
  return general_operand (op, mode) && !tls_symbolic_operand (op, mode);
}
/* Return 1 if OP is a register operand that is (or could be) a GR reg.  */

int
gr_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a register operand that is (or could be) an FR reg.  */

int
fr_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a register operand that is (or could be) a GR/FR reg.  */

int
grfr_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is (or could be) a GR reg.  */

int
gr_nonimmediate_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is (or could be) an FR reg.  */

int
fr_nonimmediate_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is a GR/FR reg.  */

int
grfr_nonimmediate_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
    }
  return 1;
}
/* Return 1 if OP is a GR register operand, or zero.  */

int
gr_reg_or_0_operand (rtx op, enum machine_mode mode)
{
  return (op == const0_rtx || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or a 5 bit immediate operand.  */

int
gr_reg_or_5bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 32)
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or a 6 bit immediate operand.  */

int
gr_reg_or_6bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or an 8 bit immediate operand.  */

int
gr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR/FR register operand, or an 8 bit immediate.  */

int
grfr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
	  || grfr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or an 8 bit adjusted immediate
   operand.  */

int
gr_reg_or_8bit_adjusted_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_L (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or is valid for both an 8 bit
   immediate and an 8 bit adjusted immediate operand.  This is necessary
   because when we emit a compare, we don't know what the condition will be,
   so we need the union of the immediates accepted by GT and LT.  */

int
gr_reg_or_8bit_and_adjusted_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))
	   && CONST_OK_FOR_L (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or a 14 bit immediate operand.  */

int
gr_reg_or_14bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_I (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or a 22 bit immediate operand.  */

int
gr_reg_or_22bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a 6 bit immediate operand.  */

int
shift_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)));
}

/* Return 1 if OP is a 5 bit immediate operand.  */

int
shift_32bit_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) >= 0 && INTVAL (op) < 32));
}

/* Return 1 if OP is a 2, 4, 8, or 16 immediate operand.  */

int
shladd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == 2 || INTVAL (op) == 4
	      || INTVAL (op) == 8 || INTVAL (op) == 16));
}
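/* For illustration: the shladd instruction shifts its first source
   left by a count of 1 to 4 bits before adding, so the multiplier
   visible at the rtl level is 2, 4, 8, or 16.  */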
/* Return 1 if OP is a -16, -8, -4, -1, 1, 4, 8, or 16 immediate operand.  */

int
fetchadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == -16 || INTVAL (op) == -8 ||
	      INTVAL (op) == -4 || INTVAL (op) == -1 ||
	      INTVAL (op) == 1 || INTVAL (op) == 4 ||
	      INTVAL (op) == 8 || INTVAL (op) == 16));
}
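/* For illustration: these eight values are exactly the increments the
   hardware fetchadd4/fetchadd8 instructions accept as immediates.  */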
/* Return 1 if OP is a floating-point constant zero, one, or a register.  */

int
fr_reg_or_fp01_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (op))
	  || fr_register_operand (op, mode));
}

/* Like nonimmediate_operand, but don't allow MEMs that try to use a
   POST_MODIFY with a REG as displacement.  */

int
destination_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM
      && GET_CODE (XEXP (op, 0)) == POST_MODIFY
      && GET_CODE (XEXP (XEXP (XEXP (op, 0), 1), 1)) == REG)
    return 0;
  return 1;
}
/* Like memory_operand, but don't allow post-increments.  */

int
not_postinc_memory_operand (rtx op, enum machine_mode mode)
{
  return (memory_operand (op, mode)
	  && GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != RTX_AUTOINC);
}
/* Return 1 if this is a comparison operator, which accepts a normal 8-bit
   signed immediate operand.  */

int
normal_comparison_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == EQ || code == NE
	      || code == GT || code == LE || code == GTU || code == LEU));
}

/* Return 1 if this is a comparison operator, which accepts an adjusted 8-bit
   signed immediate operand.  */

int
adjusted_comparison_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == LT || code == GE || code == LTU || code == GEU));
}

/* Return 1 if this is a signed inequality operator.  */

int
signed_inequality_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == GE || code == GT
	      || code == LE || code == LT));
}

/* Return 1 if this operator is valid for predication.  */

int
predicate_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && (code == EQ || code == NE));
}

/* Return 1 if this operator can be used in a conditional operation.  */

int
condop_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && (code == PLUS || code == MINUS || code == AND
	      || code == IOR || code == XOR));
}
/* Return 1 if this is the ar.lc register.  */

int
ar_lc_reg_operand (register rtx op, enum machine_mode mode)
{
  return (GET_MODE (op) == DImode
	  && (mode == DImode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_LC_REGNUM);
}

/* Return 1 if this is the ar.ccv register.  */

int
ar_ccv_reg_operand (register rtx op, enum machine_mode mode)
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_CCV_REGNUM);
}

/* Return 1 if this is the ar.pfs register.  */

int
ar_pfs_reg_operand (register rtx op, enum machine_mode mode)
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_PFS_REGNUM);
}
/* Like general_operand, but don't allow (mem (addressof)).  */

int
general_xfmode_operand (rtx op, enum machine_mode mode)
{
  if (! general_operand (op, mode))
    return 0;
  return 1;
}

/* Similarly.  */

int
destination_xfmode_operand (rtx op, enum machine_mode mode)
{
  if (! destination_operand (op, mode))
    return 0;
  return 1;
}

int
xfreg_or_fp01_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == SUBREG)
    return 0;
  return fr_reg_or_fp01_operand (op, mode);
}

/* Return 1 if OP is valid as a base register in a reg + offset address.  */

int
basereg_operand (rtx op, enum machine_mode mode)
{
  /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
     checks from pa.c basereg_operand as well?  Seems to be OK without them
     in test runs.  */

  return (register_operand (op, mode) &&
	  REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
}
typedef enum
  {
    ADDR_AREA_NORMAL,	/* normal address area */
    ADDR_AREA_SMALL	/* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
	return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}
static tree
ia64_handle_model_attribute (tree *node, tree name, tree args,
			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  ia64_addr_area addr_area = ADDR_AREA_NORMAL;
  ia64_addr_area area;
  tree arg, decl = *node;

  init_idents ();
  arg = TREE_VALUE (args);
  if (arg == small_ident1 || arg == small_ident2)
    {
      addr_area = ADDR_AREA_SMALL;
    }
  else
    {
      warning ("invalid argument of `%s' attribute",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  switch (TREE_CODE (decl))
    {
    case VAR_DECL:
      if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
	   == FUNCTION_DECL)
	  && !TREE_STATIC (decl))
	{
	  error ("%Jan address area attribute cannot be specified for "
		 "local variables", decl, decl);
	  *no_add_attrs = true;
	}
      area = ia64_get_addr_area (decl);
      if (area != ADDR_AREA_NORMAL && addr_area != area)
	{
	  error ("%Jaddress area of '%s' conflicts with previous "
		 "declaration", decl, decl);
	  *no_add_attrs = true;
	}
      break;

    case FUNCTION_DECL:
      error ("%Jaddress area attribute cannot be specified for functions",
	     decl, decl);
      *no_add_attrs = true;
      break;

    default:
      warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
      break;
    }

  return NULL_TREE;
}
static void
ia64_encode_addr_area (tree decl, rtx symbol)
{
  int flags;

  flags = SYMBOL_REF_FLAGS (symbol);
  switch (ia64_get_addr_area (decl))
    {
    case ADDR_AREA_NORMAL: break;
    case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
    default: abort ();
    }
  SYMBOL_REF_FLAGS (symbol) = flags;
}

static void
ia64_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) == VAR_DECL
      && GET_CODE (DECL_RTL (decl)) == MEM
      && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
    ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (rtx dst, rtx src)
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */
  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
}
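/* The constants accepted above are storable directly because the
   hardware supplies them in registers: GR r0 is hardwired to integer
   zero, and FR f0/f1 are hardwired to 0.0/1.0, which is the pair of
   values CONST_DOUBLE_OK_FOR_G matches.  */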
int
addp4_optimize_ok (rtx op1, rtx op2)
{
  return (basereg_operand (op1, GET_MODE(op1)) !=
	  basereg_operand (op2, GET_MODE(op2)));
}

/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
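/* Worked example: for the mask 0xff0 with a shift count of 4, op
   becomes 0xff after the shift and exact_log2 (0x100) returns 8, the
   width of the deposited field.  A mask such as 0xf0f0 fails: after
   the shift, op + 1 is not a power of two, so exact_log2 returns -1.  */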
/* Expand a symbolic constant load.  */

void
ia64_expand_load_address (rtx dest, rtx src)
{
  if (tls_symbolic_operand (src, VOIDmode))
    abort ();
  if (GET_CODE (dest) != REG)
    abort ();

  /* ILP32 mode still loads 64-bits of data from the GOT.  This avoids
     having to pointer-extend the value afterward.  Other forms of address
     computation below are also more natural to compute as 64-bit quantities.
     If we've been given an SImode destination register, change it.  */
  if (GET_MODE (dest) != Pmode)
    dest = gen_rtx_REG (Pmode, REGNO (dest));

  if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
      return;
    }
  else if (TARGET_AUTO_PIC)
    {
      emit_insn (gen_load_gprel64 (dest, src));
      return;
    }
  else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
    {
      emit_insn (gen_load_fptr (dest, src));
      return;
    }
  else if (sdata_symbolic_operand (src, VOIDmode))
    {
      emit_insn (gen_load_gprel (dest, src));
      return;
    }

  if (GET_CODE (src) == CONST
      && GET_CODE (XEXP (src, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
      && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0)
    {
      rtx sym = XEXP (XEXP (src, 0), 0);
      HOST_WIDE_INT ofs, hi, lo;

      /* Split the offset into a sign extended 14-bit low part
	 and a complementary high part.  */
      ofs = INTVAL (XEXP (XEXP (src, 0), 1));
      lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
      hi = ofs - lo;

      ia64_expand_load_address (dest, plus_constant (sym, hi));
      emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
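      /* Worked example: for ofs == 0x2345, lo == ((0x2345 & 0x3fff)
	 ^ 0x2000) - 0x2000 == -0x1cbb and hi == 0x2345 - lo == 0x4000,
	 so the recursive load handles the 16K-aligned sym+0x4000 while
	 the 14-bit signed remainder is applied with a plain add.  */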
    }
  else
    {
      rtx tmp;

      tmp = gen_rtx_HIGH (Pmode, src);
      tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));

      tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
    }
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static GTY(()) rtx thread_pointer_rtx;

static rtx
gen_thread_pointer (void)
{
  if (!thread_pointer_rtx)
    {
      thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
      RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
    }
  return thread_pointer_rtx;
}
static rtx
ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
{
  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
  rtx orig_op0 = op0;

  switch (tls_kind)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
      RTX_UNCHANGING_P (tga_op1) = 1;

      tga_op2 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
      tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
      RTX_UNCHANGING_P (tga_op2) = 1;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      if (GET_MODE (op0) != Pmode)
	op0 = tga_ret;
      emit_libcall_block (insns, op0, tga_ret, op1);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic.
	 If the call to __tls_get_addr is used only by a single symbol,
	 then we should (somehow) move the dtprel to the second arg
	 to avoid the extra add.  */
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
      RTX_UNCHANGING_P (tga_op1) = 1;

      tga_op2 = const0_rtx;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_LD_BASE);
      tmp = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, tmp, tga_ret, tga_eqv);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_dtprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, tmp, op0));
	}
      else
	emit_insn (gen_add_dtprel (op0, tmp, op1));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_tprel (tmp, op1));
      tmp = gen_rtx_MEM (Pmode, tmp);
      RTX_UNCHANGING_P (tmp) = 1;
      tmp = force_reg (Pmode, tmp);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_tprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
	}
      else
	emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
      break;

    default:
      abort ();
    }

  if (orig_op0 == op0)
    return NULL_RTX;
  if (GET_MODE (orig_op0) == Pmode)
    return op0;
  return gen_lowpart (GET_MODE (orig_op0), op0);
}
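/* In summary, the four models above expand roughly as follows:
   global-dynamic calls __tls_get_addr (dtpmod, dtprel); local-dynamic
   makes one __tls_get_addr (dtpmod, 0) call for the module base and
   adds each symbol's dtprel offset locally; initial-exec loads the
   tprel offset from the GOT and adds the thread pointer (r13);
   local-exec adds a link-time tprel offset to r13 directly.  */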
rtx
ia64_expand_move (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
    op1 = force_reg (mode, op1);

  if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
    {
      enum tls_model tls_kind;
      if ((tls_kind = tls_symbolic_operand (op1, VOIDmode)))
	return ia64_expand_tls_address (tls_kind, op0, op1);

      if (!TARGET_NO_PIC && reload_completed)
	{
	  ia64_expand_load_address (op0, op1);
	  return NULL_RTX;
	}
    }

  return op1;
}
/* Split a move from OP1 to OP0 conditional on COND.  */

void
ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
{
  rtx insn, first = get_last_insn ();

  emit_move_insn (op0, op1);

  for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
					  PATTERN (insn));
}
/* Split a post-reload TImode or TFmode reference into two DImode
   components.  This is made extra difficult by the fact that we do
   not get any scratch registers to work with, because reload cannot
   be prevented from giving us a scratch that overlaps the register
   pair involved.  So instead, when addressing memory, we tweak the
   pointer register up and back down with POST_INCs.  Or up and not
   back down when we can get away with it.

   REVERSED is true when the loads must be done in reversed order
   (high word first) for correctness.  DEAD is true when the pointer
   dies with the second insn we generate and therefore the second
   address must not carry a postmodify.

   May return an insn which is to be emitted after the moves.  */
static rtx
ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
{
  rtx fixup = 0;

  switch (GET_CODE (in))
    {
    case REG:
      out[reversed] = gen_rtx_REG (DImode, REGNO (in));
      out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      /* Cannot occur reversed.  */
      if (reversed) abort ();

      if (GET_MODE (in) != TFmode)
	split_double (in, &out[0], &out[1]);
      else
	/* split_double does not understand how to split a TFmode
	   quantity into a pair of DImode constants.  */
	{
	  REAL_VALUE_TYPE r;
	  unsigned HOST_WIDE_INT p[2];
	  long l[4];  /* TFmode is 128 bits */

	  REAL_VALUE_FROM_CONST_DOUBLE (r, in);
	  real_to_target (l, &r, TFmode);

	  if (FLOAT_WORDS_BIG_ENDIAN)
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
	      p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
	    }
	  else
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
	      p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
	    }
	  out[0] = GEN_INT (p[0]);
	  out[1] = GEN_INT (p[1]);
	}
      break;

    case MEM:
      {
	rtx base = XEXP (in, 0);
	rtx offset;

	switch (GET_CODE (base))
	  {
	  case REG:
	    if (!reversed)
	      {
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
		out[1] = adjust_automodify_address
		  (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
	      }
	    else
	      {
		/* Reversal requires a pre-increment, which can only
		   be done as a separate insn.  */
		emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
		out[1] = adjust_address (in, DImode, 0);
	      }
	    break;

	  case POST_INC:
	    if (reversed || dead) abort ();
	    /* Just do the increment in two steps.  */
	    out[0] = adjust_automodify_address (in, DImode, 0, 0);
	    out[1] = adjust_automodify_address (in, DImode, 0, 8);
	    break;

	  case POST_DEC:
	    if (reversed || dead) abort ();
	    /* Add 8, subtract 24.  */
	    base = XEXP (base, 0);
	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
	    out[1] = adjust_automodify_address
	      (in, DImode,
	       gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
	       8);
	    break;

	  case POST_MODIFY:
	    if (reversed || dead) abort ();
	    /* Extract and adjust the modification.  This case is
	       trickier than the others, because we might have an
	       index register, or we might have a combined offset that
	       doesn't fit a signed 9-bit displacement field.  We can
	       assume the incoming expression is already legitimate.  */
	    offset = XEXP (base, 1);
	    base = XEXP (base, 0);

	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);

	    if (GET_CODE (XEXP (offset, 1)) == REG)
	      {
		/* Can't adjust the postmodify to match.  Emit the
		   original, then a separate addition insn.  */
		out[1] = adjust_automodify_address (in, DImode, 0, 8);
		fixup = gen_adddi3 (base, base, GEN_INT (-8));
	      }
	    else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
	      abort ();
	    else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
	      {
		/* Again the postmodify cannot be made to match, but
		   in this case it's more efficient to get rid of the
		   postmodify entirely and fix up with an add insn.  */
		out[1] = adjust_automodify_address (in, DImode, base, 8);
		fixup = gen_adddi3 (base, base,
				    GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
	      }
	    else
	      {
		/* Combined offset still fits in the displacement field.
		   (We cannot overflow it at the high end.)  */
		out[1] = adjust_automodify_address
		  (in, DImode,
		   gen_rtx_POST_MODIFY (Pmode, base,
		     gen_rtx_PLUS (Pmode, base,
				   GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
		   8);
	      }
	    break;

	  default:
	    abort ();
	  }
	break;
      }

    default:
      abort ();
    }

  return fixup;
}
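/* For illustration, a TImode load from (mem (reg p)) in the common
   non-reversed case ends up as the pair

	ld8 out0 = [p], 8
	ld8 out1 = [p], -8

   with the trailing post-decrement dropped when p is dead, and with
   the returned fixup add taking its place when the original address
   was a POST_MODIFY that could not be adjusted to match.  */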
/* Split a TImode or TFmode move instruction after reload.
   This is used by *movtf_internal and *movti_internal.  */
void
ia64_split_tmode_move (rtx operands[])
{
  rtx in[2], out[2], insn;
  rtx fixup[2];
  bool dead = false;
  bool reversed = false;

  /* It is possible for reload to decide to overwrite a pointer with
     the value it points to.  In that case we have to do the loads in
     the appropriate order so that the pointer is not destroyed too
     early.  Also we must not generate a postmodify for that second
     load, or rws_access_regno will abort.  */
  if (GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      rtx base = XEXP (operands[1], 0);
      while (GET_CODE (base) != REG)
	base = XEXP (base, 0);

      if (REGNO (base) == REGNO (operands[0]))
	reversed = true;
      dead = true;
    }
  /* Another reason to do the moves in reversed order is if the first
     element of the target register pair is also the second element of
     the source register pair.  */
  if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    reversed = true;

  fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
  fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);

#define MAYBE_ADD_REG_INC_NOTE(INSN, EXP)				\
  if (GET_CODE (EXP) == MEM						\
      && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_INC			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_DEC))			\
    REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC,			\
					  XEXP (XEXP (EXP, 0), 0),	\
					  REG_NOTES (INSN))

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[0]);

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[1]);

  if (fixup[0])
    emit_insn (fixup[0]);
  if (fixup[1])
    emit_insn (fixup[1]);

#undef MAYBE_ADD_REG_INC_NOTE
}
/* ??? Fixing GR->FR XFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from SECONDARY_MEMORY_NEEDED or the second from
   SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:XF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */

rtx
spill_xfmode_operand (rtx in, int force)
{
  if (GET_CODE (in) == SUBREG
      && GET_MODE (SUBREG_REG (in)) == TImode
      && GET_CODE (SUBREG_REG (in)) == REG)
    {
      rtx memt = assign_stack_temp (TImode, 16, 0);
      emit_move_insn (memt, SUBREG_REG (in));
      return adjust_address (memt, XFmode, 0);
    }
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (XFmode, 16, 0);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
/* Emit comparison instruction if necessary, returning the expression
   that holds the compare result in the proper mode.  */

static GTY(()) rtx cmptf_libfunc;

rtx
ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
{
  rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
  rtx cmp;

  /* If we have a BImode input, then we already have a compare result, and
     do not need to emit another comparison.  */
  if (GET_MODE (op0) == BImode)
    {
      if ((code == NE || code == EQ) && op1 == const0_rtx)
	cmp = op0;
      else
	abort ();
    }
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
  else if (GET_MODE (op0) == TFmode)
    {
      enum qfcmp_magic {
	QCMP_INV = 1,	/* Raise FP_INVALID on SNaN as a side effect.  */
	QCMP_UNORD = 2,
	QCMP_EQ = 4,
	QCMP_LT = 8,
	QCMP_GT = 16
      } magic;
      enum rtx_code ncode;
      rtx ret, insns;
      if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
	abort ();
      switch (code)
	{
	  /* 1 = equal, 0 = not equal.  Equality operators do
	     not raise FP_INVALID when given an SNaN operand.  */
	case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
	case NE:        magic = QCMP_EQ;                  ncode = EQ; break;
	  /* isunordered() from C99.  */
	case UNORDERED: magic = QCMP_UNORD;               ncode = NE; break;
	  /* Relational operators raise FP_INVALID when given
	     an SNaN operand.  */
	case LT:        magic = QCMP_LT        |QCMP_INV; ncode = NE; break;
	case LE:        magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	case GT:        magic = QCMP_GT        |QCMP_INV; ncode = NE; break;
	case GE:        magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	  /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
	     Expanders for buneq etc. would have to be added to ia64.md
	     for this to be useful.  */
	default: abort ();
	}

      start_sequence ();

      ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
				     op0, TFmode, op1, TFmode,
				     GEN_INT (magic), DImode);
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (ncode, BImode,
					      ret, const0_rtx)));

      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, cmp, cmp,
			  gen_rtx_fmt_ee (code, BImode, op0, op1));
      code = NE;
    }
  else
    {
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (code, BImode, op0, op1)));
      code = NE;
    }

  return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
}
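/* For example, a TFmode "a < b" on HPUX becomes a call
   _U_Qfcmp (a, b, QCMP_LT | QCMP_INV) followed by a BImode test of the
   returned integer against zero; the NE of that test is what the
   branch or setcc pattern finally consumes.  */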
/* Emit the appropriate sequence for a call.  */

void
ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
		  int sibcall_p)
{
  rtx insn, b0;

  addr = XEXP (addr, 0);
  addr = convert_memory_address (DImode, addr);
  b0 = gen_rtx_REG (DImode, R_BR (0));

  /* ??? Should do this for functions known to bind local too.  */
  if (TARGET_NO_PIC || TARGET_AUTO_PIC)
    {
      if (sibcall_p)
	insn = gen_sibcall_nogp (addr);
      else if (!retval)
	insn = gen_call_nogp (addr, b0);
      else
	insn = gen_call_value_nogp (retval, addr, b0);
      insn = emit_call_insn (insn);
    }
  else
    {
      if (sibcall_p)
	insn = gen_sibcall_gp (addr);
      else if (!retval)
	insn = gen_call_gp (addr, b0);
      else
	insn = gen_call_value_gp (retval, addr, b0);
      insn = emit_call_insn (insn);

      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
    }

  if (sibcall_p)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
}
void
ia64_reload_gp (void)
{
  rtx tmp;

  if (current_frame_info.reg_save_gp)
    tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
  else
    {
      HOST_WIDE_INT offset;

      offset = (current_frame_info.spill_cfa_off
		+ current_frame_info.spill_size);
      if (frame_pointer_needed)
	{
	  tmp = hard_frame_pointer_rtx;
	  offset = -offset;
	}
      else
	{
	  tmp = stack_pointer_rtx;
	  offset = current_frame_info.total_size - offset;
	}

      if (CONST_OK_FOR_I (offset))
	emit_insn (gen_adddi3 (pic_offset_table_rtx,
			       tmp, GEN_INT (offset)));
      else
	{
	  emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
	  emit_insn (gen_adddi3 (pic_offset_table_rtx,
				 pic_offset_table_rtx, tmp));
	}

      tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
    }

  emit_move_insn (pic_offset_table_rtx, tmp);
}
void
ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
		 rtx scratch_b, int noreturn_p, int sibcall_p)
{
  rtx insn;
  bool is_desc = false;

  /* If we find we're calling through a register, then we're actually
     calling through a descriptor, so load up the values.  */
  if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
    {
      rtx tmp;
      bool addr_dead_p;

      /* ??? We are currently constrained to *not* use peep2, because
	 we can legitimately change the global lifetime of the GP
	 (in the form of killing where previously live).  This is
	 because a call through a descriptor doesn't use the previous
	 value of the GP, while a direct call does, and we do not
	 commit to either form until the split here.

	 That said, this means that we lack precise life info for
	 whether ADDR is dead after this call.  This is not terribly
	 important, since we can fix things up essentially for free
	 with the POST_DEC below, but it's nice to not use it when we
	 can immediately tell it's not necessary.  */
      addr_dead_p = ((noreturn_p || sibcall_p
		      || TEST_HARD_REG_BIT (regs_invalidated_by_call,
					    REGNO (addr)))
		     && !FUNCTION_ARG_REGNO_P (REGNO (addr)));

      /* Load the code address into scratch_b.  */
      tmp = gen_rtx_POST_INC (Pmode, addr);
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (scratch_r, tmp);
      emit_move_insn (scratch_b, scratch_r);
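      /* A function descriptor is two 8-byte words: the code entry
	 address followed by the callee's gp value; the POST_INC above
	 leaves ADDR pointing at the gp word, which is loaded next.  */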
      /* Load the GP address.  If ADDR is not dead here, then we must
	 revert the change made above via the POST_INCREMENT.  */
      if (!addr_dead_p)
	tmp = gen_rtx_POST_DEC (Pmode, addr);
      else
	tmp = addr;
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (pic_offset_table_rtx, tmp);

      is_desc = true;
      addr = scratch_b;
    }

  if (sibcall_p)
    insn = gen_sibcall_nogp (addr);
  else if (retval)
    insn = gen_call_value_nogp (retval, addr, retaddr);
  else
    insn = gen_call_nogp (addr, retaddr);
  emit_call_insn (insn);

  if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
    ia64_reload_gp ();
}
/* Begin the assembly file.  */

static void
ia64_file_start (void)
{
  default_file_start ();
  emit_safe_across_calls ();
}

void
emit_safe_across_calls (void)
{
  unsigned int rs, re;
  int out_state;

  rs = 1;
  out_state = 0;
  while (1)
    {
      while (rs < 64 && call_used_regs[PR_REG (rs)])
	rs++;
      if (rs >= 64)
	break;
      for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
	continue;
      if (out_state == 0)
	{
	  fputs ("\t.pred.safe_across_calls ", asm_out_file);
	  out_state = 1;
	}
      else
	fputc (',', asm_out_file);
      if (re == rs + 1)
	fprintf (asm_out_file, "p%u", rs);
      else
	fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
      rs = re + 1;
    }
  if (out_state)
    fputc ('\n', asm_out_file);
}
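/* With the usual ia64 call-used predicate set, this emits, e.g.,

	.pred.safe_across_calls p1-p5,p16-p63

   telling the assembler which predicate registers survive calls.  */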
/* Helper function for ia64_compute_frame_size: find an appropriate general
   register to spill some special register to.  SPECIAL_SPILL_MASK contains
   bits in GR0 to GR31 that have already been allocated by this routine.
   TRY_LOCALS is true if we should attempt to locate a local regnum.  */

static int
find_gr_spill (int try_locals)
{
  int regno;

  /* If this is a leaf function, first try an otherwise unused
     call-clobbered register.  */
  if (current_function_is_leaf)
    {
      for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
	if (! regs_ever_live[regno]
	    && call_used_regs[regno]
	    && ! fixed_regs[regno]
	    && ! global_regs[regno]
	    && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	  {
	    current_frame_info.gr_used_mask |= 1 << regno;
	    return regno;
	  }
    }

  if (try_locals)
    {
      regno = current_frame_info.n_local_regs;
      /* If there is a frame pointer, then we can't use loc79, because
	 that is HARD_FRAME_POINTER_REGNUM.  In particular, see the
	 reg_name switching code in ia64_expand_prologue.  */
      if (regno < (80 - frame_pointer_needed))
	{
	  current_frame_info.n_local_regs = regno + 1;
	  return LOC_REG (0) + regno;
	}
    }

  /* Failed to find a general register to spill to.  Must use stack.  */
  return 0;
}
/* In order to make for nice schedules, we try to allocate every temporary
   to a different register.  We must of course stay away from call-saved,
   fixed, and global registers.  We must also stay away from registers
   allocated in current_frame_info.gr_used_mask, since those include regs
   used all through the prologue.

   Any register allocated here must be used immediately.  The idea is to
   aid scheduling, not to solve data flow problems.  */

static int last_scratch_gr_reg;

static int
next_scratch_gr_reg (void)
{
  int i, regno;

  for (i = 0; i < 32; ++i)
    {
      regno = (last_scratch_gr_reg + i + 1) & 31;
      if (call_used_regs[regno]
	  && ! fixed_regs[regno]
	  && ! global_regs[regno]
	  && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	{
	  last_scratch_gr_reg = regno;
	  return regno;
	}
    }

  /* There must be _something_ available.  */
  abort ();
}
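/* For illustration: with last_scratch_gr_reg == 30, successive calls
   hand out r31 and then wrap around to probe r0, r1, ... in turn,
   skipping fixed and global registers, so back-to-back spills land in
   different scratch registers and schedule independently.  */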
/* Helper function for ia64_compute_frame_size, called through
   diddle_return_value.  Mark REG in current_frame_info.gr_used_mask.  */

static void
mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno = REGNO (reg);
  if (regno < 32)
    {
      unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
      for (i = 0; i < n; ++i)
	current_frame_info.gr_used_mask |= 1 << (regno + i);
    }
}
/* Returns the number of bytes offset between the frame pointer and the stack
   pointer for the current function.  SIZE is the number of bytes of space
   needed for local variables.  */

static void
ia64_compute_frame_size (HOST_WIDE_INT size)
{
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT spill_size = 0;
  HOST_WIDE_INT extra_spill_size = 0;
  HOST_WIDE_INT pretend_args_size;
  HARD_REG_SET mask;
  int n_spilled = 0;
  int spilled_gr_p = 0;
  int spilled_fr_p = 0;
  unsigned int regno;
  int i;

  if (current_frame_info.initialized)
    return;

  memset (&current_frame_info, 0, sizeof current_frame_info);
  CLEAR_HARD_REG_SET (mask);

  /* Don't allocate scratches to the return register.  */
  diddle_return_value (mark_reg_gr_used_mask, NULL);

  /* Don't allocate scratches to the EH scratch registers.  */
  if (cfun->machine->ia64_eh_epilogue_sp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
  if (cfun->machine->ia64_eh_epilogue_bsp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);

  /* Find the size of the register stack frame.  We have only 80 local
     registers, because we reserve 8 for the inputs and 8 for the
     outputs.  */

  /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
     since we'll be adjusting that down later.  */
  regno = LOC_REG (78) + ! frame_pointer_needed;
  for (; regno >= LOC_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;

  /* For functions marked with the syscall_linkage attribute, we must mark
     all eight input registers as in use, so that locals aren't visible to
     the caller.  */

  if (cfun->machine->n_varargs > 0
      || lookup_attribute ("syscall_linkage",
			   TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
    current_frame_info.n_input_regs = 8;
  else
    {
      for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
	if (regs_ever_live[regno])
	  break;
      current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
    }

  for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  i = regno - OUT_REG (0) + 1;

  /* When -p profiling, we need one output register for the mcount argument.
     Likewise for -a profiling for the bb_init_func argument.  For -ax
     profiling, we need two output registers for the two bb_init_trace_func
     arguments.  */
  if (current_function_profile)
    i = MAX (i, 1);
  current_frame_info.n_output_regs = i;

  /* ??? No rotating register support yet.  */
  current_frame_info.n_rotate_regs = 0;

  /* Discover which registers need spilling, and how much room that
     will take.  Begin with floating point and general registers,
     which will always wind up on the stack.  */

  for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 16;
	n_spilled += 1;
	spilled_fr_p = 1;
      }

  for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
	spilled_gr_p = 1;
      }

  for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      {
	SET_HARD_REG_BIT (mask, regno);
	spill_size += 8;
	n_spilled += 1;
      }

  /* Now come all special registers that might get saved in other
     general registers.  */

  if (frame_pointer_needed)
    {
      current_frame_info.reg_fp = find_gr_spill (1);
      /* If we did not get a register, then we take LOC79.  This is guaranteed
	 to be free, even if regs_ever_live is already set, because this is
	 HARD_FRAME_POINTER_REGNUM.  This requires incrementing n_local_regs,
	 as we don't count loc79 above.  */
      if (current_frame_info.reg_fp == 0)
	{
	  current_frame_info.reg_fp = LOC_REG (79);
	  current_frame_info.n_local_regs++;
	}
    }

  if (! current_function_is_leaf)
    {
      /* Emit a save of BR0 if we call other functions.  Do this even
	 if this function doesn't return, as EH depends on this to be
	 able to unwind the stack.  */
      SET_HARD_REG_BIT (mask, BR_REG (0));

      current_frame_info.reg_save_b0 = find_gr_spill (1);
      if (current_frame_info.reg_save_b0 == 0)
	{
	  spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for ar.pfs.  */
      SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
      current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
      if (current_frame_info.reg_save_ar_pfs == 0)
	{
	  extra_spill_size += 8;
	  n_spilled += 1;
	}

      /* Similarly for gp.  Note that if we're calling setjmp, the stacked
	 registers are clobbered, so we fall back to the stack.  */
      current_frame_info.reg_save_gp
	= (current_function_calls_setjmp ? 0 : find_gr_spill (1));
      if (current_frame_info.reg_save_gp == 0)
	{
	  SET_HARD_REG_BIT (mask, GR_REG (1));
	  spill_size += 8;
	  n_spilled += 1;
	}
    }
  else
    {
      if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
	{
	  SET_HARD_REG_BIT (mask, BR_REG (0));
	  spill_size += 8;
	  n_spilled += 1;
	}

      if (regs_ever_live[AR_PFS_REGNUM])
	{
	  SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
	  current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
	  if (current_frame_info.reg_save_ar_pfs == 0)
	    {
	      extra_spill_size += 8;
	      n_spilled += 1;
	    }
	}
    }

  /* Unwind descriptor hackery: things are most efficient if we allocate
     consecutive GR save registers for RP, PFS, FP in that order.  However,
     it is absolutely critical that FP get the only hard register that's
     guaranteed to be free, so we allocated it first.  If all three did
     happen to be allocated hard regs, and are consecutive, rearrange them
     into the preferred order now.  */
  if (current_frame_info.reg_fp != 0
      && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
      && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
    {
      current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
      current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
      current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
    }

  /* See if we need to store the predicate register block.  */
  for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
    if (regs_ever_live[regno] && ! call_used_regs[regno])
      break;
  if (regno <= PR_REG (63))
    {
      SET_HARD_REG_BIT (mask, PR_REG (0));
      current_frame_info.reg_save_pr = find_gr_spill (1);
2214 if (current_frame_info.reg_save_pr == 0)
2216 extra_spill_size += 8;
2220 /* ??? Mark them all as used so that register renaming and such
2221 are free to use them. */
2222 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2223 regs_ever_live[regno] = 1;
2226 /* If we're forced to use st8.spill, we're forced to save and restore
2227 ar.unat as well. The check for existing liveness allows inline asm
2228 to touch ar.unat. */
2229 if (spilled_gr_p || cfun->machine->n_varargs
2230 || regs_ever_live[AR_UNAT_REGNUM])
2232 regs_ever_live[AR_UNAT_REGNUM] = 1;
2233 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2234 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2235 if (current_frame_info.reg_save_ar_unat == 0)
2237 extra_spill_size += 8;
2242 if (regs_ever_live[AR_LC_REGNUM])
2244 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2245 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2246 if (current_frame_info.reg_save_ar_lc == 0)
2248 extra_spill_size += 8;
2253 /* If we have an odd number of words of pretend arguments written to
2254 the stack, then the FR save area will be unaligned. We round the
2255 size of this area up to keep things 16 byte aligned. */
2257 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2259 pretend_args_size = current_function_pretend_args_size;
2261 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2262 + current_function_outgoing_args_size);
2263 total_size = IA64_STACK_ALIGN (total_size);
2265 /* We always use the 16-byte scratch area provided by the caller, but
2266 if we are a leaf function, there's no one to which we need to provide a scratch area. */
2268 if (current_function_is_leaf)
2269 total_size = MAX (0, total_size - 16);
2271 current_frame_info.total_size = total_size;
2272 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2273 current_frame_info.spill_size = spill_size;
2274 current_frame_info.extra_spill_size = extra_spill_size;
2275 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2276 current_frame_info.n_spilled = n_spilled;
2277 current_frame_info.initialized = reload_completed;
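/* Worked example with hypothetical numbers: a non-leaf function with 24
   bytes of locals (SIZE), two 8-byte register spills (spill_size == 16),
   no extra spills, no pretend args, and no outgoing args gets

     total_size = IA64_STACK_ALIGN (16 + 0 + 24 + 0 + 0)
                = IA64_STACK_ALIGN (40) = 48.

   A leaf function with the same frame reuses the caller's 16-byte
   scratch area instead: MAX (0, 48 - 16) == 32. */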
2280 /* Compute the initial difference between the specified pair of registers. */
2283 ia64_initial_elimination_offset (int from, int to)
2285 HOST_WIDE_INT offset;
2287 ia64_compute_frame_size (get_frame_size ());
2290 case FRAME_POINTER_REGNUM:
2291 if (to == HARD_FRAME_POINTER_REGNUM)
2293 if (current_function_is_leaf)
2294 offset = -current_frame_info.total_size;
2296 offset = -(current_frame_info.total_size
2297 - current_function_outgoing_args_size - 16);
2299 else if (to == STACK_POINTER_REGNUM)
2301 if (current_function_is_leaf)
2304 offset = 16 + current_function_outgoing_args_size;
2310 case ARG_POINTER_REGNUM:
2311 /* Arguments start above the 16 byte save area, unless stdarg
2312 in which case we store through the 16 byte save area. */
2313 if (to == HARD_FRAME_POINTER_REGNUM)
2314 offset = 16 - current_function_pretend_args_size;
2315 else if (to == STACK_POINTER_REGNUM)
2316 offset = (current_frame_info.total_size
2317 + 16 - current_function_pretend_args_size);
2329 /* If there are more than a trivial number of register spills, we use
2330 two interleaved iterators so that we can get two memory references per insn group.
2333 In order to simplify things in the prologue and epilogue expanders,
2334 we use helper functions to fix up the memory references after the
2335 fact with the appropriate offsets to a POST_MODIFY memory mode.
2336 The following data structure tracks the state of the two iterators
2337 while insns are being emitted. */
2339 struct spill_fill_data
2341 rtx init_after; /* point at which to emit initializations */
2342 rtx init_reg[2]; /* initial base register */
2343 rtx iter_reg[2]; /* the iterator registers */
2344 rtx *prev_addr[2]; /* address of last memory use */
2345 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2346 HOST_WIDE_INT prev_off[2]; /* last offset */
2347 int n_iter; /* number of iterators in use */
2348 int next_iter; /* next iterator to use */
2349 unsigned int save_gr_used_mask;
2352 static struct spill_fill_data spill_fill_data;
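/* Illustrative sketch of the interleaving: with more than two spills,
   setup_spill_pointers allocates two iterator registers and
   spill_restore_mem alternates between them, so consecutive saves use
   independent address registers and may issue in the same insn group.
   After the POST_MODIFY fixups, the emitted stream looks roughly like

     st8 [iter0] = r4, 16
     st8 [iter1] = r5, 16
     st8 [iter0] = r6, 16
     ...

   where the ", 16" post-increment is the distance between consecutive
   slots handled by the same iterator. */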
2355 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2359 spill_fill_data.init_after = get_last_insn ();
2360 spill_fill_data.init_reg[0] = init_reg;
2361 spill_fill_data.init_reg[1] = init_reg;
2362 spill_fill_data.prev_addr[0] = NULL;
2363 spill_fill_data.prev_addr[1] = NULL;
2364 spill_fill_data.prev_insn[0] = NULL;
2365 spill_fill_data.prev_insn[1] = NULL;
2366 spill_fill_data.prev_off[0] = cfa_off;
2367 spill_fill_data.prev_off[1] = cfa_off;
2368 spill_fill_data.next_iter = 0;
2369 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2371 spill_fill_data.n_iter = 1 + (n_spills > 2);
2372 for (i = 0; i < spill_fill_data.n_iter; ++i)
2374 int regno = next_scratch_gr_reg ();
2375 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2376 current_frame_info.gr_used_mask |= 1 << regno;
2381 finish_spill_pointers (void)
2383 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2387 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2389 int iter = spill_fill_data.next_iter;
2390 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2391 rtx disp_rtx = GEN_INT (disp);
2394 if (spill_fill_data.prev_addr[iter])
2396 if (CONST_OK_FOR_N (disp))
2398 *spill_fill_data.prev_addr[iter]
2399 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2400 gen_rtx_PLUS (DImode,
2401 spill_fill_data.iter_reg[iter],
2403 REG_NOTES (spill_fill_data.prev_insn[iter])
2404 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2405 REG_NOTES (spill_fill_data.prev_insn[iter]));
2409 /* ??? Could use register post_modify for loads. */
2410 if (! CONST_OK_FOR_I (disp))
2412 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2413 emit_move_insn (tmp, disp_rtx);
2416 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2417 spill_fill_data.iter_reg[iter], disp_rtx));
2420 /* Micro-optimization: if we've created a frame pointer, it's at
2421 CFA 0, which may allow the real iterator to be initialized lower,
2422 slightly increasing parallelism. Also, if there are few saves
2423 it may eliminate the iterator entirely. */
2425 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2426 && frame_pointer_needed)
2428 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2429 set_mem_alias_set (mem, get_varargs_alias_set ());
2437 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2438 spill_fill_data.init_reg[iter]);
2443 if (! CONST_OK_FOR_I (disp))
2445 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2446 emit_move_insn (tmp, disp_rtx);
2450 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2451 spill_fill_data.init_reg[iter],
2458 /* Careful for being the first insn in a sequence. */
2459 if (spill_fill_data.init_after)
2460 insn = emit_insn_after (seq, spill_fill_data.init_after);
2463 rtx first = get_insns ();
2465 insn = emit_insn_before (seq, first);
2467 insn = emit_insn (seq);
2469 spill_fill_data.init_after = insn;
2471 /* If DISP is 0, we may or may not have a further adjustment
2472 afterward. If we do, then the load/store insn may be modified
2473 to be a post-modify. If we don't, then this copy may be
2474 eliminated by copyprop_hardreg_forward, which makes this
2475 insn garbage, which runs afoul of the sanity check in
2476 propagate_one_insn. So mark this insn as legal to delete. */
2478 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2482 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2484 /* ??? Not all of the spills are for varargs, but some of them are.
2485 The rest of the spills belong in an alias set of their own. But
2486 it doesn't actually hurt to include them here. */
2487 set_mem_alias_set (mem, get_varargs_alias_set ());
2489 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2490 spill_fill_data.prev_off[iter] = cfa_off;
2492 if (++iter >= spill_fill_data.n_iter)
2494 spill_fill_data.next_iter = iter;
2500 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2503 int iter = spill_fill_data.next_iter;
2506 mem = spill_restore_mem (reg, cfa_off);
2507 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2508 spill_fill_data.prev_insn[iter] = insn;
2515 RTX_FRAME_RELATED_P (insn) = 1;
2517 /* Don't even pretend that the unwind code can intuit its way
2518 through a pair of interleaved post_modify iterators. Just
2519 provide the correct answer. */
2521 if (frame_pointer_needed)
2523 base = hard_frame_pointer_rtx;
2528 base = stack_pointer_rtx;
2529 off = current_frame_info.total_size - cfa_off;
2533 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2534 gen_rtx_SET (VOIDmode,
2535 gen_rtx_MEM (GET_MODE (reg),
2536 plus_constant (base, off)),
2543 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2545 int iter = spill_fill_data.next_iter;
2548 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2549 GEN_INT (cfa_off)));
2550 spill_fill_data.prev_insn[iter] = insn;
2553 /* Wrapper functions that discard the CONST_INT spill offset. These
2554 exist so that we can give gr_spill/gr_fill the offset they need and
2555 use a consistent function interface. */
2558 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2560 return gen_movdi (dest, src);
2564 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2566 return gen_fr_spill (dest, src);
2570 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2572 return gen_fr_restore (dest, src);
2575 /* Called after register allocation to add any instructions needed for the
2576 prologue. Using a prologue insn is favored compared to putting all of the
2577 instructions in output_function_prologue(), since it allows the scheduler
2578 to intermix instructions with the saves of the caller saved registers. In
2579 some cases, it might be necessary to emit a barrier instruction as the last
2580 insn to prevent such scheduling.
2582 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2583 so that the debug info generation code can handle them properly.
2585 The register save area is laid out like so:
2587 [ varargs spill area ]
2588 [ fr register spill area ]
2589 [ br register spill area ]
2590 [ ar register spill area ]
2591 [ pr register spill area ]
2592 [ gr register spill area ] */
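/* For example (illustrative): cfa_off starts at spill_cfa_off
   + spill_size + extra_spill_size (the bottom of the save area), drops
   by 8 for each pr/ar/gr/br slot and by 16 for each fr slot, and must
   land back on spill_cfa_off once the last register has been spilled;
   both expanders check these invariants as they go. */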
2594 /* ??? Get inefficient code when the frame size is larger than can fit in an
2595 adds instruction. */
2598 ia64_expand_prologue (void)
2600 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2601 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2604 ia64_compute_frame_size (get_frame_size ());
2605 last_scratch_gr_reg = 15;
2607 /* If there is no epilogue, then we don't need some prologue insns.
2608 We need to avoid emitting the dead prologue insns, because flow
2609 will complain about them. */
2614 for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
2615 if ((e->flags & EDGE_FAKE) == 0
2616 && (e->flags & EDGE_FALLTHRU) != 0)
2618 epilogue_p = (e != NULL);
2623 /* Set the local, input, and output register names. We need to do this
2624 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2625 half. If we use in/loc/out register names, then we get assembler errors
2626 in crtn.S because there is no alloc insn or regstk directive in there. */
2627 if (! TARGET_REG_NAMES)
2629 int inputs = current_frame_info.n_input_regs;
2630 int locals = current_frame_info.n_local_regs;
2631 int outputs = current_frame_info.n_output_regs;
2633 for (i = 0; i < inputs; i++)
2634 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2635 for (i = 0; i < locals; i++)
2636 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2637 for (i = 0; i < outputs; i++)
2638 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2641 /* Set the frame pointer register name. The regnum is logically loc79,
2642 but of course we'll not have allocated that many locals. Rather than
2643 worrying about renumbering the existing rtxs, we adjust the name. */
2644 /* ??? This code means that we can never use one local register when
2645 there is a frame pointer. loc79 gets wasted in this case, as it is
2646 renamed to a register that will never be used. See also the try_locals
2647 code in find_gr_spill. */
2648 if (current_frame_info.reg_fp)
2650 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2651 reg_names[HARD_FRAME_POINTER_REGNUM]
2652 = reg_names[current_frame_info.reg_fp];
2653 reg_names[current_frame_info.reg_fp] = tmp;
2656 /* We don't need an alloc instruction if we've used no outputs or locals. */
2657 if (current_frame_info.n_local_regs == 0
2658 && current_frame_info.n_output_regs == 0
2659 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2660 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2662 /* If there is no alloc, but there are input registers used, then we
2663 need a .regstk directive. */
2664 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2665 ar_pfs_save_reg = NULL_RTX;
2669 current_frame_info.need_regstk = 0;
2671 if (current_frame_info.reg_save_ar_pfs)
2672 regno = current_frame_info.reg_save_ar_pfs;
2674 regno = next_scratch_gr_reg ();
2675 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2677 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2678 GEN_INT (current_frame_info.n_input_regs),
2679 GEN_INT (current_frame_info.n_local_regs),
2680 GEN_INT (current_frame_info.n_output_regs),
2681 GEN_INT (current_frame_info.n_rotate_regs)));
2682 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2685 /* Set up frame pointer, stack pointer, and spill iterators. */
2687 n_varargs = cfun->machine->n_varargs;
2688 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2689 stack_pointer_rtx, 0);
2691 if (frame_pointer_needed)
2693 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2694 RTX_FRAME_RELATED_P (insn) = 1;
2697 if (current_frame_info.total_size != 0)
2699 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2702 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2703 offset = frame_size_rtx;
2706 regno = next_scratch_gr_reg ();
2707 offset = gen_rtx_REG (DImode, regno);
2708 emit_move_insn (offset, frame_size_rtx);
2711 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2712 stack_pointer_rtx, offset));
2714 if (! frame_pointer_needed)
2716 RTX_FRAME_RELATED_P (insn) = 1;
2717 if (GET_CODE (offset) != CONST_INT)
2720 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2721 gen_rtx_SET (VOIDmode,
2723 gen_rtx_PLUS (DImode,
2730 /* ??? At this point we must generate a magic insn that appears to
2731 modify the stack pointer, the frame pointer, and all spill
2732 iterators. This would allow the most scheduling freedom. For
2733 now, just hard stop. */
2734 emit_insn (gen_blockage ());
2737 /* Must copy out ar.unat before doing any integer spills. */
2738 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2740 if (current_frame_info.reg_save_ar_unat)
2742 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2745 alt_regno = next_scratch_gr_reg ();
2746 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2747 current_frame_info.gr_used_mask |= 1 << alt_regno;
2750 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2751 insn = emit_move_insn (ar_unat_save_reg, reg);
2752 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2754 /* Even if we're not going to generate an epilogue, we still
2755 need to save the register so that EH works. */
2756 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2757 emit_insn (gen_prologue_use (ar_unat_save_reg));
2760 ar_unat_save_reg = NULL_RTX;
2762 /* Spill all varargs registers. Do this before spilling any GR registers,
2763 since we want the UNAT bits for the GR registers to override the UNAT
2764 bits from varargs, which we don't care about. */
2767 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2769 reg = gen_rtx_REG (DImode, regno);
2770 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2773 /* Locate the bottom of the register save area. */
2774 cfa_off = (current_frame_info.spill_cfa_off
2775 + current_frame_info.spill_size
2776 + current_frame_info.extra_spill_size);
2778 /* Save the predicate register block either in a register or in memory. */
2779 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2781 reg = gen_rtx_REG (DImode, PR_REG (0));
2782 if (current_frame_info.reg_save_pr != 0)
2784 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2785 insn = emit_move_insn (alt_reg, reg);
2787 /* ??? Denote pr spill/fill by a DImode move that modifies all
2788 64 hard registers. */
2789 RTX_FRAME_RELATED_P (insn) = 1;
2791 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2792 gen_rtx_SET (VOIDmode, alt_reg, reg),
2795 /* Even if we're not going to generate an epilogue, we still
2796 need to save the register so that EH works. */
2798 emit_insn (gen_prologue_use (alt_reg));
2802 alt_regno = next_scratch_gr_reg ();
2803 alt_reg = gen_rtx_REG (DImode, alt_regno);
2804 insn = emit_move_insn (alt_reg, reg);
2805 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2810 /* Handle AR regs in numerical order. All of them get special handling. */
2811 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2812 && current_frame_info.reg_save_ar_unat == 0)
2814 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2815 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2819 /* The alloc insn already copied ar.pfs into a general register. The
2820 only thing we have to do now is copy that register to a stack slot
2821 if we'd not allocated a local register for the job. */
2822 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2823 && current_frame_info.reg_save_ar_pfs == 0)
2825 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2826 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2830 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2832 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2833 if (current_frame_info.reg_save_ar_lc != 0)
2835 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2836 insn = emit_move_insn (alt_reg, reg);
2837 RTX_FRAME_RELATED_P (insn) = 1;
2839 /* Even if we're not going to generate an epilogue, we still
2840 need to save the register so that EH works. */
2842 emit_insn (gen_prologue_use (alt_reg));
2846 alt_regno = next_scratch_gr_reg ();
2847 alt_reg = gen_rtx_REG (DImode, alt_regno);
2848 emit_move_insn (alt_reg, reg);
2849 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2854 if (current_frame_info.reg_save_gp)
2856 insn = emit_move_insn (gen_rtx_REG (DImode,
2857 current_frame_info.reg_save_gp),
2858 pic_offset_table_rtx);
2859 /* We don't know for sure yet if this is actually needed, since
2860 we've not split the PIC call patterns. If all of the calls
2861 are indirect, and not followed by any uses of the gp, then
2862 this save is dead. Allow it to go away. */
2864 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2867 /* We should now be at the base of the gr/br/fr spill area. */
2868 if (cfa_off != (current_frame_info.spill_cfa_off
2869 + current_frame_info.spill_size))
2872 /* Spill all general registers. */
2873 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2874 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2876 reg = gen_rtx_REG (DImode, regno);
2877 do_spill (gen_gr_spill, reg, cfa_off, reg);
2881 /* Handle BR0 specially -- it may be getting stored permanently in
2882 some GR register. */
2883 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2885 reg = gen_rtx_REG (DImode, BR_REG (0));
2886 if (current_frame_info.reg_save_b0 != 0)
2888 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2889 insn = emit_move_insn (alt_reg, reg);
2890 RTX_FRAME_RELATED_P (insn) = 1;
2892 /* Even if we're not going to generate an epilogue, we still
2893 need to save the register so that EH works. */
2895 emit_insn (gen_prologue_use (alt_reg));
2899 alt_regno = next_scratch_gr_reg ();
2900 alt_reg = gen_rtx_REG (DImode, alt_regno);
2901 emit_move_insn (alt_reg, reg);
2902 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2907 /* Spill the rest of the BR registers. */
2908 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2909 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2911 alt_regno = next_scratch_gr_reg ();
2912 alt_reg = gen_rtx_REG (DImode, alt_regno);
2913 reg = gen_rtx_REG (DImode, regno);
2914 emit_move_insn (alt_reg, reg);
2915 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2919 /* Align the frame and spill all FR registers. */
2920 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2921 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2925 reg = gen_rtx_REG (XFmode, regno);
2926 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2930 if (cfa_off != current_frame_info.spill_cfa_off)
2933 finish_spill_pointers ();
2936 /* Called after register allocation to add any instructions needed for the
2937 epilogue. Using an epilogue insn is favored compared to putting all of the
2938 instructions in output_function_epilogue(), since it allows the scheduler
2939 to intermix instructions with the restores of the caller saved registers. In
2940 some cases, it might be necessary to emit a barrier instruction as the last
2941 insn to prevent such scheduling. */
2944 ia64_expand_epilogue (int sibcall_p)
2946 rtx insn, reg, alt_reg, ar_unat_save_reg;
2947 int regno, alt_regno, cfa_off;
2949 ia64_compute_frame_size (get_frame_size ());
2951 /* If there is a frame pointer, then we use it instead of the stack
2952 pointer, so that the stack pointer does not need to be valid when
2953 the epilogue starts. See EXIT_IGNORE_STACK. */
2954 if (frame_pointer_needed)
2955 setup_spill_pointers (current_frame_info.n_spilled,
2956 hard_frame_pointer_rtx, 0);
2958 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2959 current_frame_info.total_size);
2961 if (current_frame_info.total_size != 0)
2963 /* ??? At this point we must generate a magic insn that appears to
2964 modify the spill iterators and the frame pointer. This would
2965 allow the most scheduling freedom. For now, just hard stop. */
2966 emit_insn (gen_blockage ());
2969 /* Locate the bottom of the register save area. */
2970 cfa_off = (current_frame_info.spill_cfa_off
2971 + current_frame_info.spill_size
2972 + current_frame_info.extra_spill_size);
2974 /* Restore the predicate registers. */
2975 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2977 if (current_frame_info.reg_save_pr != 0)
2978 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2981 alt_regno = next_scratch_gr_reg ();
2982 alt_reg = gen_rtx_REG (DImode, alt_regno);
2983 do_restore (gen_movdi_x, alt_reg, cfa_off);
2986 reg = gen_rtx_REG (DImode, PR_REG (0));
2987 emit_move_insn (reg, alt_reg);
2990 /* Restore the application registers. */
2992 /* Load the saved unat from the stack, but do not restore it until
2993 after the GRs have been restored. */
2994 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2996 if (current_frame_info.reg_save_ar_unat != 0)
2998 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3001 alt_regno = next_scratch_gr_reg ();
3002 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3003 current_frame_info.gr_used_mask |= 1 << alt_regno;
3004 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3009 ar_unat_save_reg = NULL_RTX;
3011 if (current_frame_info.reg_save_ar_pfs != 0)
3013 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3014 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3015 emit_move_insn (reg, alt_reg);
3017 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3019 alt_regno = next_scratch_gr_reg ();
3020 alt_reg = gen_rtx_REG (DImode, alt_regno);
3021 do_restore (gen_movdi_x, alt_reg, cfa_off);
3023 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3024 emit_move_insn (reg, alt_reg);
3027 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3029 if (current_frame_info.reg_save_ar_lc != 0)
3030 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3033 alt_regno = next_scratch_gr_reg ();
3034 alt_reg = gen_rtx_REG (DImode, alt_regno);
3035 do_restore (gen_movdi_x, alt_reg, cfa_off);
3038 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3039 emit_move_insn (reg, alt_reg);
3042 /* We should now be at the base of the gr/br/fr spill area. */
3043 if (cfa_off != (current_frame_info.spill_cfa_off
3044 + current_frame_info.spill_size))
3047 /* The GP may be stored on the stack in the prologue, but it's
3048 never restored in the epilogue. Skip the stack slot. */
3049 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3052 /* Restore all general registers. */
3053 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3054 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3056 reg = gen_rtx_REG (DImode, regno);
3057 do_restore (gen_gr_restore, reg, cfa_off);
3061 /* Restore the branch registers. Handle B0 specially, as it may
3062 have gotten stored in some GR register. */
3063 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3065 if (current_frame_info.reg_save_b0 != 0)
3066 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3069 alt_regno = next_scratch_gr_reg ();
3070 alt_reg = gen_rtx_REG (DImode, alt_regno);
3071 do_restore (gen_movdi_x, alt_reg, cfa_off);
3074 reg = gen_rtx_REG (DImode, BR_REG (0));
3075 emit_move_insn (reg, alt_reg);
3078 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3079 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3081 alt_regno = next_scratch_gr_reg ();
3082 alt_reg = gen_rtx_REG (DImode, alt_regno);
3083 do_restore (gen_movdi_x, alt_reg, cfa_off);
3085 reg = gen_rtx_REG (DImode, regno);
3086 emit_move_insn (reg, alt_reg);
3089 /* Restore floating point registers. */
3090 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3091 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3095 reg = gen_rtx_REG (XFmode, regno);
3096 do_restore (gen_fr_restore_x, reg, cfa_off);
3100 /* Restore ar.unat for real. */
3101 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3103 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3104 emit_move_insn (reg, ar_unat_save_reg);
3107 if (cfa_off != current_frame_info.spill_cfa_off)
3110 finish_spill_pointers ();
3112 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3114 /* ??? At this point we must generate a magic insn that appears to
3115 modify the spill iterators, the stack pointer, and the frame
3116 pointer. This would allow the most scheduling freedom. For now,
3118 emit_insn (gen_blockage ());
3121 if (cfun->machine->ia64_eh_epilogue_sp)
3122 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3123 else if (frame_pointer_needed)
3125 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3126 RTX_FRAME_RELATED_P (insn) = 1;
3128 else if (current_frame_info.total_size)
3130 rtx offset, frame_size_rtx;
3132 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3133 if (CONST_OK_FOR_I (current_frame_info.total_size))
3134 offset = frame_size_rtx;
3137 regno = next_scratch_gr_reg ();
3138 offset = gen_rtx_REG (DImode, regno);
3139 emit_move_insn (offset, frame_size_rtx);
3142 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3145 RTX_FRAME_RELATED_P (insn) = 1;
3146 if (GET_CODE (offset) != CONST_INT)
3149 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3150 gen_rtx_SET (VOIDmode,
3152 gen_rtx_PLUS (DImode,
3159 if (cfun->machine->ia64_eh_epilogue_bsp)
3160 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3163 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3166 int fp = GR_REG (2);
3167 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
3168 first available call clobbered register. If there was a frame_pointer
3169 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3170 so we have to make sure we're using the string "r2" when emitting
3171 the register name for the assembler. */
3172 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3173 fp = HARD_FRAME_POINTER_REGNUM;
3175 /* We must emit an alloc to force the input registers to become output
3176 registers. Otherwise, if the callee tries to pass its parameters
3177 through to another call without an intervening alloc, then these values get lost. */
3179 /* ??? We don't need to preserve all input registers. We only need to
3180 preserve those input registers used as arguments to the sibling call.
3181 It is unclear how to compute that number here. */
3182 if (current_frame_info.n_input_regs != 0)
3183 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3184 const0_rtx, const0_rtx,
3185 GEN_INT (current_frame_info.n_input_regs),
3190 /* Return 1 if br.ret can do all the work required to return from a function. */
3194 ia64_direct_return (void)
3196 if (reload_completed && ! frame_pointer_needed)
3198 ia64_compute_frame_size (get_frame_size ());
3200 return (current_frame_info.total_size == 0
3201 && current_frame_info.n_spilled == 0
3202 && current_frame_info.reg_save_b0 == 0
3203 && current_frame_info.reg_save_pr == 0
3204 && current_frame_info.reg_save_ar_pfs == 0
3205 && current_frame_info.reg_save_ar_unat == 0
3206 && current_frame_info.reg_save_ar_lc == 0);
3211 /* Return the magic cookie that we use to hold the return address
3212 during early compilation. */
3215 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3219 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3222 /* Split this value after reload, now that we know where the return
3223 address is saved. */
3226 ia64_split_return_addr_rtx (rtx dest)
3230 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3232 if (current_frame_info.reg_save_b0 != 0)
3233 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3239 /* Compute offset from CFA for BR0. */
3240 /* ??? Must be kept in sync with ia64_expand_prologue. */
3241 off = (current_frame_info.spill_cfa_off
3242 + current_frame_info.spill_size);
3243 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3244 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3247 /* Convert CFA offset to a register based offset. */
3248 if (frame_pointer_needed)
3249 src = hard_frame_pointer_rtx;
3252 src = stack_pointer_rtx;
3253 off += current_frame_info.total_size;
3256 /* Load address into scratch register. */
3257 if (CONST_OK_FOR_I (off))
3258 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3261 emit_move_insn (dest, GEN_INT (off));
3262 emit_insn (gen_adddi3 (dest, src, dest));
3265 src = gen_rtx_MEM (Pmode, dest);
3269 src = gen_rtx_REG (DImode, BR_REG (0));
3271 emit_move_insn (dest, src);
3275 ia64_hard_regno_rename_ok (int from, int to)
3277 /* Don't clobber any of the registers we reserved for the prologue. */
3278 if (to == current_frame_info.reg_fp
3279 || to == current_frame_info.reg_save_b0
3280 || to == current_frame_info.reg_save_pr
3281 || to == current_frame_info.reg_save_ar_pfs
3282 || to == current_frame_info.reg_save_ar_unat
3283 || to == current_frame_info.reg_save_ar_lc)
3286 if (from == current_frame_info.reg_fp
3287 || from == current_frame_info.reg_save_b0
3288 || from == current_frame_info.reg_save_pr
3289 || from == current_frame_info.reg_save_ar_pfs
3290 || from == current_frame_info.reg_save_ar_unat
3291 || from == current_frame_info.reg_save_ar_lc)
3294 /* Don't use output registers outside the register frame. */
3295 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3298 /* Retain even/oddness on predicate register pairs. */
3299 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3300 return (from & 1) == (to & 1);
3305 /* Target hook for assembling integer objects. Handle word-sized
3306 aligned objects and detect the cases when @fptr is needed. */
3309 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3311 if (size == POINTER_SIZE / BITS_PER_UNIT
3313 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3314 && GET_CODE (x) == SYMBOL_REF
3315 && SYMBOL_REF_FUNCTION_P (x))
3317 if (POINTER_SIZE == 32)
3318 fputs ("\tdata4\t@fptr(", asm_out_file);
3320 fputs ("\tdata8\t@fptr(", asm_out_file);
3321 output_addr_const (asm_out_file, x);
3322 fputs (")\n", asm_out_file);
3325 return default_assemble_integer (x, size, aligned_p);
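/* E.g. (illustrative): for a word-sized static initializer "&foo",
   where foo is a function, an ILP32 unit gets

     data4 @fptr(foo)

   so the linker materializes an official function descriptor for foo
   rather than storing a raw code address. */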
3328 /* Emit the function prologue. */
3331 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3333 int mask, grsave, grsave_prev;
3335 if (current_frame_info.need_regstk)
3336 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3337 current_frame_info.n_input_regs,
3338 current_frame_info.n_local_regs,
3339 current_frame_info.n_output_regs,
3340 current_frame_info.n_rotate_regs);
3342 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3345 /* Emit the .prologue directive. */
3348 grsave = grsave_prev = 0;
3349 if (current_frame_info.reg_save_b0 != 0)
3352 grsave = grsave_prev = current_frame_info.reg_save_b0;
3354 if (current_frame_info.reg_save_ar_pfs != 0
3355 && (grsave_prev == 0
3356 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3359 if (grsave_prev == 0)
3360 grsave = current_frame_info.reg_save_ar_pfs;
3361 grsave_prev = current_frame_info.reg_save_ar_pfs;
3363 if (current_frame_info.reg_fp != 0
3364 && (grsave_prev == 0
3365 || current_frame_info.reg_fp == grsave_prev + 1))
3368 if (grsave_prev == 0)
3369 grsave = HARD_FRAME_POINTER_REGNUM;
3370 grsave_prev = current_frame_info.reg_fp;
3372 if (current_frame_info.reg_save_pr != 0
3373 && (grsave_prev == 0
3374 || current_frame_info.reg_save_pr == grsave_prev + 1))
3377 if (grsave_prev == 0)
3378 grsave = current_frame_info.reg_save_pr;
3381 if (mask && TARGET_GNU_AS)
3382 fprintf (file, "\t.prologue %d, %d\n", mask,
3383 ia64_dbx_register_number (grsave));
3385 fputs ("\t.prologue\n", file);
3387 /* Emit a .spill directive, if necessary, to relocate the base of
3388 the register spill area. */
3389 if (current_frame_info.spill_cfa_off != -16)
3390 fprintf (file, "\t.spill %ld\n",
3391 (long) (current_frame_info.spill_cfa_off
3392 + current_frame_info.spill_size));
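/* Illustrative output for GNU as (the unwind mask bit values are an
   assumption here, as the mask assignments are elided above): when b0
   and ar.pfs were saved in consecutive stacked GRs, both fit the
   compact encoding and we emit something like

     .prologue 12, 34

   where 34 stands for ia64_dbx_register_number of the b0 save
   register; otherwise we fall back to a plain ".prologue". */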
3395 /* Emit the .body directive at the scheduled end of the prologue. */
3398 ia64_output_function_end_prologue (FILE *file)
3400 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3403 fputs ("\t.body\n", file);
3406 /* Emit the function epilogue. */
3409 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3410 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3414 if (current_frame_info.reg_fp)
3416 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3417 reg_names[HARD_FRAME_POINTER_REGNUM]
3418 = reg_names[current_frame_info.reg_fp];
3419 reg_names[current_frame_info.reg_fp] = tmp;
3421 if (! TARGET_REG_NAMES)
3423 for (i = 0; i < current_frame_info.n_input_regs; i++)
3424 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3425 for (i = 0; i < current_frame_info.n_local_regs; i++)
3426 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3427 for (i = 0; i < current_frame_info.n_output_regs; i++)
3428 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3431 current_frame_info.initialized = 0;
3435 ia64_dbx_register_number (int regno)
3437 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3438 from its home at loc79 to something inside the register frame. We
3439 must perform the same renumbering here for the debug info. */
3440 if (current_frame_info.reg_fp)
3442 if (regno == HARD_FRAME_POINTER_REGNUM)
3443 regno = current_frame_info.reg_fp;
3444 else if (regno == current_frame_info.reg_fp)
3445 regno = HARD_FRAME_POINTER_REGNUM;
3448 if (IN_REGNO_P (regno))
3449 return 32 + regno - IN_REG (0);
3450 else if (LOC_REGNO_P (regno))
3451 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3452 else if (OUT_REGNO_P (regno))
3453 return (32 + current_frame_info.n_input_regs
3454 + current_frame_info.n_local_regs + regno - OUT_REG (0));
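/* Example (illustrative): with 2 input and 3 local registers, in0/in1
   map to debug registers 32/33, loc0-loc2 to 34-36, and out0 to 37;
   the stacked registers are numbered densely in in/loc/out order, no
   matter how many of each group the function actually uses. */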
3460 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3462 rtx addr_reg, eight = GEN_INT (8);
3464 /* The Intel assembler requires that the global __ia64_trampoline symbol
3465 be declared explicitly. */
3468 static bool declared_ia64_trampoline = false;
3470 if (!declared_ia64_trampoline)
3472 declared_ia64_trampoline = true;
3473 (*targetm.asm_out.globalize_label) (asm_out_file,
3474 "__ia64_trampoline");
3478 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3479 addr = convert_memory_address (Pmode, addr);
3480 fnaddr = convert_memory_address (Pmode, fnaddr);
3481 static_chain = convert_memory_address (Pmode, static_chain);
3483 /* Load up our iterator. */
3484 addr_reg = gen_reg_rtx (Pmode);
3485 emit_move_insn (addr_reg, addr);
3487 /* The first two words are the fake descriptor:
3488 __ia64_trampoline, ADDR+16. */
3489 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3490 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3491 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3493 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3494 copy_to_reg (plus_constant (addr, 16)));
3495 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3497 /* The third word is the target descriptor. */
3498 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3499 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3501 /* The fourth word is the static chain. */
3502 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
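/* Resulting layout (illustrative) for a trampoline at ADDR:

     ADDR +  0: __ia64_trampoline  -- fake descriptor: entry point
     ADDR +  8: ADDR + 16          -- fake descriptor: "gp" value
     ADDR + 16: FNADDR             -- the real target descriptor
     ADDR + 24: STATIC_CHAIN

   Calling through the fake descriptor enters __ia64_trampoline with
   gp == ADDR + 16, which is how it locates the real target and the
   static chain. */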
3505 /* Do any needed setup for a variadic function. CUM has not been updated
3506 for the last named argument which has type TYPE and mode MODE.
3508 We generate the actual spill instructions during prologue generation. */
3511 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3512 tree type, int * pretend_size,
3513 int second_time ATTRIBUTE_UNUSED)
3515 CUMULATIVE_ARGS next_cum = *cum;
3517 /* Skip the current argument. */
3518 ia64_function_arg_advance (&next_cum, mode, type, 1);
3520 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3522 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3523 *pretend_size = n * UNITS_PER_WORD;
3524 cfun->machine->n_varargs = n;
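/* Worked example (hypothetical): for "int f (int n, ...)" the single
   named argument occupies one of the MAX_ARGUMENT_SLOTS (8) slots, so
   next_cum.words == 1, n == 7, and *pretend_size == 7 * 8 == 56; the
   prologue later spills the last seven argument registers into that
   area. */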
3528 /* Check whether TYPE is a homogeneous floating point aggregate. If
3529 it is, return the mode of the floating point type that appears
3530 in all leaves. If it is not, return VOIDmode.
3532 An aggregate is a homogeneous floating point aggregate if all
3533 fields/elements in it have the same floating point type (e.g.,
3534 SFmode). 128-bit quad-precision floats are excluded. */
3536 static enum machine_mode
3537 hfa_element_mode (tree type, int nested)
3539 enum machine_mode element_mode = VOIDmode;
3540 enum machine_mode mode;
3541 enum tree_code code = TREE_CODE (type);
3542 int know_element_mode = 0;
3547 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3548 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3549 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3550 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
3554 /* Fortran complex types are supposed to be HFAs, so we need to handle
3555 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex types. */
3558 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3559 && TYPE_MODE (type) != TCmode)
3560 return GET_MODE_INNER (TYPE_MODE (type));
3565 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3566 mode if this is contained within an aggregate. */
3567 if (nested && TYPE_MODE (type) != TFmode)
3568 return TYPE_MODE (type);
3573 return hfa_element_mode (TREE_TYPE (type), 1);
3577 case QUAL_UNION_TYPE:
3578 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3580 if (TREE_CODE (t) != FIELD_DECL)
3583 mode = hfa_element_mode (TREE_TYPE (t), 1);
3584 if (know_element_mode)
3586 if (mode != element_mode)
3589 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3593 know_element_mode = 1;
3594 element_mode = mode;
3597 return element_mode;
3600 /* If we reach here, we probably have some front-end specific type
3601 that the backend doesn't know about. This can happen via the
3602 aggregate_value_p call in init_function_start. All we can do is
3603 ignore unknown tree types. */
3610 /* Return the number of words required to hold a quantity of TYPE and MODE
3611 when passed as an argument. */
3613 ia64_function_arg_words (tree type, enum machine_mode mode)
3617 if (mode == BLKmode)
3618 words = int_size_in_bytes (type);
3620 words = GET_MODE_SIZE (mode);
3622 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
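/* E.g. (illustrative): a 12-byte BLKmode aggregate needs
   (12 + 8 - 1) / 8 == 2 argument words, while a 4-byte SImode value
   needs (4 + 8 - 1) / 8 == 1. */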
3625 /* Return the number of registers that should be skipped so the current
3626 argument (described by TYPE and WORDS) will be properly aligned.
3628 Integer and float arguments larger than 8 bytes start at the next
3629 even boundary. Aggregates larger than 8 bytes start at the next
3630 even boundary if the aggregate has 16 byte alignment. Note that
3631 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3632 but are still to be aligned in registers.
3634 ??? The ABI does not specify how to handle aggregates with
3635 alignment from 9 to 15 bytes, or greater than 16. We handle them
3636 all as if they had 16 byte alignment. Such aggregates can occur
3637 only if gcc extensions are used. */
3639 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3641 if ((cum->words & 1) == 0)
3645 && TREE_CODE (type) != INTEGER_TYPE
3646 && TREE_CODE (type) != REAL_TYPE)
3647 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
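/* Examples (illustrative, per the rules above): a 16-byte-aligned
   aggregate that would start in an odd slot is pushed to the next even
   one; a TImode integer likewise; an 8-byte double is never padded,
   and nothing is padded when cum->words is already even. */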
3652 /* Return rtx for register where argument is passed, or zero if it is passed on the stack. */
3654 /* ??? 128-bit quad-precision floats are always passed in general registers. */
3658 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3659 int named, int incoming)
3661 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3662 int words = ia64_function_arg_words (type, mode);
3663 int offset = ia64_function_arg_offset (cum, type, words);
3664 enum machine_mode hfa_mode = VOIDmode;
3666 /* If all argument slots are used, then it must go on the stack. */
3667 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3670 /* Check for and handle homogeneous FP aggregates. */
3672 hfa_mode = hfa_element_mode (type, 0);
3674 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3675 and unprototyped hfas are passed specially. */
3676 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3680 int fp_regs = cum->fp_regs;
3681 int int_regs = cum->words + offset;
3682 int hfa_size = GET_MODE_SIZE (hfa_mode);
3686 /* If prototyped, pass it in FR regs then GR regs.
3687 If not prototyped, pass it in both FR and GR regs.
3689 If this is an SFmode aggregate, then it is possible to run out of
3690 FR regs while GR regs are still left. In that case, we pass the
3691 remaining part in the GR regs. */
3693 /* Fill the FP regs. We do this always. We stop if we reach the end
3694 of the argument, the last FP register, or the last argument slot. */
3696 byte_size = ((mode == BLKmode)
3697 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3698 args_byte_size = int_regs * UNITS_PER_WORD;
3700 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3701 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3703 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3704 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3708 args_byte_size += hfa_size;
3712 /* If no prototype, then the whole thing must go in GR regs. */
3713 if (! cum->prototype)
3715 /* If this is an SFmode aggregate, then we might have some left over
3716 that needs to go in GR regs. */
3717 else if (byte_size != offset)
3718 int_regs += offset / UNITS_PER_WORD;
3720 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3722 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3724 enum machine_mode gr_mode = DImode;
3725 unsigned int gr_size;
3727 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3728 then this goes in a GR reg left adjusted/little endian, right
3729 adjusted/big endian. */
3730 /* ??? Currently this is handled wrong, because 4-byte hunks are
3731 always right adjusted/little endian. */
3734 /* If we have an even 4 byte hunk because the aggregate is a
3735 multiple of 4 bytes in size, then this goes in a GR reg right
3736 adjusted/little endian. */
3737 else if (byte_size - offset == 4)
3740 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3741 gen_rtx_REG (gr_mode, (basereg
3745 gr_size = GET_MODE_SIZE (gr_mode);
3747 if (gr_size == UNITS_PER_WORD
3748 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3750 else if (gr_size > UNITS_PER_WORD)
3751 int_regs += gr_size / UNITS_PER_WORD;
3754 /* If we ended up using just one location, just return that one loc, but
3755 change the mode back to the argument mode. However, we can't do this
3756 when hfa_mode is XFmode and mode is TImode. In that case, we would
3757 return a TImode reference to an FP reg, but FP regs can't hold TImode.
3758 We need the PARALLEL to make this work. This can happen for a union
3759 containing a single __float80 member. */
3760 if (i == 1 && ! (hfa_mode == XFmode && mode == TImode))
3761 return gen_rtx_REG (mode, REGNO (XEXP (loc[0], 0)));
3763 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3766 /* Integral and aggregates go in general registers. If we have run out of
3767 FR registers, then FP values must also go in general registers. This can
3768 happen when we have a SFmode HFA. */
3769 else if (mode == TFmode || mode == TCmode
3770 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3772 int byte_size = ((mode == BLKmode)
3773 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3774 if (BYTES_BIG_ENDIAN
3775 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3776 && byte_size < UNITS_PER_WORD
3779 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3780 gen_rtx_REG (DImode,
3781 (basereg + cum->words
3784 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3787 return gen_rtx_REG (mode, basereg + cum->words + offset);
3791 /* If there is a prototype, then FP values go in a FR register when
3792 named, and in a GR register when unnamed. */
3793 else if (cum->prototype)
3796 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3797 /* In big-endian mode, an anonymous SFmode value must be represented
3798 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3799 the value into the high half of the general register. */
3800 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3801 return gen_rtx_PARALLEL (mode,
3803 gen_rtx_EXPR_LIST (VOIDmode,
3804 gen_rtx_REG (DImode, basereg + cum->words + offset),
3807 return gen_rtx_REG (mode, basereg + cum->words + offset);
3809 /* If there is no prototype, then FP values go in both FR and GR registers. */
3813 /* See comment above. */
3814 enum machine_mode inner_mode =
3815 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3817 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3818 gen_rtx_REG (mode, (FR_ARG_FIRST
3821 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3822 gen_rtx_REG (inner_mode,
3823 (basereg + cum->words
3827 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
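/* Worked HFA example (illustrative): a named, prototyped struct of
   four floats (hfa_mode SFmode, 16 bytes) arriving at cum->words == 0
   with no FP args consumed yet comes back as

     (parallel [(expr_list (reg:SF f8)  (const_int 0))
                (expr_list (reg:SF f9)  (const_int 4))
                (expr_list (reg:SF f10) (const_int 8))
                (expr_list (reg:SF f11) (const_int 12))])

   one FR argument register per leaf, each tagged with its byte offset
   into the aggregate. */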
3831 /* Return number of words, at the beginning of the argument, that must be
3832 put in registers. 0 if the argument is entirely in registers or entirely in memory. */
3836 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3837 tree type, int named ATTRIBUTE_UNUSED)
3839 int words = ia64_function_arg_words (type, mode);
3840 int offset = ia64_function_arg_offset (cum, type, words);
3842 /* If all argument slots are used, then it must go on the stack. */
3843 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3846 /* It doesn't matter whether the argument goes in FR or GR regs. If
3847 it fits within the 8 argument slots, then it goes entirely in
3848 registers. If it extends past the last argument slot, then the rest
3849 goes on the stack. */
3851 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3854 return MAX_ARGUMENT_SLOTS - cum->words - offset;
3857 /* Update CUM to point after this argument. This is patterned after
3858 ia64_function_arg. */
3861 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3862 tree type, int named)
3864 int words = ia64_function_arg_words (type, mode);
3865 int offset = ia64_function_arg_offset (cum, type, words);
3866 enum machine_mode hfa_mode = VOIDmode;
3868 /* If all arg slots are already full, then there is nothing to do. */
3869 if (cum->words >= MAX_ARGUMENT_SLOTS)
3872 cum->words += words + offset;
3874 /* Check for and handle homogeneous FP aggregates. */
3876 hfa_mode = hfa_element_mode (type, 0);
3878 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3879 and unprototyped hfas are passed specially. */
3880 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3882 int fp_regs = cum->fp_regs;
3883 /* This is the original value of cum->words + offset. */
3884 int int_regs = cum->words - words;
3885 int hfa_size = GET_MODE_SIZE (hfa_mode);
3889 /* If prototyped, pass it in FR regs then GR regs.
3890 If not prototyped, pass it in both FR and GR regs.
3892 If this is an SFmode aggregate, then it is possible to run out of
3893 FR regs while GR regs are still left. In that case, we pass the
3894 remaining part in the GR regs. */
3896 /* Fill the FP regs. We do this always. We stop if we reach the end
3897 of the argument, the last FP register, or the last argument slot. */
3899 byte_size = ((mode == BLKmode)
3900 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3901 args_byte_size = int_regs * UNITS_PER_WORD;
3903 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3904 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3907 args_byte_size += hfa_size;
3911 cum->fp_regs = fp_regs;
3914 /* Integral and aggregates go in general registers. If we have run out of
3915 FR registers, then FP values must also go in general registers. This can
3916 happen when we have a SFmode HFA. */
3917 else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
3918 cum->int_regs = cum->words;
3920 /* If there is a prototype, then FP values go in a FR register when
3921 named, and in a GR register when unnamed. */
3922 else if (cum->prototype)
3925 cum->int_regs = cum->words;
3927 /* ??? Complex types should not reach here. */
3928 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3930 /* If there is no prototype, then FP values go in both FR and GR registers. */
3934 /* ??? Complex types should not reach here. */
3935 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3936 cum->int_regs = cum->words;
3940 /* Variable sized types are passed by reference. */
3941 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3944 ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3945 enum machine_mode mode ATTRIBUTE_UNUSED,
3946 tree type, int named ATTRIBUTE_UNUSED)
3948 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3951 /* True if it is OK to do sibling call optimization for the specified
3952 call expression EXP. DECL will be the called function, or NULL if
3953 this is an indirect call. */
3955 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3957 /* We must always return with our current GP. This means we can
3958 only sibcall to functions defined in the current module. */
3959 return decl && (*targetm.binds_local_p) (decl);
3963 /* Implement va_arg. */
3966 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3968 /* Variable sized types are passed by reference. */
3969 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
3971 tree ptrtype = build_pointer_type (type);
3972 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3973 return build_fold_indirect_ref (addr);
3976 /* Aggregate arguments with alignment larger than 8 bytes start at
3977 the next even boundary. Integer and floating point arguments
3978 do so if they are larger than 8 bytes, whether or not they are
3979 also aligned larger than 8 bytes. */
3980 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3981 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3983 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3984 build_int_2 (2 * UNITS_PER_WORD - 1, 0));
3985 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3986 build_int_2 (-2 * UNITS_PER_WORD, -1));
3987 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3988 gimplify_and_add (t, pre_p);
3991 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
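/* The fixup above is the usual round-up-to-16 idiom (illustrative):
   with UNITS_PER_WORD == 8 it computes

     valist = (valist + 15) & -16

   since 2 * UNITS_PER_WORD - 1 == 15 and -2 * UNITS_PER_WORD == -16. */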
3994 /* Return 1 if the function return value is returned in memory. Return 0 if it is in a register. */
3998 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
4000 enum machine_mode mode;
4001 enum machine_mode hfa_mode;
4002 HOST_WIDE_INT byte_size;
4004 mode = TYPE_MODE (valtype);
4005 byte_size = GET_MODE_SIZE (mode);
4006 if (mode == BLKmode)
4008 byte_size = int_size_in_bytes (valtype);
4013 /* HFAs with up to 8 elements are returned in the FP argument registers. */
4015 hfa_mode = hfa_element_mode (valtype, 0);
4016 if (hfa_mode != VOIDmode)
4018 int hfa_size = GET_MODE_SIZE (hfa_mode);
4020 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4025 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4031 /* Return the rtx for the register that holds the function return value.  */
4034 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4036 enum machine_mode mode;
4037 enum machine_mode hfa_mode;
4039 mode = TYPE_MODE (valtype);
4040 hfa_mode = hfa_element_mode (valtype, 0);
4042 if (hfa_mode != VOIDmode)
4050 hfa_size = GET_MODE_SIZE (hfa_mode);
4051 byte_size = ((mode == BLKmode)
4052 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4054 for (i = 0; offset < byte_size; i++)
4056 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4057 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4063 return XEXP (loc[0], 0);
4065 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4067 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4068 return gen_rtx_REG (mode, FR_ARG_FIRST);
4071 if (BYTES_BIG_ENDIAN
4072 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4080 bytesize = int_size_in_bytes (valtype);
4081 for (i = 0; offset < bytesize; i++)
4083 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4084 gen_rtx_REG (DImode,
4087 offset += UNITS_PER_WORD;
4089 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4092 return gen_rtx_REG (mode, GR_RET_FIRST);
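/* Worked example (illustrative, based on the HFA path above): for

     struct pt { float x, y, z; };

   hfa_element_mode returns SFmode, so the value comes back as a
   PARALLEL of three SFmode registers starting at FR_ARG_FIRST,
   i.e. f8, f9 and f10 on IA-64.  */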
4096 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
4097 We need to emit DTP-relative relocations. */
4100 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4104 fputs ("\tdata8.ua\t@dtprel(", file);
4105 output_addr_const (file, x);
4109 /* Print a memory address as an operand to reference that memory location. */
4111 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4112 also call this from ia64_print_operand for memory addresses. */
4115 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4116 rtx address ATTRIBUTE_UNUSED)
4120 /* Print an operand to an assembler instruction.
4121 C Swap and print a comparison operator.
4122 D Print an FP comparison operator.
4123 E Print 32 - constant, for SImode shifts as extract.
4124 e Print 64 - constant, for DImode rotates.
4125 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4126 a floating point register emitted normally.
4127 I Invert a predicate register by adding 1.
4128 J Select the proper predicate register for a condition.
4129 j Select the inverse predicate register for a condition.
4130 O Append .acq for volatile load.
4131 P Postincrement of a MEM.
4132 Q Append .rel for volatile store.
4133 S Shift amount for shladd instruction.
4134 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4135 for Intel assembler.
4136 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4137 for Intel assembler.
4138 r Print register name, or constant 0 as r0.  HP compatibility for Linux kernel.  */
4141 ia64_print_operand (FILE * file, rtx x, int code)
4148 /* Handled below. */
4153 enum rtx_code c = swap_condition (GET_CODE (x));
4154 fputs (GET_RTX_NAME (c), file);
4159 switch (GET_CODE (x))
4171 str = GET_RTX_NAME (GET_CODE (x));
4178 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4182 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4186 if (x == CONST0_RTX (GET_MODE (x)))
4187 str = reg_names [FR_REG (0)];
4188 else if (x == CONST1_RTX (GET_MODE (x)))
4189 str = reg_names [FR_REG (1)];
4190 else if (GET_CODE (x) == REG)
4191 str = reg_names [REGNO (x)];
4198 fputs (reg_names [REGNO (x) + 1], file);
4204 unsigned int regno = REGNO (XEXP (x, 0));
4205 if (GET_CODE (x) == EQ)
4209 fputs (reg_names [regno], file);
4214 if (MEM_VOLATILE_P (x))
4215 fputs(".acq", file);
4220 HOST_WIDE_INT value;
4222 switch (GET_CODE (XEXP (x, 0)))
4228 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4229 if (GET_CODE (x) == CONST_INT)
4231 else if (GET_CODE (x) == REG)
4233 fprintf (file, ", %s", reg_names[REGNO (x)]);
4241 value = GET_MODE_SIZE (GET_MODE (x));
4245 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4249 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4254 if (MEM_VOLATILE_P (x))
4255 fputs(".rel", file);
4259 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4263 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4265 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4271 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4273 const char *prefix = "0x";
4274 if (INTVAL (x) & 0x80000000)
4276 fprintf (file, "0xffffffff");
4279 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4285 /* If this operand is the constant zero, write it as register zero.
4286 Any register, zero, or CONST_INT value is OK here. */
4287 if (GET_CODE (x) == REG)
4288 fputs (reg_names[REGNO (x)], file);
4289 else if (x == CONST0_RTX (GET_MODE (x)))
4291 else if (GET_CODE (x) == CONST_INT)
4292 output_addr_const (file, x);
4294 output_operand_lossage ("invalid %%r value");
4301 /* For conditional branches, returns or calls, substitute
4302 sptk, dptk, dpnt, or spnt for %s. */
4303 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4306 int pred_val = INTVAL (XEXP (x, 0));
4308 /* Guess: the extreme ~2% at either end are statically predicted.  */
4309 if (pred_val < REG_BR_PROB_BASE / 50)
4311 else if (pred_val < REG_BR_PROB_BASE / 2)
4313 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4318 else if (GET_CODE (current_output_insn) == CALL_INSN)
4323 fputs (which, file);
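/* E.g. (illustrative, assuming the sptk/dptk/dpnt/spnt mapping named in
   the comment above): with these thresholds, a branch whose REG_BR_PROB
   says it is taken less than 2% of the time gets the static not-taken
   hint "spnt", one taken at least 98% of the time gets "sptk", and
   everything in between uses the dynamic hints "dpnt"/"dptk".  */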
4328 x = current_insn_predicate;
4331 unsigned int regno = REGNO (XEXP (x, 0));
4332 if (GET_CODE (x) == EQ)
4334 fprintf (file, "(%s) ", reg_names [regno]);
4339 output_operand_lossage ("ia64_print_operand: unknown code");
4343 switch (GET_CODE (x))
4345 /* This happens for the spill/restore instructions. */
4350 /* ... fall through ... */
4353 fputs (reg_names [REGNO (x)], file);
4358 rtx addr = XEXP (x, 0);
4359 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4360 addr = XEXP (addr, 0);
4361 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4366 output_addr_const (file, x);
4373 /* Compute a (partial) cost for rtx X. Return true if the complete
4374 cost has been computed, and false if subexpressions should be
4375 scanned. In either case, *TOTAL contains the cost result. */
4376 /* ??? This is incomplete. */
4379 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4387 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4390 if (CONST_OK_FOR_I (INTVAL (x)))
4392 else if (CONST_OK_FOR_J (INTVAL (x)))
4395 *total = COSTS_N_INSNS (1);
4398 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4401 *total = COSTS_N_INSNS (1);
4406 *total = COSTS_N_INSNS (1);
4412 *total = COSTS_N_INSNS (3);
4416 /* For multiplies wider than HImode, we have to go to the FPU,
4417 which normally involves copies. Plus there's the latency
4418 of the multiply itself, and the latency of the instructions to
4419 transfer integer regs to FP regs. */
4420 /* ??? Check for FP mode. */
4421 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4422 *total = COSTS_N_INSNS (10);
4424 *total = COSTS_N_INSNS (2);
4432 *total = COSTS_N_INSNS (1);
4439 /* We make divide expensive, so that divide-by-constant will be
4440 optimized to a multiply. */
4441 *total = COSTS_N_INSNS (60);
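/* A minimal sketch (not from this file) of the transformation the high
   cost above enables: the middle end rewrites division by a constant as
   a multiply by a precomputed reciprocal, e.g. for unsigned 32-bit
   division by 3:  */
static unsigned int
udiv3_sketch (unsigned int x)
{
  /* 0xAAAAAAABULL == ceil (2^33 / 3); the high bits of the 64-bit
     product hold x / 3 exactly for every 32-bit x.  */
  return (unsigned int) (((unsigned long long) x * 0xAAAAAAABULL) >> 33);
}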
4449 /* Calculate the cost of moving data from a register in class FROM to
4450 one in class TO, using MODE. */
4453 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4456 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4457 if (to == ADDL_REGS)
4459 if (from == ADDL_REGS)
4462 /* All costs are symmetric, so reduce cases by putting the
4463 lower number class as the destination. */
4466 enum reg_class tmp = to;
4467 to = from, from = tmp;
4470 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4471 so that we get secondary memory reloads. Between FR_REGS,
4472 we have to make this at least as expensive as MEMORY_MOVE_COST
4473 to avoid spectacularly poor register class preferencing. */
4476 if (to != GR_REGS || from != GR_REGS)
4477 return MEMORY_MOVE_COST (mode, to, 0);
4485 /* Moving between PR registers takes two insns. */
4486 if (from == PR_REGS)
4488 /* Moving between PR and anything but GR is impossible. */
4489 if (from != GR_REGS)
4490 return MEMORY_MOVE_COST (mode, to, 0);
4494 /* Moving between BR and anything but GR is impossible. */
4495 if (from != GR_REGS && from != GR_AND_BR_REGS)
4496 return MEMORY_MOVE_COST (mode, to, 0);
4501 /* Moving between AR and anything but GR is impossible. */
4502 if (from != GR_REGS)
4503 return MEMORY_MOVE_COST (mode, to, 0);
4508 case GR_AND_FR_REGS:
4509 case GR_AND_BR_REGS:
4520 /* This function returns the register class required for a secondary
4521 register when copying between one of the registers in CLASS, and X,
4522 using MODE.  A return value of NO_REGS means that no secondary register is required.  */
4526 ia64_secondary_reload_class (enum reg_class class,
4527 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4531 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4532 regno = true_regnum (x);
4539 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4540 interaction. We end up with two pseudos with overlapping lifetimes
4541 both of which are equiv to the same constant, and both which need
4542 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4543 changes depending on the path length, which means the qty_first_reg
4544 check in make_regs_eqv can give different answers at different times.
4545 At some point I'll probably need a reload_indi pattern to handle this.
4548 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4549 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4550 non-general registers for good measure. */
4551 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4554 /* This is needed if a pseudo used as a call_operand gets spilled to a stack slot.  */
4556 if (GET_CODE (x) == MEM)
4561 /* Need to go through general registers to get to other class regs. */
4562 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4565 /* This can happen when a paradoxical subreg is an operand to the muldi3 pattern.  */
4567 /* ??? This shouldn't be necessary after instruction scheduling is
4568 enabled, because paradoxical subregs are not accepted by
4569 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4570 stop the paradoxical subreg stupidity in the *_operand functions in recog.c.  */
4572 if (GET_CODE (x) == MEM
4573 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4574 || GET_MODE (x) == QImode))
4577 /* This can happen because of the ior/and/etc patterns that accept FP
4578 registers as operands. If the third operand is a constant, then it
4579 needs to be reloaded into a FP register. */
4580 if (GET_CODE (x) == CONST_INT)
4583 /* This can happen because of register elimination in a muldi3 insn.
4584 E.g. `26107 * (unsigned long)&u'. */
4585 if (GET_CODE (x) == PLUS)
4590 /* ??? This happens if we cse/gcse a BImode value across a call,
4591 and the function has a nonlocal goto. This is because global
4592 does not allocate call crossing pseudos to hard registers when
4593 current_function_has_nonlocal_goto is true. This is relatively
4594 common for C++ programs that use exceptions. To reproduce,
4595 return NO_REGS and compile libstdc++. */
4596 if (GET_CODE (x) == MEM)
4599 /* This can happen when we take a BImode subreg of a DImode value,
4600 and that DImode value winds up in some non-GR register. */
4601 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4613 /* Emit text to declare externally defined variables and functions, because
4614 the Intel assembler does not support undefined externals. */
4617 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4619 int save_referenced;
4621 /* GNU as does not need anything here, but the HP linker does need
4622 something for external functions. */
4626 || TREE_CODE (decl) != FUNCTION_DECL
4627 || strstr (name, "__builtin_") == name))
4630 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4631 the linker when we do this, so we need to be careful not to do this for
4632 builtin functions which have no library equivalent. Unfortunately, we
4633 can't tell here whether or not a function will actually be called by
4634 expand_expr, so we pull in library functions even if we may not need them.  */
4636 if (! strcmp (name, "__builtin_next_arg")
4637 || ! strcmp (name, "alloca")
4638 || ! strcmp (name, "__builtin_constant_p")
4639 || ! strcmp (name, "__builtin_args_info"))
4643 ia64_hpux_add_extern_decl (decl);
4646 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and restore it.  */
4648 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4649 if (TREE_CODE (decl) == FUNCTION_DECL)
4650 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4651 (*targetm.asm_out.globalize_label) (file, name);
4652 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4656 /* Parse the -mfixed-range= option string. */
4659 fix_range (const char *const_str)
4662 char *str, *dash, *comma;
4664 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4665 REG2 are either register names or register numbers. The effect
4666 of this option is to mark the registers in the range from REG1 to
4667 REG2 as ``fixed'' so they won't be used by the compiler. This is
4668 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
4670 i = strlen (const_str);
4671 str = (char *) alloca (i + 1);
4672 memcpy (str, const_str, i + 1);
4676 dash = strchr (str, '-');
4679 warning ("value of -mfixed-range must have form REG1-REG2");
4684 comma = strchr (dash + 1, ',');
4688 first = decode_reg_name (str);
4691 warning ("unknown register name: %s", str);
4695 last = decode_reg_name (dash + 1);
4698 warning ("unknown register name: %s", dash + 1);
4706 warning ("%s-%s is an empty range", str, dash + 1);
4710 for (i = first; i <= last; ++i)
4711 fixed_regs[i] = call_used_regs[i] = 1;
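/* Usage sketch (hypothetical command line, not from this file):

     gcc -mfixed-range=f32-f127 ...

   decode_reg_name maps "f32" and "f127" to hard register numbers, and
   the loop above marks every register in between both fixed and
   call-used, so neither the register allocator nor the caller-save
   machinery will ever touch them.  */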
4721 static struct machine_function *
4722 ia64_init_machine_status (void)
4724 return ggc_alloc_cleared (sizeof (struct machine_function));
4727 /* Handle TARGET_OPTIONS switches. */
4730 ia64_override_options (void)
4734 const char *const name; /* processor name or nickname. */
4735 const enum processor_type processor;
4737 const processor_alias_table[] =
4739 {"itanium", PROCESSOR_ITANIUM},
4740 {"itanium1", PROCESSOR_ITANIUM},
4741 {"merced", PROCESSOR_ITANIUM},
4742 {"itanium2", PROCESSOR_ITANIUM2},
4743 {"mckinley", PROCESSOR_ITANIUM2},
4746 int const pta_size = ARRAY_SIZE (processor_alias_table);
4749 if (TARGET_AUTO_PIC)
4750 target_flags |= MASK_CONST_GP;
4752 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4754 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4755 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4757 warning ("cannot optimize floating point division for both latency and throughput");
4758 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4762 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4763 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4765 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4769 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4771 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4772 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4774 warning ("cannot optimize integer division for both latency and throughput");
4775 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4779 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4780 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4782 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4786 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4788 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4789 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4791 warning ("cannot optimize square root for both latency and throughput");
4792 target_flags &= ~MASK_INLINE_SQRT_THR;
4796 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4797 target_flags &= ~MASK_INLINE_SQRT_LAT;
4799 target_flags &= ~MASK_INLINE_SQRT_THR;
4803 if (TARGET_INLINE_SQRT_LAT)
4805 warning ("not yet implemented: latency-optimized inline square root");
4806 target_flags &= ~MASK_INLINE_SQRT_LAT;
4809 if (ia64_fixed_range_string)
4810 fix_range (ia64_fixed_range_string);
4812 if (ia64_tls_size_string)
4815 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4816 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4817 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4819 ia64_tls_size = tmp;
4822 if (!ia64_tune_string)
4823 ia64_tune_string = "itanium2";
4825 for (i = 0; i < pta_size; i++)
4826 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4828 ia64_tune = processor_alias_table[i].processor;
4833 error ("bad value (%s) for -mtune= switch", ia64_tune_string);
4835 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4836 flag_schedule_insns_after_reload = 0;
4838 /* Variable tracking should be run after all optimizations which change order
4839 of insns. It also needs a valid CFG. */
4840 ia64_flag_var_tracking = flag_var_tracking;
4841 flag_var_tracking = 0;
4843 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4845 init_machine_status = ia64_init_machine_status;
4848 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4849 static enum attr_type ia64_safe_type (rtx);
4851 static enum attr_itanium_class
4852 ia64_safe_itanium_class (rtx insn)
4854 if (recog_memoized (insn) >= 0)
4855 return get_attr_itanium_class (insn);
4857 return ITANIUM_CLASS_UNKNOWN;
4860 static enum attr_type
4861 ia64_safe_type (rtx insn)
4863 if (recog_memoized (insn) >= 0)
4864 return get_attr_type (insn);
4866 return TYPE_UNKNOWN;
4869 /* The following collection of routines emit instruction group stop bits as
4870 necessary to avoid dependencies. */
4872 /* Need to track some additional registers as far as serialization is
4873 concerned so we can properly handle br.call and br.ret. We could
4874 make these registers visible to gcc, but since these registers are
4875 never explicitly used in gcc generated code, it seems wasteful to
4876 do so (plus it would make the call and return patterns needlessly complex).  */
4878 #define REG_RP (BR_REG (0))
4879 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4880 /* This is used for volatile asms which may require a stop bit immediately
4881 before and after them. */
4882 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4883 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4884 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4886 /* For each register, we keep track of how it has been written in the
4887 current instruction group.
4889 If a register is written unconditionally (no qualifying predicate),
4890 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4892 If a register is written if its qualifying predicate P is true, we
4893 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4894 may be written again by the complement of P (P^1) and when this happens,
4895 WRITE_COUNT gets set to 2.
4897 The result of this is that whenever an insn attempts to write a register
4898 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4900 If a predicate register is written by a floating-point insn, we set
4901 WRITTEN_BY_FP to true.
4903 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4904 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
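/* Worked example of the WRITE_COUNT protocol above (illustrative
   predicate numbers; p6/p7 are assumed to be a complementary pair):

     (p6) mov r4 = r5    // write_count 0 -> 1, first_pred = p6
     (p7) mov r4 = r6    // complement of p6 -> write_count 2, no stop
     (p8) mov r4 = r7    // write_count already 2 -> stop bit required  */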
4906 struct reg_write_state
4908 unsigned int write_count : 2;
4909 unsigned int first_pred : 16;
4910 unsigned int written_by_fp : 1;
4911 unsigned int written_by_and : 1;
4912 unsigned int written_by_or : 1;
4915 /* Cumulative info for the current instruction group. */
4916 struct reg_write_state rws_sum[NUM_REGS];
4917 /* Info for the current instruction. This gets copied to rws_sum after a
4918 stop bit is emitted. */
4919 struct reg_write_state rws_insn[NUM_REGS];
4921 /* Indicates whether this is the first instruction after a stop bit,
4922 in which case we don't need another stop bit. Without this, we hit
4923 the abort in ia64_variable_issue when scheduling an alloc. */
4924 static int first_instruction;
4926 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4927 RTL for one instruction. */
4930 unsigned int is_write : 1; /* Is register being written? */
4931 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4932 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4933 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4934 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4935 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4938 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4939 static int rws_access_regno (int, struct reg_flags, int);
4940 static int rws_access_reg (rtx, struct reg_flags, int);
4941 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4942 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4943 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4944 static void init_insn_group_barriers (void);
4945 static int group_barrier_needed_p (rtx);
4946 static int safe_group_barrier_needed_p (rtx);
4948 /* Update *RWS for REGNO, which is being written by the current instruction,
4949 with predicate PRED, and associated register flags in FLAGS. */
4952 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4955 rws[regno].write_count++;
4957 rws[regno].write_count = 2;
4958 rws[regno].written_by_fp |= flags.is_fp;
4959 /* ??? Not tracking and/or across differing predicates. */
4960 rws[regno].written_by_and = flags.is_and;
4961 rws[regno].written_by_or = flags.is_or;
4962 rws[regno].first_pred = pred;
4965 /* Handle an access to register REGNO of type FLAGS using predicate register
4966 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4967 a dependency with an earlier instruction in the same group. */
4970 rws_access_regno (int regno, struct reg_flags flags, int pred)
4972 int need_barrier = 0;
4974 if (regno >= NUM_REGS)
4977 if (! PR_REGNO_P (regno))
4978 flags.is_and = flags.is_or = 0;
4984 /* One insn writes same reg multiple times? */
4985 if (rws_insn[regno].write_count > 0)
4988 /* Update info for current instruction. */
4989 rws_update (rws_insn, regno, flags, pred);
4990 write_count = rws_sum[regno].write_count;
4992 switch (write_count)
4995 /* The register has not been written yet. */
4996 rws_update (rws_sum, regno, flags, pred);
5000 /* The register has been written via a predicate. If this is
5001 not a complementary predicate, then we need a barrier. */
5002 /* ??? This assumes that P and P+1 are always complementary
5003 predicates for P even. */
5004 if (flags.is_and && rws_sum[regno].written_by_and)
5006 else if (flags.is_or && rws_sum[regno].written_by_or)
5008 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5010 rws_update (rws_sum, regno, flags, pred);
5014 /* The register has been unconditionally written already.  We need a barrier.  */
5016 if (flags.is_and && rws_sum[regno].written_by_and)
5018 else if (flags.is_or && rws_sum[regno].written_by_or)
5022 rws_sum[regno].written_by_and = flags.is_and;
5023 rws_sum[regno].written_by_or = flags.is_or;
5032 if (flags.is_branch)
5034 /* Branches have several RAW exceptions that allow us to avoid barriers.  */
5037 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5038 /* RAW dependencies on branch regs are permissible as long
5039 as the writer is a non-branch instruction. Since we
5040 never generate code that uses a branch register written
5041 by a branch instruction, handling this case is easy.  */
5045 if (REGNO_REG_CLASS (regno) == PR_REGS
5046 && ! rws_sum[regno].written_by_fp)
5047 /* The predicates of a branch are available within the
5048 same insn group as long as the predicate was written by
5049 something other than a floating-point instruction. */
5053 if (flags.is_and && rws_sum[regno].written_by_and)
5055 if (flags.is_or && rws_sum[regno].written_by_or)
5058 switch (rws_sum[regno].write_count)
5061 /* The register has not been written yet. */
5065 /* The register has been written via a predicate. If this is
5066 not a complementary predicate, then we need a barrier. */
5067 /* ??? This assumes that P and P+1 are always complementary
5068 predicates for P even. */
5069 if ((rws_sum[regno].first_pred ^ 1) != pred)
5074 /* The register has been unconditionally written already.  We need a barrier.  */
5084 return need_barrier;
5088 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5090 int regno = REGNO (reg);
5091 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5094 return rws_access_regno (regno, flags, pred);
5097 int need_barrier = 0;
5099 need_barrier |= rws_access_regno (regno + n, flags, pred);
5100 return need_barrier;
5104 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
5105 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
5108 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
5110 rtx src = SET_SRC (x);
5114 switch (GET_CODE (src))
5120 if (SET_DEST (x) == pc_rtx)
5121 /* X is a conditional branch. */
5125 int is_complemented = 0;
5127 /* X is a conditional move. */
5128 rtx cond = XEXP (src, 0);
5129 if (GET_CODE (cond) == EQ)
5130 is_complemented = 1;
5131 cond = XEXP (cond, 0);
5132 if (GET_CODE (cond) != REG
5133 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5136 if (XEXP (src, 1) == SET_DEST (x)
5137 || XEXP (src, 2) == SET_DEST (x))
5139 /* X is a conditional move that conditionally writes the destination.  */
5142 /* We need another complement in this case. */
5143 if (XEXP (src, 1) == SET_DEST (x))
5144 is_complemented = ! is_complemented;
5146 *ppred = REGNO (cond);
5147 if (is_complemented)
5151 /* ??? If this is a conditional write to the dest, then this
5152 instruction does not actually read one source. This probably
5153 doesn't matter, because that source is also the dest. */
5154 /* ??? Multiple writes to predicate registers are allowed
5155 if they are all AND type compares, or if they are all OR
5156 type compares.  We do not generate such instructions currently.  */
5159 /* ... fall through ... */
5162 if (COMPARISON_P (src)
5163 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
5164 /* Set pflags->is_fp to 1 so that we know we're dealing
5165 with a floating point comparison when processing the
5166 destination of the SET. */
5169 /* Discover if this is a parallel comparison. We only handle
5170 and.orcm and or.andcm at present, since we must retain a
5171 strict inverse on the predicate pair. */
5172 else if (GET_CODE (src) == AND)
5174 else if (GET_CODE (src) == IOR)
5181 /* Subroutine of rtx_needs_barrier; this function determines whether the
5182 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5183 are as in rtx_needs_barrier.  COND is an rtx that holds the condition for this insn.  */
5187 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
5189 int need_barrier = 0;
5191 rtx src = SET_SRC (x);
5193 if (GET_CODE (src) == CALL)
5194 /* We don't need to worry about the result registers that
5195 get written by a subroutine call.  */
5196 return rtx_needs_barrier (src, flags, pred);
5197 else if (SET_DEST (x) == pc_rtx)
5199 /* X is a conditional branch. */
5200 /* ??? This seems redundant, as the caller sets this bit for all JUMP_INSNs.  */
5202 flags.is_branch = 1;
5203 return rtx_needs_barrier (src, flags, pred);
5206 need_barrier = rtx_needs_barrier (src, flags, pred);
5208 /* This instruction unconditionally uses a predicate register. */
5210 need_barrier |= rws_access_reg (cond, flags, 0);
5213 if (GET_CODE (dst) == ZERO_EXTRACT)
5215 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5216 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5217 dst = XEXP (dst, 0);
5219 return need_barrier;
5222 /* Handle an access to rtx X of type FLAGS using predicate register
5223 PRED. Return 1 if this access creates a dependency with an earlier
5224 instruction in the same group. */
5227 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5230 int is_complemented = 0;
5231 int need_barrier = 0;
5232 const char *format_ptr;
5233 struct reg_flags new_flags;
5241 switch (GET_CODE (x))
5244 update_set_flags (x, &new_flags, &pred, &cond);
5245 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
5246 if (GET_CODE (SET_SRC (x)) != CALL)
5248 new_flags.is_write = 1;
5249 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5254 new_flags.is_write = 0;
5255 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5257 /* Avoid multiple register writes, in case this is a pattern with
5258 multiple CALL rtx. This avoids an abort in rws_access_reg. */
5259 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5261 new_flags.is_write = 1;
5262 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5263 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5264 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5269 /* X is a predicated instruction. */
5271 cond = COND_EXEC_TEST (x);
5274 need_barrier = rtx_needs_barrier (cond, flags, 0);
5276 if (GET_CODE (cond) == EQ)
5277 is_complemented = 1;
5278 cond = XEXP (cond, 0);
5279 if (GET_CODE (cond) != REG
5280 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5282 pred = REGNO (cond);
5283 if (is_complemented)
5286 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5287 return need_barrier;
5291 /* Clobber & use are for earlier compiler-phases only. */
5296 /* We always emit stop bits for traditional asms. We emit stop bits
5297 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5298 if (GET_CODE (x) != ASM_OPERANDS
5299 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5301 /* Avoid writing the register multiple times if we have multiple
5302 asm outputs. This avoids an abort in rws_access_reg. */
5303 if (! rws_insn[REG_VOLATILE].write_count)
5305 new_flags.is_write = 1;
5306 rws_access_regno (REG_VOLATILE, new_flags, pred);
5311 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5312 We cannot just fall through here since then we would be confused
5313 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5314 a traditional asm, unlike its normal usage.
5316 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5317 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5322 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5324 rtx pat = XVECEXP (x, 0, i);
5325 if (GET_CODE (pat) == SET)
5327 update_set_flags (pat, &new_flags, &pred, &cond);
5328 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
5330 else if (GET_CODE (pat) == USE
5331 || GET_CODE (pat) == CALL
5332 || GET_CODE (pat) == ASM_OPERANDS)
5333 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5334 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
5337 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5339 rtx pat = XVECEXP (x, 0, i);
5340 if (GET_CODE (pat) == SET)
5342 if (GET_CODE (SET_SRC (pat)) != CALL)
5344 new_flags.is_write = 1;
5345 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5349 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5350 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5358 if (REGNO (x) == AR_UNAT_REGNUM)
5360 for (i = 0; i < 64; ++i)
5361 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5364 need_barrier = rws_access_reg (x, flags, pred);
5368 /* Find the regs used in memory address computation. */
5369 new_flags.is_write = 0;
5370 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5373 case CONST_INT: case CONST_DOUBLE:
5374 case SYMBOL_REF: case LABEL_REF: case CONST:
5377 /* Operators with side-effects. */
5378 case POST_INC: case POST_DEC:
5379 if (GET_CODE (XEXP (x, 0)) != REG)
5382 new_flags.is_write = 0;
5383 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5384 new_flags.is_write = 1;
5385 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5389 if (GET_CODE (XEXP (x, 0)) != REG)
5392 new_flags.is_write = 0;
5393 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5394 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5395 new_flags.is_write = 1;
5396 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5399 /* Handle common unary and binary ops for efficiency. */
5400 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5401 case MOD: case UDIV: case UMOD: case AND: case IOR:
5402 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5403 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5404 case NE: case EQ: case GE: case GT: case LE:
5405 case LT: case GEU: case GTU: case LEU: case LTU:
5406 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5407 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5410 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5411 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5412 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5413 case SQRT: case FFS: case POPCOUNT:
5414 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5418 switch (XINT (x, 1))
5420 case UNSPEC_LTOFF_DTPMOD:
5421 case UNSPEC_LTOFF_DTPREL:
5423 case UNSPEC_LTOFF_TPREL:
5425 case UNSPEC_PRED_REL_MUTEX:
5426 case UNSPEC_PIC_CALL:
5428 case UNSPEC_FETCHADD_ACQ:
5429 case UNSPEC_BSP_VALUE:
5430 case UNSPEC_FLUSHRS:
5431 case UNSPEC_BUNDLE_SELECTOR:
5434 case UNSPEC_GR_SPILL:
5435 case UNSPEC_GR_RESTORE:
5437 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5438 HOST_WIDE_INT bit = (offset >> 3) & 63;
5440 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5441 new_flags.is_write = (XINT (x, 1) == 1);
5442 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5447 case UNSPEC_FR_SPILL:
5448 case UNSPEC_FR_RESTORE:
5449 case UNSPEC_GETF_EXP:
5450 case UNSPEC_SETF_EXP:
5452 case UNSPEC_FR_SQRT_RECIP_APPROX:
5453 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5456 case UNSPEC_FR_RECIP_APPROX:
5457 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5458 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5461 case UNSPEC_CMPXCHG_ACQ:
5462 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5463 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5471 case UNSPEC_VOLATILE:
5472 switch (XINT (x, 1))
5475 /* Alloc must always be the first instruction of a group.
5476 We force this by always returning true. */
5477 /* ??? We might get better scheduling if we explicitly check for
5478 input/local/output register dependencies, and modify the
5479 scheduler so that alloc is always reordered to the start of
5480 the current group. We could then eliminate all of the
5481 first_instruction code. */
5482 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5484 new_flags.is_write = 1;
5485 rws_access_regno (REG_AR_CFM, new_flags, pred);
5488 case UNSPECV_SET_BSP:
5492 case UNSPECV_BLOCKAGE:
5493 case UNSPECV_INSN_GROUP_BARRIER:
5495 case UNSPECV_PSAC_ALL:
5496 case UNSPECV_PSAC_NORMAL:
5505 new_flags.is_write = 0;
5506 need_barrier = rws_access_regno (REG_RP, flags, pred);
5507 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5509 new_flags.is_write = 1;
5510 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5511 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5515 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5516 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5517 switch (format_ptr[i])
5519 case '0': /* unused field */
5520 case 'i': /* integer */
5521 case 'n': /* note */
5522 case 'w': /* wide integer */
5523 case 's': /* pointer to string */
5524 case 'S': /* optional pointer to string */
5528 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5533 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5534 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5543 return need_barrier;
5546 /* Clear out the state for group_barrier_needed_p at the start of a
5547 sequence of insns. */
5550 init_insn_group_barriers (void)
5552 memset (rws_sum, 0, sizeof (rws_sum));
5553 first_instruction = 1;
5556 /* Given the current state, recorded by previous calls to this function,
5557 determine whether a group barrier (a stop bit) is necessary before INSN.
5558 Return nonzero if so. */
5561 group_barrier_needed_p (rtx insn)
5564 int need_barrier = 0;
5565 struct reg_flags flags;
5567 memset (&flags, 0, sizeof (flags));
5568 switch (GET_CODE (insn))
5574 /* A barrier doesn't imply an instruction group boundary. */
5578 memset (rws_insn, 0, sizeof (rws_insn));
5582 flags.is_branch = 1;
5583 flags.is_sibcall = SIBLING_CALL_P (insn);
5584 memset (rws_insn, 0, sizeof (rws_insn));
5586 /* Don't bundle a call following another call. */
5587 if ((pat = prev_active_insn (insn))
5588 && GET_CODE (pat) == CALL_INSN)
5594 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5598 flags.is_branch = 1;
5600 /* Don't bundle a jump following a call. */
5601 if ((pat = prev_active_insn (insn))
5602 && GET_CODE (pat) == CALL_INSN)
5610 if (GET_CODE (PATTERN (insn)) == USE
5611 || GET_CODE (PATTERN (insn)) == CLOBBER)
5612 /* Don't care about USE and CLOBBER "insns"---those are used to
5613 indicate to the optimizer that it shouldn't get rid of
5614 certain operations. */
5617 pat = PATTERN (insn);
5619 /* Ug. Hack hacks hacked elsewhere. */
5620 switch (recog_memoized (insn))
5622 /* We play dependency tricks with the epilogue in order
5623 to get proper schedules. Undo this for dv analysis. */
5624 case CODE_FOR_epilogue_deallocate_stack:
5625 case CODE_FOR_prologue_allocate_stack:
5626 pat = XVECEXP (pat, 0, 0);
5629 /* The pattern we use for br.cloop confuses the code above.
5630 The second element of the vector is representative. */
5631 case CODE_FOR_doloop_end_internal:
5632 pat = XVECEXP (pat, 0, 1);
5635 /* Doesn't generate code. */
5636 case CODE_FOR_pred_rel_mutex:
5637 case CODE_FOR_prologue_use:
5644 memset (rws_insn, 0, sizeof (rws_insn));
5645 need_barrier = rtx_needs_barrier (pat, flags, 0);
5647 /* Check to see if the previous instruction was a volatile asm.  */
5650 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5657 if (first_instruction && INSN_P (insn)
5658 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5659 && GET_CODE (PATTERN (insn)) != USE
5660 && GET_CODE (PATTERN (insn)) != CLOBBER)
5663 first_instruction = 0;
5666 return need_barrier;
5669 /* Like group_barrier_needed_p, but do not clobber the current state. */
5672 safe_group_barrier_needed_p (rtx insn)
5674 struct reg_write_state rws_saved[NUM_REGS];
5675 int saved_first_instruction;
5678 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5679 saved_first_instruction = first_instruction;
5681 t = group_barrier_needed_p (insn);
5683 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5684 first_instruction = saved_first_instruction;
5689 /* Scan the current function and insert stop bits as necessary to
5690 eliminate dependencies. This function assumes that a final
5691 instruction scheduling pass has been run which has already
5692 inserted most of the necessary stop bits. This function only
5693 inserts new ones at basic block boundaries, since these are
5694 invisible to the scheduler. */
5697 emit_insn_group_barriers (FILE *dump)
5701 int insns_since_last_label = 0;
5703 init_insn_group_barriers ();
5705 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5707 if (GET_CODE (insn) == CODE_LABEL)
5709 if (insns_since_last_label)
5711 insns_since_last_label = 0;
5713 else if (GET_CODE (insn) == NOTE
5714 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5716 if (insns_since_last_label)
5718 insns_since_last_label = 0;
5720 else if (GET_CODE (insn) == INSN
5721 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5722 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5724 init_insn_group_barriers ();
5727 else if (INSN_P (insn))
5729 insns_since_last_label = 1;
5731 if (group_barrier_needed_p (insn))
5736 fprintf (dump, "Emitting stop before label %d\n",
5737 INSN_UID (last_label));
5738 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5741 init_insn_group_barriers ();
5749 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5750 This function has to emit all necessary group barriers. */
5753 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5757 init_insn_group_barriers ();
5759 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5761 if (GET_CODE (insn) == BARRIER)
5763 rtx last = prev_active_insn (insn);
5767 if (GET_CODE (last) == JUMP_INSN
5768 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5769 last = prev_active_insn (last);
5770 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5771 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5773 init_insn_group_barriers ();
5775 else if (INSN_P (insn))
5777 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5778 init_insn_group_barriers ();
5779 else if (group_barrier_needed_p (insn))
5781 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5782 init_insn_group_barriers ();
5783 group_barrier_needed_p (insn);
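/* In the emitted assembly each group barrier becomes the architectural
   stop bit ";;", e.g. (illustrative):

     ld8 r14 = [r15]
     ;;                  // the stop bit ends the instruction group
     add r16 = r14, r17  */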
5790 static int errata_find_address_regs (rtx *, void *);
5791 static void errata_emit_nops (rtx);
5792 static void fixup_errata (void);
5794 /* This structure is used to track some details about the previous insn
5795 groups so we can determine if it may be necessary to insert NOPs to
5796 work around hardware errata.  */
5799 HARD_REG_SET p_reg_set;
5800 HARD_REG_SET gr_reg_conditionally_set;
5803 /* Index into the last_group array. */
5804 static int group_idx;
5806 /* Called through for_each_rtx; determines if a hard register that was
5807 conditionally set in the previous group is used as an address register.
5808 It ensures that for_each_rtx returns 1 in that case. */
5810 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5813 if (GET_CODE (x) != MEM)
5816 if (GET_CODE (x) == POST_MODIFY)
5818 if (GET_CODE (x) == REG)
5820 struct group *prev_group = last_group + (group_idx ^ 1);
5821 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5829 /* Called for each insn; this function keeps track of the state in
5830 last_group and emits additional NOPs if necessary to work around
5831 an Itanium A/B step erratum. */
5833 errata_emit_nops (rtx insn)
5835 struct group *this_group = last_group + group_idx;
5836 struct group *prev_group = last_group + (group_idx ^ 1);
5837 rtx pat = PATTERN (insn);
5838 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5839 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5840 enum attr_type type;
5843 if (GET_CODE (real_pat) == USE
5844 || GET_CODE (real_pat) == CLOBBER
5845 || GET_CODE (real_pat) == ASM_INPUT
5846 || GET_CODE (real_pat) == ADDR_VEC
5847 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5848 || asm_noperands (PATTERN (insn)) >= 0)
5851 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate part of it.  */
5854 if (GET_CODE (set) == PARALLEL)
5857 set = XVECEXP (real_pat, 0, 0);
5858 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5859 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5860 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5867 if (set && GET_CODE (set) != SET)
5870 type = get_attr_type (insn);
5873 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5874 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5876 if ((type == TYPE_M || type == TYPE_A) && cond && set
5877 && REG_P (SET_DEST (set))
5878 && GET_CODE (SET_SRC (set)) != PLUS
5879 && GET_CODE (SET_SRC (set)) != MINUS
5880 && (GET_CODE (SET_SRC (set)) != ASHIFT
5881 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5882 && (GET_CODE (SET_SRC (set)) != MEM
5883 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5884 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5886 if (!COMPARISON_P (cond)
5887 || !REG_P (XEXP (cond, 0)))
5890 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5891 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5893 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5895 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5896 emit_insn_before (gen_nop (), insn);
5897 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5899 memset (last_group, 0, sizeof last_group);
5903 /* Emit extra nops if they are required to work around hardware errata. */
5910 if (! TARGET_B_STEP)
5914 memset (last_group, 0, sizeof last_group);
5916 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5921 if (ia64_safe_type (insn) == TYPE_S)
5924 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5927 errata_emit_nops (insn);
5932 /* Instruction scheduling support. */
5934 #define NR_BUNDLES 10
5936 /* A list of names of all available bundles. */
5938 static const char *bundle_name [NR_BUNDLES] =
5944 #if NR_BUNDLES == 10
5954 /* Nonzero if we should insert stop bits into the schedule. */
5956 int ia64_final_schedule = 0;
5958 /* Codes of the corresponding queried units: */
5960 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5961 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5963 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5964 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5966 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5968 /* The following variable value is an insn group barrier. */
5970 static rtx dfa_stop_insn;
5972 /* The following variable value is the last issued insn. */
5974 static rtx last_scheduled_insn;
5976 /* The following variable value is the size of the DFA state.  */
5978 static size_t dfa_state_size;
5980 /* The following variable value is a pointer to a DFA state used as
5981 a temporary variable.  */
5983 static state_t temp_dfa_state = NULL;
5985 /* The following variable value is the DFA state after issuing the last insn.  */
5988 static state_t prev_cycle_state = NULL;
5990 /* The following array element values are TRUE if the corresponding
5991 insn requires stop bits to be added before it.  */
5993 static char *stops_p;
5995 /* The following variable is used to set up the array mentioned above.  */
5997 static int stop_before_p = 0;
5999 /* The following variable value is the length of the arrays `clocks' and `add_cycles'.  */
6002 static int clocks_length;
6004 /* The following array element values are cycles on which the
6005 corresponding insn will be issued.  The array is used only for Itanium1.  */
static int *clocks;
6010 /* The following array element values are numbers of cycles that should be
6011 added to improve insn scheduling for MM_insns for Itanium1. */
6013 static int *add_cycles;
6015 static rtx ia64_single_set (rtx);
6016 static void ia64_emit_insn_before (rtx, rtx);
6018 /* Map a bundle number to its pseudo-op. */
6021 get_bundle_name (int b)
6023 return bundle_name[b];
6027 /* Return the maximum number of instructions a cpu can issue. */
6030 ia64_issue_rate (void)
6035 /* Helper function - like single_set, but look inside COND_EXEC. */
6038 ia64_single_set (rtx insn)
6040 rtx x = PATTERN (insn), ret;
6041 if (GET_CODE (x) == COND_EXEC)
6042 x = COND_EXEC_CODE (x);
6043 if (GET_CODE (x) == SET)
6046 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6047 Although they are not classical single sets, the second set is there just
6048 to protect it from moving past FP-relative stack accesses. */
6049 switch (recog_memoized (insn))
6051 case CODE_FOR_prologue_allocate_stack:
6052 case CODE_FOR_epilogue_deallocate_stack:
6053 ret = XVECEXP (x, 0, 0);
6057 ret = single_set_2 (insn, x);
6064 /* Adjust the cost of a scheduling dependency. Return the new cost of
6065 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6068 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6070 enum attr_itanium_class dep_class;
6071 enum attr_itanium_class insn_class;
6073 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6076 insn_class = ia64_safe_itanium_class (insn);
6077 dep_class = ia64_safe_itanium_class (dep_insn);
6078 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6079 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6085 /* Like emit_insn_before, but skip cycle_display notes.
6086 ??? When cycle display notes are implemented, update this. */
6089 ia64_emit_insn_before (rtx insn, rtx before)
6091 emit_insn_before (insn, before);
6094 /* The following function marks insns that produce addresses for load
6095 and store insns.  Such insns will be placed into M slots because that
6096 decreases latency time for Itanium1 (see function
6097 `ia64_produce_address_p' and the DFA descriptions). */
6100 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6102 rtx insn, link, next, next_tail;
6104 next_tail = NEXT_INSN (tail);
6105 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6108 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6110 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6112 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
6114 next = XEXP (link, 0);
6115 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
6116 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
6117 && ia64_st_address_bypass_p (insn, next))
6119 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
6120 || ia64_safe_itanium_class (next)
6121 == ITANIUM_CLASS_FLD)
6122 && ia64_ld_address_bypass_p (insn, next))
6125 insn->call = link != 0;
6129 /* We're beginning a new block. Initialize data structures as necessary. */
6132 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6133 int sched_verbose ATTRIBUTE_UNUSED,
6134 int max_ready ATTRIBUTE_UNUSED)
6136 #ifdef ENABLE_CHECKING
6139 if (reload_completed)
6140 for (insn = NEXT_INSN (current_sched_info->prev_head);
6141 insn != current_sched_info->next_tail;
6142 insn = NEXT_INSN (insn))
6143 if (SCHED_GROUP_P (insn))
6146 last_scheduled_insn = NULL_RTX;
6147 init_insn_group_barriers ();
6150 /* We are about to begin issuing insns for this clock cycle.
6151 Override the default sort algorithm to better slot instructions. */
6154 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6155 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6159 int n_ready = *pn_ready;
6160 rtx *e_ready = ready + n_ready;
6164 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6166 if (reorder_type == 0)
6168 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6170 for (insnp = ready; insnp < e_ready; insnp++)
6171 if (insnp < e_ready)
6174 enum attr_type t = ia64_safe_type (insn);
6175 if (t == TYPE_UNKNOWN)
6177 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6178 || asm_noperands (PATTERN (insn)) >= 0)
6180 rtx lowest = ready[n_asms];
6181 ready[n_asms] = insn;
6187 rtx highest = ready[n_ready - 1];
6188 ready[n_ready - 1] = insn;
6195 if (n_asms < n_ready)
6197 /* Some normal insns to process. Skip the asms. */
6201 else if (n_ready > 0)
6205 if (ia64_final_schedule)
6208 int nr_need_stop = 0;
6210 for (insnp = ready; insnp < e_ready; insnp++)
6211 if (safe_group_barrier_needed_p (*insnp))
6214 if (reorder_type == 1 && n_ready == nr_need_stop)
6216 if (reorder_type == 0)
6219 /* Move down everything that needs a stop bit, preserving relative order.  */
6221 while (insnp-- > ready + deleted)
6222 while (insnp >= ready + deleted)
6225 if (! safe_group_barrier_needed_p (insn))
6227 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6238 /* We are about to begin issuing insns for this clock cycle.  Override
6239 the default sort algorithm to better slot instructions. */
6242 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6245 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6246 pn_ready, clock_var, 0);
6249 /* Like ia64_sched_reorder, but called after issuing each insn.
6250 Override the default sort algorithm to better slot instructions. */
6253 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6254 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6255 int *pn_ready, int clock_var)
6257 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6258 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6259 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6263 /* We are about to issue INSN. Return the number of insns left on the
6264 ready queue that can be issued this cycle. */
6267 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6268 int sched_verbose ATTRIBUTE_UNUSED,
6269 rtx insn ATTRIBUTE_UNUSED,
6270 int can_issue_more ATTRIBUTE_UNUSED)
6272 last_scheduled_insn = insn;
6273 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6274 if (reload_completed)
6276 if (group_barrier_needed_p (insn))
6278 if (GET_CODE (insn) == CALL_INSN)
6279 init_insn_group_barriers ();
6280 stops_p [INSN_UID (insn)] = stop_before_p;
6286 /* We are choosing an insn from the ready queue.  Return nonzero if INSN can be chosen.  */
6290 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6292 if (insn == NULL_RTX || !INSN_P (insn))
6294 return (!reload_completed
6295 || !safe_group_barrier_needed_p (insn));
6298 /* The following variable value is a pseudo-insn used by the DFA insn
6299 scheduler to change the DFA state when the simulated clock is increased.  */
6302 static rtx dfa_pre_cycle_insn;
6304 /* We are about to begin issuing INSN.  Return nonzero if we cannot
6305 issue it on the given cycle CLOCK, and return zero if we should not sort
6306 the ready queue on the next clock start. */
6309 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6310 int clock, int *sort_p)
6312 int setup_clocks_p = FALSE;
6314 if (insn == NULL_RTX || !INSN_P (insn))
6316 if ((reload_completed && safe_group_barrier_needed_p (insn))
6317 || (last_scheduled_insn
6318 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6319 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6320 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6322 init_insn_group_barriers ();
6323 if (verbose && dump)
6324 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6325 last_clock == clock ? " + cycle advance" : "");
6327 if (last_clock == clock)
6329 state_transition (curr_state, dfa_stop_insn);
6330 if (TARGET_EARLY_STOP_BITS)
6331 *sort_p = (last_scheduled_insn == NULL_RTX
6332 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6337 else if (reload_completed)
6338 setup_clocks_p = TRUE;
6339 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6340 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6341 state_reset (curr_state);
6344 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6345 state_transition (curr_state, dfa_stop_insn);
6346 state_transition (curr_state, dfa_pre_cycle_insn);
6347 state_transition (curr_state, NULL);
6350 else if (reload_completed)
6351 setup_clocks_p = TRUE;
6352 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6353 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6354 && asm_noperands (PATTERN (insn)) < 0)
6356 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6358 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6363 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6364 if (REG_NOTE_KIND (link) == 0)
6366 enum attr_itanium_class dep_class;
6367 rtx dep_insn = XEXP (link, 0);
6369 dep_class = ia64_safe_itanium_class (dep_insn);
6370 if ((dep_class == ITANIUM_CLASS_MMMUL
6371 || dep_class == ITANIUM_CLASS_MMSHF)
6372 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6374 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6375 d = last_clock - clocks [INSN_UID (dep_insn)];
6378 add_cycles [INSN_UID (insn)] = 3 - d;
6386 /* The following page contains abstract data `bundle states' which are
6387 used for bundling insns (inserting nops and template generation). */
6389 /* The following describes the state of insn bundling.  */
6393 /* Unique bundle state number to identify them in the debugging
6396 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6397 /* number of nops before and after the insn */
6398 short before_nops_num, after_nops_num;
6399 int insn_num; /* insn number (0 - for the initial state, 1 - for the 1st insn) */
6401 int cost; /* cost of the state in cycles */
6402 int accumulated_insns_num; /* number of all previous insns including
6403 nops. L is considered as 2 insns */
6404 int branch_deviation; /* deviation of previous branches from 3rd slots */
6405 struct bundle_state *next; /* next state with the same insn_num */
6406 struct bundle_state *originator; /* originator (previous insn state) */
6407 /* All bundle states are in the following chain. */
6408 struct bundle_state *allocated_states_chain;
6409 /* The DFA State after issuing the insn and the nops. */
6413 /* The following maps an insn number to the corresponding bundle state.  */
6415 static struct bundle_state **index_to_bundle_states;
6417 /* The unique number of the next bundle state.  */
6419 static int bundle_states_num;
6421 /* All allocated bundle states are in the following chain. */
6423 static struct bundle_state *allocated_bundle_states_chain;
/* All allocated but not used bundle states are in the following
   chain.  */
6428 static struct bundle_state *free_bundle_state_chain;
6431 /* The following function returns a free bundle state. */
6433 static struct bundle_state *
6434 get_free_bundle_state (void)
6436 struct bundle_state *result;
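  /* Reuse a state from the free list when available; otherwise
     allocate a fresh one.  Freshly allocated states are threaded
     onto allocated_bundle_states_chain so that finish_bundle_states
     can free them all at once.  */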
6438 if (free_bundle_state_chain != NULL)
6440 result = free_bundle_state_chain;
6441 free_bundle_state_chain = result->next;
6445 result = xmalloc (sizeof (struct bundle_state));
6446 result->dfa_state = xmalloc (dfa_state_size);
6447 result->allocated_states_chain = allocated_bundle_states_chain;
6448 allocated_bundle_states_chain = result;
6450 result->unique_num = bundle_states_num++;
/* The following function frees the given bundle state.  */
6458 free_bundle_state (struct bundle_state *state)
6460 state->next = free_bundle_state_chain;
6461 free_bundle_state_chain = state;
6464 /* Start work with abstract data `bundle states'. */
6467 initiate_bundle_states (void)
6469 bundle_states_num = 0;
6470 free_bundle_state_chain = NULL;
6471 allocated_bundle_states_chain = NULL;
6474 /* Finish work with abstract data `bundle states'. */
6477 finish_bundle_states (void)
6479 struct bundle_state *curr_state, *next_state;
6481 for (curr_state = allocated_bundle_states_chain;
6483 curr_state = next_state)
6485 next_state = curr_state->allocated_states_chain;
6486 free (curr_state->dfa_state);
/* Hash table of the bundle states.  The key is the dfa_state and
   insn_num of a bundle state.  */
6494 static htab_t bundle_state_table;
/* The function returns the hash of BUNDLE_STATE.  */
6499 bundle_state_hash (const void *bundle_state)
6501 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6504 for (result = i = 0; i < dfa_state_size; i++)
6505 result += (((unsigned char *) state->dfa_state) [i]
6506 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6507 return result + state->insn_num;
6510 /* The function returns nonzero if the bundle state keys are equal. */
6513 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6515 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6516 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6518 return (state1->insn_num == state2->insn_num
6519 && memcmp (state1->dfa_state, state2->dfa_state,
6520 dfa_state_size) == 0);
/* The function inserts the BUNDLE_STATE into the hash table.  It
   returns nonzero if the bundle state has been inserted into the
   table.  The table always contains the best bundle state for a
   given key.  */
6528 insert_bundle_state (struct bundle_state *bundle_state)
6532 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6533 if (*entry_ptr == NULL)
6535 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6536 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6537 *entry_ptr = (void *) bundle_state;
6540 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6541 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6542 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6543 > bundle_state->accumulated_insns_num
6544 || (((struct bundle_state *)
6545 *entry_ptr)->accumulated_insns_num
6546 == bundle_state->accumulated_insns_num
6547 && ((struct bundle_state *)
6548 *entry_ptr)->branch_deviation
6549 > bundle_state->branch_deviation))))
6552 struct bundle_state temp;
6554 temp = *(struct bundle_state *) *entry_ptr;
6555 *(struct bundle_state *) *entry_ptr = *bundle_state;
6556 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6557 *bundle_state = temp;
6562 /* Start work with the hash table. */
6565 initiate_bundle_state_table (void)
6567 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6571 /* Finish work with the hash table. */
6574 finish_bundle_state_table (void)
6576 htab_delete (bundle_state_table);
/* The following variable is an insn `nop' used to check bundle states
   with different numbers of inserted nops.  */
6584 static rtx ia64_nop;
/* The following function tries to issue NOPS_NUM nops for the current
   state without advancing the processor cycle.  If it fails, the
   function returns FALSE and frees the current state.  */
6591 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6595 for (i = 0; i < nops_num; i++)
6596 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6598 free_bundle_state (curr_state);
/* The following function tries to issue INSN for the current
   state without advancing the processor cycle.  If it fails, the
   function returns FALSE and frees the current state.  */
6609 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6611 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6613 free_bundle_state (curr_state);
/* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
   starting with ORIGINATOR without advancing the processor cycle.  If
   TRY_BUNDLE_END_P is TRUE, the function also/only (if
   ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole
   bundle.  If it is successful, the function creates a new bundle
   state and inserts it into the hash table and into
   `index_to_bundle_states'.  */
6627 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6628 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6630 struct bundle_state *curr_state;
6632 curr_state = get_free_bundle_state ();
6633 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6634 curr_state->insn = insn;
6635 curr_state->insn_num = originator->insn_num + 1;
6636 curr_state->cost = originator->cost;
6637 curr_state->originator = originator;
6638 curr_state->before_nops_num = before_nops_num;
6639 curr_state->after_nops_num = 0;
6640 curr_state->accumulated_insns_num
6641 = originator->accumulated_insns_num + before_nops_num;
6642 curr_state->branch_deviation = originator->branch_deviation;
6643 if (insn == NULL_RTX)
6645 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6647 if (GET_MODE (insn) == TImode)
6649 if (!try_issue_nops (curr_state, before_nops_num))
6651 if (!try_issue_insn (curr_state, insn))
6653 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6654 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6655 && curr_state->accumulated_insns_num % 3 != 0)
6657 free_bundle_state (curr_state);
6661 else if (GET_MODE (insn) != TImode)
6663 if (!try_issue_nops (curr_state, before_nops_num))
6665 if (!try_issue_insn (curr_state, insn))
6667 curr_state->accumulated_insns_num++;
6668 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6669 || asm_noperands (PATTERN (insn)) >= 0)
6671 if (ia64_safe_type (insn) == TYPE_L)
6672 curr_state->accumulated_insns_num++;
6676 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6677 state_transition (curr_state->dfa_state, NULL);
6679 if (!try_issue_nops (curr_state, before_nops_num))
6681 if (!try_issue_insn (curr_state, insn))
6683 curr_state->accumulated_insns_num++;
6684 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6685 || asm_noperands (PATTERN (insn)) >= 0)
6687 /* Finish bundle containing asm insn. */
6688 curr_state->after_nops_num
6689 = 3 - curr_state->accumulated_insns_num % 3;
6690 curr_state->accumulated_insns_num
6691 += 3 - curr_state->accumulated_insns_num % 3;
6693 else if (ia64_safe_type (insn) == TYPE_L)
6694 curr_state->accumulated_insns_num++;
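      /* A B-type insn in the 3rd slot of its bundle
	 ((accumulated_insns_num - 1) % 3 == 2) adds nothing to the
	 deviation below; one in the 1st slot adds 2.  */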
6696 if (ia64_safe_type (insn) == TYPE_B)
6697 curr_state->branch_deviation
6698 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6699 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6701 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6704 struct bundle_state *curr_state1;
6705 struct bundle_state *allocated_states_chain;
6707 curr_state1 = get_free_bundle_state ();
6708 dfa_state = curr_state1->dfa_state;
6709 allocated_states_chain = curr_state1->allocated_states_chain;
6710 *curr_state1 = *curr_state;
6711 curr_state1->dfa_state = dfa_state;
6712 curr_state1->allocated_states_chain = allocated_states_chain;
6713 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6715 curr_state = curr_state1;
6717 if (!try_issue_nops (curr_state,
6718 3 - curr_state->accumulated_insns_num % 3))
6720 curr_state->after_nops_num
6721 = 3 - curr_state->accumulated_insns_num % 3;
6722 curr_state->accumulated_insns_num
6723 += 3 - curr_state->accumulated_insns_num % 3;
6725 if (!insert_bundle_state (curr_state))
6726 free_bundle_state (curr_state);
/* The following function returns the position in the two-bundle
   window for the given STATE.  */
6734 get_max_pos (state_t state)
6736 if (cpu_unit_reservation_p (state, pos_6))
6738 else if (cpu_unit_reservation_p (state, pos_5))
6740 else if (cpu_unit_reservation_p (state, pos_4))
6742 else if (cpu_unit_reservation_p (state, pos_3))
6744 else if (cpu_unit_reservation_p (state, pos_2))
6746 else if (cpu_unit_reservation_p (state, pos_1))
/* The function returns the code of a possible template for the given
   position and state.  The function should be called only with the
   two position values 3 or 6.  */
6757 get_template (state_t state, int pos)
6762 if (cpu_unit_reservation_p (state, _0mii_))
6764 else if (cpu_unit_reservation_p (state, _0mmi_))
6766 else if (cpu_unit_reservation_p (state, _0mfi_))
6768 else if (cpu_unit_reservation_p (state, _0mmf_))
6770 else if (cpu_unit_reservation_p (state, _0bbb_))
6772 else if (cpu_unit_reservation_p (state, _0mbb_))
6774 else if (cpu_unit_reservation_p (state, _0mib_))
6776 else if (cpu_unit_reservation_p (state, _0mmb_))
6778 else if (cpu_unit_reservation_p (state, _0mfb_))
6780 else if (cpu_unit_reservation_p (state, _0mlx_))
6785 if (cpu_unit_reservation_p (state, _1mii_))
6787 else if (cpu_unit_reservation_p (state, _1mmi_))
6789 else if (cpu_unit_reservation_p (state, _1mfi_))
6791 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6793 else if (cpu_unit_reservation_p (state, _1bbb_))
6795 else if (cpu_unit_reservation_p (state, _1mbb_))
6797 else if (cpu_unit_reservation_p (state, _1mib_))
6799 else if (cpu_unit_reservation_p (state, _1mmb_))
6801 else if (cpu_unit_reservation_p (state, _1mfb_))
6803 else if (cpu_unit_reservation_p (state, _1mlx_))
/* The following function returns the first insn important for insn
   bundling that follows INSN and comes before TAIL.  */
6816 get_next_important_insn (rtx insn, rtx tail)
6818 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6820 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6821 && GET_CODE (PATTERN (insn)) != USE
6822 && GET_CODE (PATTERN (insn)) != CLOBBER)
/* The following function does insn bundling.  Bundling means
   inserting templates and nop insns to fit insn groups into permitted
   templates.  Instruction scheduling uses an NDFA (non-deterministic
   finite automaton) encoding information about the templates and the
   inserted nops.  The nondeterminism of the automaton permits
   following all possible insn sequences very quickly.

   Unfortunately it is not possible to get information about inserted
   nop insns and used templates from the automaton states.  The
   automaton only says that we can issue an insn, possibly inserting
   some nops before it and using some template.  Therefore insn
   bundling in this function is implemented by using a DFA
   (deterministic finite automaton).  We follow all possible insn
   sequences by inserting 0-2 nops (that is what the NDFA describes
   for insn scheduling) before/after each insn being bundled.  We know
   the start of a simulated processor cycle from insn scheduling (an
   insn starting a new cycle has TImode).

   A simple implementation of insn bundling would create an enormous
   number of possible insn sequences satisfying the information about
   new cycle ticks taken from the insn scheduling.  To make the
   algorithm practical we use dynamic programming.  Each decision
   (about inserting nops and implicitly about previous decisions) is
   described by the structure bundle_state (see above).  If we
   generate the same bundle state (the key is the automaton state
   after issuing the insns and nops for it), we reuse the already
   generated one.  As a consequence we reject some decisions which
   cannot improve the solution and reduce the memory needed by the
   algorithm.

   When we reach the end of an EBB (extended basic block), we choose
   the best sequence and then, moving back through the EBB, insert
   templates for the best alternative.  The templates are taken by
   querying the automaton state for each insn of the chosen bundle
   states.

   So the algorithm makes two (forward and backward) passes through
   the EBB.  There is an additional forward pass through the EBB for
   the Itanium 1 processor.  This pass inserts more nops to make the
   dependency between a producer insn and an MMMUL/MMSHF insn at
   least 4 cycles long.  */
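/* As a rough sketch (simplified pseudocode, not the exact code
   below), the forward dynamic-programming step looks like:

     for each important insn I in the EBB (forward order)
       for each bundle state S with S->insn_num == index (I) - 1
         for nops in { 2, 1, 0 }
           S' = copy of S; issue `nops' nops and then I from S';
           if the DFA accepted them, insert_bundle_state (S');

   insert_bundle_state keeps only the best state for each
   (dfa_state, insn_num) key.  The backward pass then follows the
   originator links from the best final state, emitting bundle
   selectors and the recorded numbers of nops.  */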
6867 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6869 struct bundle_state *curr_state, *next_state, *best_state;
6870 rtx insn, next_insn;
6872 int i, bundle_end_p, only_bundle_end_p, asm_p;
6873 int pos = 0, max_pos, template0, template1;
6876 enum attr_type type;
6879 /* Count insns in the EBB. */
6880 for (insn = NEXT_INSN (prev_head_insn);
6881 insn && insn != tail;
6882 insn = NEXT_INSN (insn))
6888 dfa_clean_insn_cache ();
6889 initiate_bundle_state_table ();
6890 index_to_bundle_states = xmalloc ((insn_num + 2)
6891 * sizeof (struct bundle_state *));
6892 /* First (forward) pass -- generation of bundle states. */
6893 curr_state = get_free_bundle_state ();
6894 curr_state->insn = NULL;
6895 curr_state->before_nops_num = 0;
6896 curr_state->after_nops_num = 0;
6897 curr_state->insn_num = 0;
6898 curr_state->cost = 0;
6899 curr_state->accumulated_insns_num = 0;
6900 curr_state->branch_deviation = 0;
6901 curr_state->next = NULL;
6902 curr_state->originator = NULL;
6903 state_reset (curr_state->dfa_state);
6904 index_to_bundle_states [0] = curr_state;
  /* Shift the cycle mark if it is put on an insn which could be
     ignored.  */
6907 for (insn = NEXT_INSN (prev_head_insn);
6909 insn = NEXT_INSN (insn))
6911 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6912 || GET_CODE (PATTERN (insn)) == USE
6913 || GET_CODE (PATTERN (insn)) == CLOBBER)
6914 && GET_MODE (insn) == TImode)
6916 PUT_MODE (insn, VOIDmode);
6917 for (next_insn = NEXT_INSN (insn);
6919 next_insn = NEXT_INSN (next_insn))
6920 if (INSN_P (next_insn)
6921 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6922 && GET_CODE (PATTERN (next_insn)) != USE
6923 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6925 PUT_MODE (next_insn, TImode);
  /* Forward pass: generation of bundle states.  */
6930 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6935 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6936 || GET_CODE (PATTERN (insn)) == USE
6937 || GET_CODE (PATTERN (insn)) == CLOBBER)
6939 type = ia64_safe_type (insn);
6940 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6942 index_to_bundle_states [insn_num] = NULL;
6943 for (curr_state = index_to_bundle_states [insn_num - 1];
6945 curr_state = next_state)
6947 pos = curr_state->accumulated_insns_num % 3;
6948 next_state = curr_state->next;
	  /* We must fill up the current bundle in order to start a
	     subsequent asm insn in a new bundle.  An asm insn is
	     always placed in a separate bundle.  */
6953 = (next_insn != NULL_RTX
6954 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6955 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6956 /* We may fill up the current bundle if it is the cycle end
6957 without a group barrier. */
6959 = (only_bundle_end_p || next_insn == NULL_RTX
6960 || (GET_MODE (next_insn) == TImode
6961 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6962 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6964 /* We need to insert 2 nops for cases like M_MII. To
6965 guarantee issuing all insns on the same cycle for
6966 Itanium 1, we need to issue 2 nops after the first M
6967 insn (MnnMII where n is a nop insn). */
6968 || ((type == TYPE_M || type == TYPE_A)
6969 && ia64_tune == PROCESSOR_ITANIUM
6970 && !bundle_end_p && pos == 1))
6971 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6973 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6975 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6978 if (index_to_bundle_states [insn_num] == NULL)
6980 for (curr_state = index_to_bundle_states [insn_num];
6982 curr_state = curr_state->next)
6983 if (verbose >= 2 && dump)
	  /* This structure is taken from the generated code of the
	     pipeline hazard recognizer (see file insn-attrtab.c).
	     Please don't forget to change the structure if a new
	     automaton is added to the .md file.  */
6991 unsigned short one_automaton_state;
6992 unsigned short oneb_automaton_state;
6993 unsigned short two_automaton_state;
6994 unsigned short twob_automaton_state;
6999 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7000 curr_state->unique_num,
7001 (curr_state->originator == NULL
7002 ? -1 : curr_state->originator->unique_num),
7004 curr_state->before_nops_num, curr_state->after_nops_num,
7005 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7006 (ia64_tune == PROCESSOR_ITANIUM
7007 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7008 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
7012 if (index_to_bundle_states [insn_num] == NULL)
    /* We should find a solution because the 2nd insn scheduling has
       found one.  */
7016 /* Find a state corresponding to the best insn sequence. */
7018 for (curr_state = index_to_bundle_states [insn_num];
7020 curr_state = curr_state->next)
    /* We are just looking at the states with a fully filled up last
       bundle.  First we prefer insn sequences with minimal cost, then
       with a minimal number of inserted nops, and finally with branch
       insns placed in 3rd slots.  */
7025 if (curr_state->accumulated_insns_num % 3 == 0
7026 && (best_state == NULL || best_state->cost > curr_state->cost
7027 || (best_state->cost == curr_state->cost
7028 && (curr_state->accumulated_insns_num
7029 < best_state->accumulated_insns_num
7030 || (curr_state->accumulated_insns_num
7031 == best_state->accumulated_insns_num
7032 && curr_state->branch_deviation
7033 < best_state->branch_deviation)))))
7034 best_state = curr_state;
7035 /* Second (backward) pass: adding nops and templates. */
7036 insn_num = best_state->before_nops_num;
7037 template0 = template1 = -1;
7038 for (curr_state = best_state;
7039 curr_state->originator != NULL;
7040 curr_state = curr_state->originator)
7042 insn = curr_state->insn;
7043 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7044 || asm_noperands (PATTERN (insn)) >= 0);
7046 if (verbose >= 2 && dump)
7050 unsigned short one_automaton_state;
7051 unsigned short oneb_automaton_state;
7052 unsigned short two_automaton_state;
7053 unsigned short twob_automaton_state;
7058 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7059 curr_state->unique_num,
7060 (curr_state->originator == NULL
7061 ? -1 : curr_state->originator->unique_num),
7063 curr_state->before_nops_num, curr_state->after_nops_num,
7064 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7065 (ia64_tune == PROCESSOR_ITANIUM
7066 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7067 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
      /* Find the position in the current bundle window.  The window
	 can contain at most two bundles.  A two-bundle window means
	 that the processor will make two bundle rotations.  */
7073 max_pos = get_max_pos (curr_state->dfa_state);
7075 /* The following (negative template number) means that the
7076 processor did one bundle rotation. */
7077 || (max_pos == 3 && template0 < 0))
	  /* We are at the end of the window -- find template(s) for
	     its bundle(s).  */
7083 template0 = get_template (curr_state->dfa_state, 3);
7086 template1 = get_template (curr_state->dfa_state, 3);
7087 template0 = get_template (curr_state->dfa_state, 6);
7090 if (max_pos > 3 && template1 < 0)
	    /* This may happen when we have a stop inside a bundle.  */
7095 template1 = get_template (curr_state->dfa_state, 3);
7099 /* Emit nops after the current insn. */
7100 for (i = 0; i < curr_state->after_nops_num; i++)
7103 emit_insn_after (nop, insn);
7109 /* We are at the start of a bundle: emit the template
7110 (it should be defined). */
7113 b = gen_bundle_selector (GEN_INT (template0));
7114 ia64_emit_insn_before (b, nop);
	      /* If we have a two-bundle window, we make one bundle
		 rotation.  Otherwise template0 will be undefined
		 (a negative value).  */
7118 template0 = template1;
      /* Move the position backward in the window.  A group barrier
	 has no slot.  An asm insn takes a whole bundle.  */
7124 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7125 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7126 && asm_noperands (PATTERN (insn)) < 0)
      /* An L (long) insn takes 2 slots.  */
7129 if (ia64_safe_type (insn) == TYPE_L)
7134 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7135 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7136 && asm_noperands (PATTERN (insn)) < 0)
	  /* The current insn is at the bundle start: emit the
	     template.  */
7142 b = gen_bundle_selector (GEN_INT (template0));
7143 ia64_emit_insn_before (b, insn);
7144 b = PREV_INSN (insn);
	  /* See the comment above in the analogous place for emitting
	     nops after the insn.  */
7148 template0 = template1;
      /* Emit nops before the current insn.  */
7152 for (i = 0; i < curr_state->before_nops_num; i++)
7155 ia64_emit_insn_before (nop, insn);
7156 nop = PREV_INSN (insn);
	      /* See the comment above in the analogous place for
		 emitting nops after the insn.  */
7167 b = gen_bundle_selector (GEN_INT (template0));
7168 ia64_emit_insn_before (b, insn);
7169 b = PREV_INSN (insn);
7171 template0 = template1;
7176 if (ia64_tune == PROCESSOR_ITANIUM)
    /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
       Itanium 1 has a strange design: if the distance between an insn
       and a dependent MM-insn is less than 4 cycles, we incur an
       additional 6-cycle stall.  So we make the distance equal to 4
       cycles if it is less.  */
7182 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7187 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7188 || GET_CODE (PATTERN (insn)) == USE
7189 || GET_CODE (PATTERN (insn)) == CLOBBER)
7191 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7192 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
	  /* We found an MM-insn which needs additional cycles.  */
	    /* Now we are searching for the template of the bundle in
	       which the MM-insn is placed and for the position of the
	       insn in the bundle (0, 1, 2).  We also check whether
	       there is a stop before the insn.  */
7203 last = prev_active_insn (insn);
7204 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
7206 last = prev_active_insn (last);
7208 for (;; last = prev_active_insn (last))
7209 if (recog_memoized (last) == CODE_FOR_bundle_selector)
7211 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
		  /* The insn is in an MLX bundle.  Change the
		     template to MFI because we will add nops before
		     the insn.  This simplifies the subsequent code
		     a lot.  */
7217 = gen_bundle_selector (const2_rtx); /* -> MFI */
7220 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
7221 && (ia64_safe_itanium_class (last)
7222 != ITANIUM_CLASS_IGNORE))
	    /* Sanity check: abort if the stop is at the bundle start,
	       if there are more than 3 insns in the bundle, or if the
	       MM-insn is not at the start of a bundle with the MLX
	       template.  */
7228 if ((pred_stop_p && n == 0) || n > 2
7229 || (template0 == 9 && n != 0))
7231 /* Put nops after the insn in the bundle. */
7232 for (j = 3 - n; j > 0; j --)
7233 ia64_emit_insn_before (gen_nop (), insn);
	    /* This takes into account that we will add N more nops
	       before the insn later -- please see the code below.  */
7236 add_cycles [INSN_UID (insn)]--;
7237 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
7238 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7241 add_cycles [INSN_UID (insn)]--;
7242 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7244 /* Insert "MII;" template. */
7245 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7247 ia64_emit_insn_before (gen_nop (), insn);
7248 ia64_emit_insn_before (gen_nop (), insn);
		    /* To decrease code size, we use an "MI;I;"
		       template.  */
7253 ia64_emit_insn_before
7254 (gen_insn_group_barrier (GEN_INT (3)), insn);
7257 ia64_emit_insn_before (gen_nop (), insn);
7258 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7261 /* Put the MM-insn in the same slot of a bundle with the
7262 same template as the original one. */
7263 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
	    /* To put the insn in the same slot, add the necessary
	       number of nops.  */
7267 for (j = n; j > 0; j --)
7268 ia64_emit_insn_before (gen_nop (), insn);
	    /* Put the stop back if the original bundle had one.  */
7271 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7275 free (index_to_bundle_states);
7276 finish_bundle_state_table ();
7278 dfa_clean_insn_cache ();
7281 /* The following function is called at the end of scheduling BB or
7282 EBB. After reload, it inserts stop bits and does insn bundling. */
7285 ia64_sched_finish (FILE *dump, int sched_verbose)
7288 fprintf (dump, "// Finishing schedule.\n");
7289 if (!reload_completed)
7291 if (reload_completed)
7293 final_emit_insn_group_barriers (dump);
7294 bundling (dump, sched_verbose, current_sched_info->prev_head,
7295 current_sched_info->next_tail);
7296 if (sched_verbose && dump)
7297 fprintf (dump, "// finishing %d-%d\n",
7298 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7299 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7305 /* The following function inserts stop bits in scheduled BB or EBB. */
7308 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7311 int need_barrier_p = 0;
7312 rtx prev_insn = NULL_RTX;
7314 init_insn_group_barriers ();
7316 for (insn = NEXT_INSN (current_sched_info->prev_head);
7317 insn != current_sched_info->next_tail;
7318 insn = NEXT_INSN (insn))
7320 if (GET_CODE (insn) == BARRIER)
7322 rtx last = prev_active_insn (insn);
7326 if (GET_CODE (last) == JUMP_INSN
7327 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7328 last = prev_active_insn (last);
7329 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7330 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7332 init_insn_group_barriers ();
7334 prev_insn = NULL_RTX;
7336 else if (INSN_P (insn))
7338 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7340 init_insn_group_barriers ();
7342 prev_insn = NULL_RTX;
7344 else if (need_barrier_p || group_barrier_needed_p (insn))
7346 if (TARGET_EARLY_STOP_BITS)
7351 last != current_sched_info->prev_head;
7352 last = PREV_INSN (last))
7353 if (INSN_P (last) && GET_MODE (last) == TImode
7354 && stops_p [INSN_UID (last)])
7356 if (last == current_sched_info->prev_head)
7358 last = prev_active_insn (last);
7360 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7361 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7363 init_insn_group_barriers ();
7364 for (last = NEXT_INSN (last);
7366 last = NEXT_INSN (last))
7368 group_barrier_needed_p (last);
7372 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7374 init_insn_group_barriers ();
7376 group_barrier_needed_p (insn);
7377 prev_insn = NULL_RTX;
7379 else if (recog_memoized (insn) >= 0)
7381 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7382 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7383 || asm_noperands (PATTERN (insn)) >= 0);
/* If the following function returns TRUE, we will use the DFA
   insn scheduler.  */
7394 ia64_first_cycle_multipass_dfa_lookahead (void)
7396 return (reload_completed ? 6 : 4);
/* The following function initializes the variable
   `dfa_pre_cycle_insn'.  */
7402 ia64_init_dfa_pre_cycle_insn (void)
7404 if (temp_dfa_state == NULL)
7406 dfa_state_size = state_size ();
7407 temp_dfa_state = xmalloc (dfa_state_size);
7408 prev_cycle_state = xmalloc (dfa_state_size);
7410 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7411 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7412 recog_memoized (dfa_pre_cycle_insn);
7413 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7414 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7415 recog_memoized (dfa_stop_insn);
7418 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7419 used by the DFA insn scheduler. */
7422 ia64_dfa_pre_cycle_insn (void)
7424 return dfa_pre_cycle_insn;
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type st or stf).  */
7431 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7435 if (producer == NULL_RTX || consumer == NULL_RTX)
7437 dest = ia64_single_set (producer);
7438 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7439 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7441 if (GET_CODE (reg) == SUBREG)
7442 reg = SUBREG_REG (reg);
7443 dest = ia64_single_set (consumer);
7444 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
7445 || GET_CODE (mem) != MEM)
7447 return reg_mentioned_p (reg, mem);
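/* For example (illustrative assembly, arbitrary register numbers):

     add r14 = r2, r3    // producer: ilog insn computing the address
     ;;
     st8 [r14] = r5      // consumer: store through that address

   The function returns TRUE here because the producer's destination
   r14 is mentioned in the consumer's memory address.  */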
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type ld or fld).  */
7454 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7456 rtx dest, src, reg, mem;
7458 if (producer == NULL_RTX || consumer == NULL_RTX)
7460 dest = ia64_single_set (producer);
7461 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7462 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7464 if (GET_CODE (reg) == SUBREG)
7465 reg = SUBREG_REG (reg);
7466 src = ia64_single_set (consumer);
7467 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
7469 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7470 mem = XVECEXP (mem, 0, 0);
7471 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7472 mem = XEXP (mem, 0);
7474 /* Note that LO_SUM is used for GOT loads. */
7475 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
7478 return reg_mentioned_p (reg, mem);
/* The following function returns TRUE if INSN produces an address
   for a load/store insn.  We will place such insns into an M slot
   because that decreases their latency time.  */
7486 ia64_produce_address_p (rtx insn)
7492 /* Emit pseudo-ops for the assembler to describe predicate relations.
7493 At present this assumes that we only consider predicate pairs to
7494 be mutex, and that the assembler can deduce proper values from
7495 straight-line code. */
7498 emit_predicate_relation_info (void)
7502 FOR_EACH_BB_REVERSE (bb)
7505 rtx head = BB_HEAD (bb);
7507 /* We only need such notes at code labels. */
7508 if (GET_CODE (head) != CODE_LABEL)
7510 if (GET_CODE (NEXT_INSN (head)) == NOTE
7511 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7512 head = NEXT_INSN (head);
7514 for (r = PR_REG (0); r < PR_REG (64); r += 2)
7515 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
7517 rtx p = gen_rtx_REG (BImode, r);
7518 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7519 if (head == BB_END (bb))
  /* Look for conditional calls that do not return, and protect predicate
     relations around them.  Otherwise the assembler will assume the call
     returns, and complain about uses of call-clobbered predicates after
     the call.  */
7529 FOR_EACH_BB_REVERSE (bb)
7531 rtx insn = BB_HEAD (bb);
7535 if (GET_CODE (insn) == CALL_INSN
7536 && GET_CODE (PATTERN (insn)) == COND_EXEC
7537 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7539 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7540 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7541 if (BB_HEAD (bb) == insn)
7543 if (BB_END (bb) == insn)
7547 if (insn == BB_END (bb))
7549 insn = NEXT_INSN (insn);
7554 /* Perform machine dependent operations on the rtl chain INSNS. */
7559 /* We are freeing block_for_insn in the toplev to keep compatibility
7560 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7561 compute_bb_for_insn ();
7563 /* If optimizing, we'll have split before scheduling. */
7565 split_all_insns (0);
7567 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7568 non-optimizing bootstrap. */
7569 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7571 if (ia64_flag_schedule_insns2)
7573 timevar_push (TV_SCHED2);
7574 ia64_final_schedule = 1;
7576 initiate_bundle_states ();
7577 ia64_nop = make_insn_raw (gen_nop ());
7578 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7579 recog_memoized (ia64_nop);
7580 clocks_length = get_max_uid () + 1;
7581 stops_p = xcalloc (1, clocks_length);
7582 if (ia64_tune == PROCESSOR_ITANIUM)
7584 clocks = xcalloc (clocks_length, sizeof (int));
7585 add_cycles = xcalloc (clocks_length, sizeof (int));
7587 if (ia64_tune == PROCESSOR_ITANIUM2)
7589 pos_1 = get_cpu_unit_code ("2_1");
7590 pos_2 = get_cpu_unit_code ("2_2");
7591 pos_3 = get_cpu_unit_code ("2_3");
7592 pos_4 = get_cpu_unit_code ("2_4");
7593 pos_5 = get_cpu_unit_code ("2_5");
7594 pos_6 = get_cpu_unit_code ("2_6");
7595 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7596 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7597 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7598 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7599 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7600 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7601 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7602 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7603 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7604 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7605 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7606 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7607 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7608 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7609 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7610 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7611 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7612 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7613 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7614 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7618 pos_1 = get_cpu_unit_code ("1_1");
7619 pos_2 = get_cpu_unit_code ("1_2");
7620 pos_3 = get_cpu_unit_code ("1_3");
7621 pos_4 = get_cpu_unit_code ("1_4");
7622 pos_5 = get_cpu_unit_code ("1_5");
7623 pos_6 = get_cpu_unit_code ("1_6");
7624 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7625 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7626 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7627 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7628 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7629 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7630 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7631 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7632 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7633 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7634 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7635 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7636 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7637 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7638 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7639 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7640 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7641 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7642 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7643 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7645 schedule_ebbs (dump_file);
7646 finish_bundle_states ();
7647 if (ia64_tune == PROCESSOR_ITANIUM)
7653 emit_insn_group_barriers (dump_file);
7655 ia64_final_schedule = 0;
7656 timevar_pop (TV_SCHED2);
7659 emit_all_insn_group_barriers (dump_file);
  /* A call must not be the last instruction in a function, so that the
     return address stays within the function and unwinding works
     properly.  Note that IA-64 differs from dwarf2 on this point.  */
7664 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7669 insn = get_last_insn ();
7670 if (! INSN_P (insn))
7671 insn = prev_active_insn (insn);
7672 /* Skip over insns that expand to nothing. */
7673 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7675 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7676 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7678 insn = prev_active_insn (insn);
7680 if (GET_CODE (insn) == CALL_INSN)
7683 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7684 emit_insn (gen_break_f ());
7685 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7690 emit_predicate_relation_info ();
7692 if (ia64_flag_var_tracking)
7694 timevar_push (TV_VAR_TRACKING);
7695 variable_tracking_main ();
7696 timevar_pop (TV_VAR_TRACKING);
7700 /* Return true if REGNO is used by the epilogue. */
7703 ia64_epilogue_uses (int regno)
7708 /* With a call to a function in another module, we will write a new
7709 value to "gp". After returning from such a call, we need to make
7710 sure the function restores the original gp-value, even if the
7711 function itself does not use the gp anymore. */
7712 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7714 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7715 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7716 /* For functions defined with the syscall_linkage attribute, all
7717 input registers are marked as live at all function exits. This
7718 prevents the register allocator from using the input registers,
7719 which in turn makes it possible to restart a system call after
7720 an interrupt without having to save/restore the input registers.
7721 This also prevents kernel data from leaking to application code. */
7722 return lookup_attribute ("syscall_linkage",
7723 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
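      /* For illustration, this case fires for declarations such as
	 (hypothetical):

	   long sys_foo (long, long) __attribute__ ((syscall_linkage));

	 keeping in0-in7 live at every exit of sys_foo.  */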
7726 /* Conditional return patterns can't represent the use of `b0' as
7727 the return address, so we force the value live this way. */
7731 /* Likewise for ar.pfs, which is used by br.ret. */
7739 /* Return true if REGNO is used by the frame unwinder. */
7742 ia64_eh_uses (int regno)
7744 if (! reload_completed)
7747 if (current_frame_info.reg_save_b0
7748 && regno == current_frame_info.reg_save_b0)
7750 if (current_frame_info.reg_save_pr
7751 && regno == current_frame_info.reg_save_pr)
7753 if (current_frame_info.reg_save_ar_pfs
7754 && regno == current_frame_info.reg_save_ar_pfs)
7756 if (current_frame_info.reg_save_ar_unat
7757 && regno == current_frame_info.reg_save_ar_unat)
7759 if (current_frame_info.reg_save_ar_lc
7760 && regno == current_frame_info.reg_save_ar_lc)
/* Return true if this goes in small data/bss.  */

/* ??? We could also support our own long data here.  Generating
   movl/add/ld8 instead of addl,ld8/ld8.  This makes the code bigger,
   but should make the code faster because there is one less load.
   This would also cover incomplete types, which can't go in
   sdata/sbss.  */
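/* For instance (illustrative), with a small enough section threshold
   a 4-byte object such as

     static int counter;

   ends up in .sbss and can be addressed directly off gp, while larger
   objects go through the normal addl,ld8/ld8 sequence mentioned
   above.  */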
7774 ia64_in_small_data_p (tree exp)
7776 if (TARGET_NO_SDATA)
7779 /* We want to merge strings, so we never consider them small data. */
7780 if (TREE_CODE (exp) == STRING_CST)
7783 /* Functions are never small data. */
7784 if (TREE_CODE (exp) == FUNCTION_DECL)
7787 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7789 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7790 if (strcmp (section, ".sdata") == 0
7791 || strcmp (section, ".sbss") == 0)
7796 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7798 /* If this is an incomplete type with size 0, then we can't put it
7799 in sdata because it might be too big when completed. */
7800 if (size > 0 && size <= ia64_section_threshold)
7807 /* Output assembly directives for prologue regions. */
/* True if the current basic block is the last one in the
   function.  */
7811 static bool last_block;
7813 /* True if we need a copy_state command at the start of the next block. */
7815 static bool need_copy_state;
7817 /* The function emits unwind directives for the start of an epilogue. */
7820 process_epilogue (void)
7822 /* If this isn't the last block of the function, then we need to label the
7823 current state, and copy it back in at the start of the next block. */
7827 fprintf (asm_out_file, "\t.label_state 1\n");
7828 need_copy_state = true;
7831 fprintf (asm_out_file, "\t.restore sp\n");
7834 /* This function processes a SET pattern looking for specific patterns
7835 which result in emitting an assembly directive required for unwinding. */
7838 process_set (FILE *asm_out_file, rtx pat)
7840 rtx src = SET_SRC (pat);
7841 rtx dest = SET_DEST (pat);
7842 int src_regno, dest_regno;
7844 /* Look for the ALLOC insn. */
7845 if (GET_CODE (src) == UNSPEC_VOLATILE
7846 && XINT (src, 1) == UNSPECV_ALLOC
7847 && GET_CODE (dest) == REG)
7849 dest_regno = REGNO (dest);
7851 /* If this isn't the final destination for ar.pfs, the alloc
7852 shouldn't have been marked frame related. */
7853 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7856 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7857 ia64_dbx_register_number (dest_regno));
7861 /* Look for SP = .... */
7862 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7864 if (GET_CODE (src) == PLUS)
7866 rtx op0 = XEXP (src, 0);
7867 rtx op1 = XEXP (src, 1);
7868 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7870 if (INTVAL (op1) < 0)
7871 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7874 process_epilogue ();
7879 else if (GET_CODE (src) == REG
7880 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7881 process_epilogue ();
7888 /* Register move we need to look at. */
7889 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7891 src_regno = REGNO (src);
7892 dest_regno = REGNO (dest);
7897 /* Saving return address pointer. */
7898 if (dest_regno != current_frame_info.reg_save_b0)
7900 fprintf (asm_out_file, "\t.save rp, r%d\n",
7901 ia64_dbx_register_number (dest_regno));
7905 if (dest_regno != current_frame_info.reg_save_pr)
7907 fprintf (asm_out_file, "\t.save pr, r%d\n",
7908 ia64_dbx_register_number (dest_regno));
7911 case AR_UNAT_REGNUM:
7912 if (dest_regno != current_frame_info.reg_save_ar_unat)
7914 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7915 ia64_dbx_register_number (dest_regno));
7919 if (dest_regno != current_frame_info.reg_save_ar_lc)
7921 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7922 ia64_dbx_register_number (dest_regno));
7925 case STACK_POINTER_REGNUM:
7926 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7927 || ! frame_pointer_needed)
7929 fprintf (asm_out_file, "\t.vframe r%d\n",
7930 ia64_dbx_register_number (dest_regno));
7934 /* Everything else should indicate being stored to memory. */
7939 /* Memory store we need to look at. */
7940 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7946 if (GET_CODE (XEXP (dest, 0)) == REG)
7948 base = XEXP (dest, 0);
7951 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7952 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7954 base = XEXP (XEXP (dest, 0), 0);
7955 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7960 if (base == hard_frame_pointer_rtx)
7962 saveop = ".savepsp";
7965 else if (base == stack_pointer_rtx)
7970 src_regno = REGNO (src);
7974 if (current_frame_info.reg_save_b0 != 0)
7976 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7980 if (current_frame_info.reg_save_pr != 0)
7982 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7986 if (current_frame_info.reg_save_ar_lc != 0)
7988 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7992 if (current_frame_info.reg_save_ar_pfs != 0)
7994 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7997 case AR_UNAT_REGNUM:
7998 if (current_frame_info.reg_save_ar_unat != 0)
8000 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
8007 fprintf (asm_out_file, "\t.save.g 0x%x\n",
8008 1 << (src_regno - GR_REG (4)));
8016 fprintf (asm_out_file, "\t.save.b 0x%x\n",
8017 1 << (src_regno - BR_REG (1)));
8024 fprintf (asm_out_file, "\t.save.f 0x%x\n",
8025 1 << (src_regno - FR_REG (2)));
8028 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
8029 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
8030 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
8031 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
8032 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
8033 1 << (src_regno - FR_REG (12)));
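/* Taken together, a typical frame-related prologue yields a directive
   sequence such as (illustrative register numbers):

     .fframe 32
     .save rp, r33
     .save ar.pfs, r34
     .save pr, r35

   from which the unwinder reconstructs the frame state.  */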
8045 /* This function looks at a single insn and emits any directives
8046 required to unwind this insn. */
8048 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
8050 if (flag_unwind_tables
8051 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8055 if (GET_CODE (insn) == NOTE
8056 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
8058 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
8060 /* Restore unwind state from immediately before the epilogue. */
8061 if (need_copy_state)
8063 fprintf (asm_out_file, "\t.body\n");
8064 fprintf (asm_out_file, "\t.copy_state 1\n");
8065 need_copy_state = false;
8069 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
8072 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8074 pat = XEXP (pat, 0);
8076 pat = PATTERN (insn);
8078 switch (GET_CODE (pat))
8081 process_set (asm_out_file, pat);
8087 int limit = XVECLEN (pat, 0);
8088 for (par_index = 0; par_index < limit; par_index++)
8090 rtx x = XVECEXP (pat, 0, par_index);
8091 if (GET_CODE (x) == SET)
8092 process_set (asm_out_file, x);
8105 ia64_init_builtins (void)
8107 tree psi_type_node = build_pointer_type (integer_type_node);
8108 tree pdi_type_node = build_pointer_type (long_integer_type_node);
8110 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
8111 tree si_ftype_psi_si_si
8112 = build_function_type_list (integer_type_node,
8113 psi_type_node, integer_type_node,
8114 integer_type_node, NULL_TREE);
8116 /* __sync_val_compare_and_swap_di */
8117 tree di_ftype_pdi_di_di
8118 = build_function_type_list (long_integer_type_node,
8119 pdi_type_node, long_integer_type_node,
8120 long_integer_type_node, NULL_TREE);
8121 /* __sync_bool_compare_and_swap_di */
8122 tree si_ftype_pdi_di_di
8123 = build_function_type_list (integer_type_node,
8124 pdi_type_node, long_integer_type_node,
8125 long_integer_type_node, NULL_TREE);
8126 /* __sync_synchronize */
8127 tree void_ftype_void
8128 = build_function_type (void_type_node, void_list_node);
8130 /* __sync_lock_test_and_set_si */
8131 tree si_ftype_psi_si
8132 = build_function_type_list (integer_type_node,
8133 psi_type_node, integer_type_node, NULL_TREE);
8135 /* __sync_lock_test_and_set_di */
8136 tree di_ftype_pdi_di
8137 = build_function_type_list (long_integer_type_node,
8138 pdi_type_node, long_integer_type_node,
8141 /* __sync_lock_release_si */
8143 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
8145 /* __sync_lock_release_di */
8147 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
8152 /* The __fpreg type. */
8153 fpreg_type = make_node (REAL_TYPE);
8154 /* ??? The back end should know to load/save __fpreg variables using
8155 the ldf.fill and stf.spill instructions. */
8156 TYPE_PRECISION (fpreg_type) = 96;
8157 layout_type (fpreg_type);
8158 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
8160 /* The __float80 type. */
8161 float80_type = make_node (REAL_TYPE);
8162 TYPE_PRECISION (float80_type) = 96;
8163 layout_type (float80_type);
8164 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
8166 /* The __float128 type. */
8169 tree float128_type = make_node (REAL_TYPE);
8170 TYPE_PRECISION (float128_type) = 128;
8171 layout_type (float128_type);
8172 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
8175 /* Under HPUX, this is a synonym for "long double". */
8176 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
8179 #define def_builtin(name, type, code) \
8180 builtin_function ((name), (type), (code), BUILT_IN_MD, NULL, NULL_TREE)
8182 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
8183 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
8184 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
8185 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
8186 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
8187 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
8188 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
8189 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
8191 def_builtin ("__sync_synchronize", void_ftype_void,
8192 IA64_BUILTIN_SYNCHRONIZE);
8194 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
8195 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
8196 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
8197 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
8198 def_builtin ("__sync_lock_release_si", void_ftype_psi,
8199 IA64_BUILTIN_LOCK_RELEASE_SI);
8200 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
8201 IA64_BUILTIN_LOCK_RELEASE_DI);
8203 def_builtin ("__builtin_ia64_bsp",
8204 build_function_type (ptr_type_node, void_list_node),
8207 def_builtin ("__builtin_ia64_flushrs",
8208 build_function_type (void_type_node, void_list_node),
8209 IA64_BUILTIN_FLUSHRS);
8211 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
8212 IA64_BUILTIN_FETCH_AND_ADD_SI);
8213 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
8214 IA64_BUILTIN_FETCH_AND_SUB_SI);
8215 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
8216 IA64_BUILTIN_FETCH_AND_OR_SI);
8217 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
8218 IA64_BUILTIN_FETCH_AND_AND_SI);
8219 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
8220 IA64_BUILTIN_FETCH_AND_XOR_SI);
8221 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
8222 IA64_BUILTIN_FETCH_AND_NAND_SI);
8224 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
8225 IA64_BUILTIN_ADD_AND_FETCH_SI);
8226 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
8227 IA64_BUILTIN_SUB_AND_FETCH_SI);
8228 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
8229 IA64_BUILTIN_OR_AND_FETCH_SI);
8230 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
8231 IA64_BUILTIN_AND_AND_FETCH_SI);
8232 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
8233 IA64_BUILTIN_XOR_AND_FETCH_SI);
8234 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
8235 IA64_BUILTIN_NAND_AND_FETCH_SI);
8237 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
8238 IA64_BUILTIN_FETCH_AND_ADD_DI);
8239 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
8240 IA64_BUILTIN_FETCH_AND_SUB_DI);
8241 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
8242 IA64_BUILTIN_FETCH_AND_OR_DI);
8243 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
8244 IA64_BUILTIN_FETCH_AND_AND_DI);
8245 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
8246 IA64_BUILTIN_FETCH_AND_XOR_DI);
8247 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
8248 IA64_BUILTIN_FETCH_AND_NAND_DI);
8250 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
8251 IA64_BUILTIN_ADD_AND_FETCH_DI);
8252 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
8253 IA64_BUILTIN_SUB_AND_FETCH_DI);
8254 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
8255 IA64_BUILTIN_OR_AND_FETCH_DI);
8256 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
8257 IA64_BUILTIN_AND_AND_FETCH_DI);
8258 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
8259 IA64_BUILTIN_XOR_AND_FETCH_DI);
8260 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
8261 IA64_BUILTIN_NAND_AND_FETCH_DI);
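/* From user code these builtins are called like ordinary functions,
   e.g. a trivial spin lock (illustrative):

     static int lock;

     while (__sync_lock_test_and_set_si (&lock, 1))
       continue;
     ... critical section ...
     __sync_lock_release_si (&lock);
*/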
/* Expand fetch_and_op intrinsics.  The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       ret = tmp;
       ar.ccv = tmp;
       tmp <op>= value;
       cmpxchgsz.acq tmp = [ptr], tmp
     } while (tmp != ret)
*/
8279 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
8280 tree arglist, rtx target)
8282 rtx ret, label, tmp, ccv, insn, mem, value;
8285 arg0 = TREE_VALUE (arglist);
8286 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8287 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8288 #ifdef POINTERS_EXTEND_UNSIGNED
8289 if (GET_MODE(mem) != Pmode)
8290 mem = convert_memory_address (Pmode, mem);
8292 value = expand_expr (arg1, NULL_RTX, mode, 0);
8294 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8295 MEM_VOLATILE_P (mem) = 1;
8297 if (target && register_operand (target, mode))
8300 ret = gen_reg_rtx (mode);
8302 emit_insn (gen_mf ());
8304 /* Special case for fetchadd instructions. */
8305 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
8308 insn = gen_fetchadd_acq_si (ret, mem, value);
8310 insn = gen_fetchadd_acq_di (ret, mem, value);
8315 tmp = gen_reg_rtx (mode);
8316 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8317 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8318 emit_move_insn (tmp, mem);
8320 label = gen_label_rtx ();
8322 emit_move_insn (ret, tmp);
8323 convert_move (ccv, tmp, /*unsignedp=*/1);
8325 /* Perform the specific operation. Special case NAND by noticing
8326 one_cmpl_optab instead. */
8327 if (binoptab == one_cmpl_optab)
8329 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8330 binoptab = and_optab;
8332 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
8335 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
8337 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
8340 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
/* Expand op_and_fetch intrinsics.  The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       old = tmp;
       ar.ccv = tmp;
       ret = tmp <op> value;
       cmpxchgsz.acq tmp = [ptr], ret
     } while (tmp != old)
*/
8358 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
8359 tree arglist, rtx target)
8361 rtx old, label, tmp, ret, ccv, insn, mem, value;
8364 arg0 = TREE_VALUE (arglist);
8365 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8366 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8367 #ifdef POINTERS_EXTEND_UNSIGNED
8368 if (GET_MODE(mem) != Pmode)
8369 mem = convert_memory_address (Pmode, mem);
8372 value = expand_expr (arg1, NULL_RTX, mode, 0);
8374 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8375 MEM_VOLATILE_P (mem) = 1;
8377 if (target && ! register_operand (target, mode))
8380 emit_insn (gen_mf ());
8381 tmp = gen_reg_rtx (mode);
8382 old = gen_reg_rtx (mode);
8383 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8384 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8386 emit_move_insn (tmp, mem);
8388 label = gen_label_rtx ();
8390 emit_move_insn (old, tmp);
8391 convert_move (ccv, tmp, /*unsignedp=*/1);
8393 /* Perform the specific operation. Special case NAND by noticing
8394 one_cmpl_optab instead. */
8395 if (binoptab == one_cmpl_optab)
8397 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8398 binoptab = and_optab;
8400 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
8403 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
8405 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
8408 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
/* Expand val_ and bool_compare_and_swap.  For val_ we want:

     ar.ccv = oldval
     mf
     cmpxchgsz.acq ret = [ptr], newval, ar.ccv

   For bool_ it's the same except return ret == oldval.
*/
8424 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
8425 int boolp, tree arglist, rtx target)
8427 tree arg0, arg1, arg2;
8428 rtx mem, old, new, ccv, tmp, insn;
8430 arg0 = TREE_VALUE (arglist);
8431 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8432 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8433 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8434 old = expand_expr (arg1, NULL_RTX, mode, 0);
8435 new = expand_expr (arg2, NULL_RTX, mode, 0);
8437 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8438 MEM_VOLATILE_P (mem) = 1;
8440 if (GET_MODE (old) != mode)
8441 old = convert_to_mode (mode, old, /*unsignedp=*/1);
8442 if (GET_MODE (new) != mode)
8443 new = convert_to_mode (mode, new, /*unsignedp=*/1);
8445 if (! register_operand (old, mode))
8446 old = copy_to_mode_reg (mode, old);
8447 if (! register_operand (new, mode))
8448 new = copy_to_mode_reg (mode, new);
8450 if (! boolp && target && register_operand (target, mode))
8453 tmp = gen_reg_rtx (mode);
8455 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8456 convert_move (ccv, old, /*unsignedp=*/1);
8457 emit_insn (gen_mf ());
8459 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
8461 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
8467 target = gen_reg_rtx (rmode);
8468 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
/* Expand lock_test_and_set.  I.e. `xchgsz ret = [ptr], new'.  */

static rtx
ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
			       rtx target)
{
  tree arg0, arg1;
  rtx mem, new, ret, insn;

  arg0 = TREE_VALUE (arglist);
  arg1 = TREE_VALUE (TREE_CHAIN (arglist));
  mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
  new = expand_expr (arg1, NULL_RTX, mode, 0);

  mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
  MEM_VOLATILE_P (mem) = 1;
  if (! register_operand (new, mode))
    new = copy_to_mode_reg (mode, new);

  if (target && register_operand (target, mode))
    ret = target;
  else
    ret = gen_reg_rtx (mode);

  if (mode == SImode)
    insn = gen_xchgsi (ret, mem, new);
  else
    insn = gen_xchgdi (ret, mem, new);
  emit_insn (insn);
  return ret;
}
/* Expand lock_release.  I.e. `stsz.rel [ptr] = r0'.  */

static rtx
ia64_expand_lock_release (enum machine_mode mode, tree arglist,
			  rtx target ATTRIBUTE_UNUSED)
{
  tree arg0;
  rtx mem;

  arg0 = TREE_VALUE (arglist);
  mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
  mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
  MEM_VOLATILE_P (mem) = 1;
  emit_move_insn (mem, const0_rtx);
  return const0_rtx;
}
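/* The two expanders above combine into the usual spin-lock protocol.
   A minimal sketch, with the builtin names assumed:

     while (__sync_lock_test_and_set_si (&lock, 1))
       ;                              spin while the lock was held
     ... critical section ...
     __sync_lock_release_si (&lock);  becomes st4.rel [&lock] = r0

   xchg has acquire semantics and the release store is a st.rel, so the
   pair brackets the critical section correctly on IA-64.  */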
rtx
ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		     enum machine_mode mode ATTRIBUTE_UNUSED,
		     int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arglist = TREE_OPERAND (exp, 1);
  enum machine_mode rmode = VOIDmode;

  /* First, select the operand mode (and, for compare-and-swap, the
     result mode) implied by the builtin.  */
  switch (fcode)
    {
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
      mode = SImode;
      rmode = SImode;
      break;

    case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
    case IA64_BUILTIN_LOCK_RELEASE_SI:
    case IA64_BUILTIN_FETCH_AND_ADD_SI:
    case IA64_BUILTIN_FETCH_AND_SUB_SI:
    case IA64_BUILTIN_FETCH_AND_OR_SI:
    case IA64_BUILTIN_FETCH_AND_AND_SI:
    case IA64_BUILTIN_FETCH_AND_XOR_SI:
    case IA64_BUILTIN_FETCH_AND_NAND_SI:
    case IA64_BUILTIN_ADD_AND_FETCH_SI:
    case IA64_BUILTIN_SUB_AND_FETCH_SI:
    case IA64_BUILTIN_OR_AND_FETCH_SI:
    case IA64_BUILTIN_AND_AND_FETCH_SI:
    case IA64_BUILTIN_XOR_AND_FETCH_SI:
    case IA64_BUILTIN_NAND_AND_FETCH_SI:
      mode = SImode;
      break;

    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
      mode = DImode;
      rmode = SImode;
      break;

    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
      mode = DImode;
      rmode = DImode;
      break;

    case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
    case IA64_BUILTIN_LOCK_RELEASE_DI:
    case IA64_BUILTIN_FETCH_AND_ADD_DI:
    case IA64_BUILTIN_FETCH_AND_SUB_DI:
    case IA64_BUILTIN_FETCH_AND_OR_DI:
    case IA64_BUILTIN_FETCH_AND_AND_DI:
    case IA64_BUILTIN_FETCH_AND_XOR_DI:
    case IA64_BUILTIN_FETCH_AND_NAND_DI:
    case IA64_BUILTIN_ADD_AND_FETCH_DI:
    case IA64_BUILTIN_SUB_AND_FETCH_DI:
    case IA64_BUILTIN_OR_AND_FETCH_DI:
    case IA64_BUILTIN_AND_AND_FETCH_DI:
    case IA64_BUILTIN_XOR_AND_FETCH_DI:
    case IA64_BUILTIN_NAND_AND_FETCH_DI:
      mode = DImode;
      break;

    default:
      break;
    }

  /* Second, dispatch to the appropriate expander.  */
  switch (fcode)
    {
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
      return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
					   target);

    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
      return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
					   target);

    case IA64_BUILTIN_SYNCHRONIZE:
      emit_insn (gen_mf ());
      return const0_rtx;

    case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
    case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
      return ia64_expand_lock_test_and_set (mode, arglist, target);

    case IA64_BUILTIN_LOCK_RELEASE_SI:
    case IA64_BUILTIN_LOCK_RELEASE_DI:
      return ia64_expand_lock_release (mode, arglist, target);

    case IA64_BUILTIN_BSP:
      if (! target || ! register_operand (target, DImode))
	target = gen_reg_rtx (DImode);
      emit_insn (gen_bsp_value (target));
#ifdef POINTERS_EXTEND_UNSIGNED
      target = convert_memory_address (ptr_mode, target);
#endif
      return target;

    case IA64_BUILTIN_FLUSHRS:
      emit_insn (gen_flushrs ());
      return const0_rtx;

    case IA64_BUILTIN_FETCH_AND_ADD_SI:
    case IA64_BUILTIN_FETCH_AND_ADD_DI:
      return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_SUB_SI:
    case IA64_BUILTIN_FETCH_AND_SUB_DI:
      return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_OR_SI:
    case IA64_BUILTIN_FETCH_AND_OR_DI:
      return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_AND_SI:
    case IA64_BUILTIN_FETCH_AND_AND_DI:
      return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_XOR_SI:
    case IA64_BUILTIN_FETCH_AND_XOR_DI:
      return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_NAND_SI:
    case IA64_BUILTIN_FETCH_AND_NAND_DI:
      return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);

    case IA64_BUILTIN_ADD_AND_FETCH_SI:
    case IA64_BUILTIN_ADD_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);

    case IA64_BUILTIN_SUB_AND_FETCH_SI:
    case IA64_BUILTIN_SUB_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);

    case IA64_BUILTIN_OR_AND_FETCH_SI:
    case IA64_BUILTIN_OR_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);

    case IA64_BUILTIN_AND_AND_FETCH_SI:
    case IA64_BUILTIN_AND_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);

    case IA64_BUILTIN_XOR_AND_FETCH_SI:
    case IA64_BUILTIN_XOR_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);

    case IA64_BUILTIN_NAND_AND_FETCH_SI:
    case IA64_BUILTIN_NAND_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);

    default:
      break;
    }

  return NULL_RTX;
}
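/* Note the MODE/RMODE split in the first switch above: for
   bool_compare_and_swap_di the comparison is performed in DImode but
   the truth value is produced in SImode, presumably because the
   builtin is declared to return an int.  Effective mapping, shown for
   illustration:

     BOOL_COMPARE_AND_SWAP_DI   mode = DImode, rmode = SImode
     VAL_COMPARE_AND_SWAP_DI    mode = DImode, rmode = DImode  */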
/* On HP-UX IA64, aggregate parameters are passed in the most
   significant bits of the stack slot.  */

enum direction
ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
{
  /* Exception to the normal case for structures/unions/etc.  */
  if (type && AGGREGATE_TYPE_P (type)
      && int_size_in_bytes (type) < UNITS_PER_WORD)
    return upward;

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
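/* Example: a 3-byte aggregate is smaller than UNITS_PER_WORD, so the
   hook pads it upward and it occupies the most significant bits of its
   slot on big-endian HP-UX; the big-endian default would pad such a
   small argument downward, into the least significant bits.  */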
/* Linked list of all external functions that are to be emitted by GCC.
   We output the name if and only if TREE_SYMBOL_REFERENCED is set in
   order to avoid putting out names that are never really used.  */

struct extern_func_list GTY(())
{
  struct extern_func_list *next;
  tree decl;
};

static GTY(()) struct extern_func_list *extern_func_head;

static void
ia64_hpux_add_extern_decl (tree decl)
{
  struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));
  p->decl = decl;
  p->next = extern_func_head;
  extern_func_head = p;
}
/* Print out the list of used global functions.  */

static void
ia64_hpux_file_end (void)
{
  struct extern_func_list *p;

  for (p = extern_func_head; p; p = p->next)
    {
      tree decl = p->decl;
      tree id = DECL_ASSEMBLER_NAME (decl);

      if (!id)
	abort ();

      if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
	{
	  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
	  TREE_ASM_WRITTEN (decl) = 1;
	  (*targetm.asm_out.globalize_label) (asm_out_file, name);
	  fputs (TYPE_ASM_OP, asm_out_file);
	  assemble_name (asm_out_file, name);
	  fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
	}
    }

  extern_func_head = 0;
}
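/* For a referenced but undefined function `foo', the loop above emits
   approximately (the exact directives come from globalize_label,
   TYPE_ASM_OP and TYPE_OPERAND_FMT; shown only as an illustration):

	.global foo
	.type	foo,@function  */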
/* Set SImode div/mod functions; init_integral_libfuncs only initializes
   modes of word_mode and larger.  Rename the TFmode libfuncs using the
   HPUX conventions.  __divtf3 is used for XFmode; we need to keep it
   for backward compatibility.  */

static void
ia64_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
  set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
  set_optab_libfunc (smod_optab, SImode, "__modsi3");
  set_optab_libfunc (umod_optab, SImode, "__umodsi3");

  set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
  set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
  set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
  set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
  set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

  set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
  set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
  set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
  set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");

  set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
  set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
  set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
  set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");

  set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
}
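/* Effect of the mappings above, as an illustrative example: when GCC
   falls back on a library call, `int f (int a, int b) { return a / b; }'
   calls __divsi3, and a TFmode multiply calls _U_Qfmpy following the
   HP-UX quad-float naming scheme.  */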
/* Rename all the TFmode libfuncs using the HPUX conventions.  */

static void
ia64_hpux_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
  set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
  set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");

  /* ia64_expand_compare uses this.  */
  cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");

  /* These should never be used.  */
  set_optab_libfunc (eq_optab, TFmode, 0);
  set_optab_libfunc (ne_optab, TFmode, 0);
  set_optab_libfunc (gt_optab, TFmode, 0);
  set_optab_libfunc (ge_optab, TFmode, 0);
  set_optab_libfunc (lt_optab, TFmode, 0);
  set_optab_libfunc (le_optab, TFmode, 0);
}
/* Rename the division and modulus functions on VMS.  */

static void
ia64_vms_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
  set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
  set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
  set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
  set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
  set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
  set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
  set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
}
/* Rename the TFmode libfuncs available from soft-fp in glibc using
   the HPUX conventions.  */

static void
ia64_sysv4_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  /* These functions are not part of the HPUX TFmode interface.  We
     use them instead of _U_Qfcmp, which doesn't work the way we
     expect.  */
  set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
  set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
  set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
  set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
  set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
  set_optab_libfunc (le_optab, TFmode, "_U_Qfle");

  /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
     glibc doesn't have them.  */
}
/* Switch to the section to which we should output X.  The only thing
   special we do here is to honor small data.  */

static void
ia64_select_rtx_section (enum machine_mode mode, rtx x,
			 unsigned HOST_WIDE_INT align)
{
  if (GET_MODE_SIZE (mode) > 0
      && GET_MODE_SIZE (mode) <= ia64_section_threshold)
    sdata_section ();
  else
    default_elf_select_rtx_section (mode, x, align);
}
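/* E.g. a DImode constant-pool entry is sent to the small data section
   whenever ia64_section_threshold is at least 8, so it can be reached
   through a short gp-relative offset rather than a full 64-bit address
   computation.  */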
/* It is illegal to have relocations in shared segments on AIX and HPUX.
   Pretend flag_pic is always set.  */

static void
ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
{
  default_elf_select_section_1 (exp, reloc, align, true);
}

static void
ia64_rwreloc_unique_section (tree decl, int reloc)
{
  default_unique_section_1 (decl, reloc, true);
}

static void
ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align)
{
  int save_pic = flag_pic;
  flag_pic = 1;
  ia64_select_rtx_section (mode, x, align);
  flag_pic = save_pic;
}

static unsigned int
ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
{
  return default_section_type_flags_1 (decl, name, reloc, true);
}
/* Returns true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
   structure type whose address should be passed in out0, rather than
   in r8.  */

static bool
ia64_struct_retval_addr_is_first_parm_p (tree fntype)
{
  tree ret_type = TREE_TYPE (fntype);

  /* The Itanium C++ ABI requires that out0, rather than r8, be used
     as the structure return address parameter, if the return value
     type has a non-trivial copy constructor or destructor.  It is not
     clear if this same convention should be used for other
     programming languages.  Until G++ 3.4, we incorrectly used r8 for
     these return values.  */
  return (abi_version_at_least (2)
	  && ret_type
	  && TYPE_MODE (ret_type) == BLKmode
	  && TREE_ADDRESSABLE (ret_type)
	  && strcmp (lang_hooks.name, "GNU C++") == 0);
}
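/* Example of the distinction (C++, illustrative):

     struct S { S (const S &); ~S (); char buf[64]; };
     S f ();    return-slot address for f passed in out0

   A plain C struct of the same size keeps the r8 convention, as does
   any translation unit compiled with -fabi-version=1.  */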
/* Output the assembler code for a thunk function.  THUNK_DECL is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
		      tree function)
{
  rtx this, insn, funexp;
  unsigned int this_parmno;
  unsigned int this_regno;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  reset_block_changes ();

  /* Set things up as ia64_expand_prologue might.  */
  last_scratch_gr_reg = 15;

  memset (&current_frame_info, 0, sizeof (current_frame_info));
  current_frame_info.spill_cfa_off = -16;
  current_frame_info.n_input_regs = 1;
  current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Figure out whether "this" will be the first parameter (the
     typical case) or the second parameter (as happens when the
     virtual function returns certain class objects).  */
  this_parmno
    = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
       ? 1 : 0);
  this_regno = IN_REG (this_parmno);
  if (!TARGET_REG_NAMES)
    reg_names[this_regno] = ia64_reg_numbers[this_parmno];

  this = gen_rtx_REG (Pmode, this_regno);
  if (TARGET_ILP32)
    {
      rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
      REG_POINTER (tmp) = 1;
      if (delta && CONST_OK_FOR_I (delta))
	{
	  emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
	  delta = 0;
	}
      else
	emit_insn (gen_ptr_extend (this, tmp));
    }

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (!CONST_OK_FOR_I (delta))
	{
	  rtx tmp = gen_rtx_REG (Pmode, 2);
	  emit_move_insn (tmp, delta_rtx);
	  delta_rtx = tmp;
	}
      emit_insn (gen_adddi3 (this, this, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 2);

      if (TARGET_ILP32)
	{
	  rtx t = gen_rtx_REG (ptr_mode, 2);
	  REG_POINTER (t) = 1;
	  emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
	  if (CONST_OK_FOR_I (vcall_offset))
	    {
	      emit_insn (gen_ptr_extend_plus_imm (tmp, t,
						  vcall_offset_rtx));
	      vcall_offset = 0;
	    }
	  else
	    emit_insn (gen_ptr_extend (tmp, t));
	}
      else
	emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      if (vcall_offset)
	{
	  if (!CONST_OK_FOR_J (vcall_offset))
	    {
	      rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
	      emit_move_insn (tmp2, vcall_offset_rtx);
	      vcall_offset_rtx = tmp2;
	    }
	  emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
	}

      if (TARGET_ILP32)
	emit_move_insn (gen_rtx_REG (ptr_mode, 2),
			gen_rtx_MEM (ptr_mode, tmp));
      else
	emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  emit_all_insn_group_barriers (NULL);

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1, 0);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}
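/* For a simple thunk (small DELTA, no vcall offset) the stream built
   above boils down to something like

	adds r32 = DELTA, r32      adjust `this' (in0 renamed to r32)
	br FUNCTION#               sibling call, SIBLING_CALL_P set

   This is illustrative only; the real output varies with the delta
   magnitude, ILP32 pointer extension, and the vtable path above.  */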
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
ia64_struct_value_rtx (tree fntype,
		       int incoming ATTRIBUTE_UNUSED)
{
  if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}

#include "gt-ia64.h"