/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
		  David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
44 #include "basic-block.h"
46 #include "sched-int.h"
49 #include "target-def.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
   ASM_OUTPUT_LABELREF.  */
int ia64_asm_output_label = 0;

/* Define the information needed to generate branch and scc insns.  This is
   stored from the compare operation.  */
struct rtx_def * ia64_compare_op0;
struct rtx_def * ia64_compare_op1;
65 /* Register names for ia64_expand_prologue. */
66 static const char * const ia64_reg_numbers[96] =
67 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
68 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
69 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
70 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
71 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
72 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
73 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
74 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
75 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
76 "r104","r105","r106","r107","r108","r109","r110","r111",
77 "r112","r113","r114","r115","r116","r117","r118","r119",
78 "r120","r121","r122","r123","r124","r125","r126","r127"};
/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_input_reg_names[8] =
{ "in0",  "in1",  "in2",  "in3",  "in4",  "in5",  "in6",  "in7" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
  "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
  "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
  "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
  "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
  "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
  "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
  "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
  "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
  "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };

/* ??? These strings could be shared with REGISTER_NAMES.  */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* String used with the -mfixed-range= option.  */
const char *ia64_fixed_range_string;

/* Determines whether we use adds, addl, or movl to generate our
   TLS immediate offsets.  */
int ia64_tls_size = 22;
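/* Illustrative note (not in the original source): the three settings
   correspond to the immediate widths of the IA-64 instructions used to
   form a TLS offset, roughly:

	14 bits:  adds r2 = @tprel(sym), r13
	22 bits:  addl r2 = @tprel(sym), r13
	64 bits:  movl r2 = @tprel(sym) ;; add r2 = r2, r13

   The default of 22 matches addl's 22-bit immediate field.  */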
108 /* String used with the -mtls-size= option. */
109 const char *ia64_tls_size_string;
111 /* Which cpu are we scheduling for. */
112 enum processor_type ia64_tune;
114 /* String used with the -tune= option. */
115 const char *ia64_tune_string;
117 /* Determines whether we run our final scheduling pass or not. We always
118 avoid the normal second scheduling pass. */
119 static int ia64_flag_schedule_insns2;
121 /* Determines whether we run variable tracking in machine dependent
123 static int ia64_flag_var_tracking;
125 /* Variables which are this size or smaller are put in the sdata/sbss
128 unsigned int ia64_section_threshold;
130 /* The following variable is used by the DFA insn scheduler. The value is
131 TRUE if we do insn bundling instead of insn scheduling. */
/* Structure to be filled in by ia64_compute_frame_size with register
   save masks and offsets for the current function.  */

struct ia64_frame_info
{
  HOST_WIDE_INT total_size;	/* size of the stack frame, not including
				   the caller's scratch area.  */
  HOST_WIDE_INT spill_cfa_off;	/* top of the reg spill area from the cfa.  */
  HOST_WIDE_INT spill_size;	/* size of the gr/br/fr spill area.  */
  HOST_WIDE_INT extra_spill_size;  /* size of spill area for others.  */
  HARD_REG_SET mask;		/* mask of saved registers.  */
  unsigned int gr_used_mask;	/* mask of registers in use as gr spill
				   registers or long-term scratches.  */
  int n_spilled;		/* number of spilled registers.  */
  int reg_fp;			/* register for fp.  */
  int reg_save_b0;		/* save register for b0.  */
  int reg_save_pr;		/* save register for prs.  */
  int reg_save_ar_pfs;		/* save register for ar.pfs.  */
  int reg_save_ar_unat;		/* save register for ar.unat.  */
  int reg_save_ar_lc;		/* save register for ar.lc.  */
  int reg_save_gp;		/* save register for gp.  */
  int n_input_regs;		/* number of input registers used.  */
  int n_local_regs;		/* number of local registers used.  */
  int n_output_regs;		/* number of output registers used.  */
  int n_rotate_regs;		/* number of rotating registers used.  */

  char need_regstk;		/* true if a .regstk directive needed.  */
  char initialized;		/* true if the data is finalized.  */
};

/* Current frame information calculated by ia64_compute_frame_size.  */
static struct ia64_frame_info current_frame_info;
static int ia64_use_dfa_pipeline_interface (void);
static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx, rtx);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
static int find_gr_spill (int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);

static enum machine_mode hfa_element_mode (tree, int);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					 tree, int *, int);
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (tree, tree);
static bool ia64_rtx_costs (rtx, int, int, int *);
static void fix_range (const char *);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (tree);
static void process_epilogue (void);
static int process_set (FILE *, rtx);

static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
					 int, tree, rtx);
static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_end_prologue (FILE *);

static int ia64_issue_rate (void);
static int ia64_adjust_cost (rtx, rtx, rtx, int);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);

static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static unsigned bundle_state_hash (const void *);
static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);

static rtx get_next_important_insn (rtx, rtx);
static void bundling (FILE *, int, rtx, rtx);

static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				  HOST_WIDE_INT, tree);
static void ia64_file_start (void);

static void ia64_select_rtx_section (enum machine_mode, rtx,
				     unsigned HOST_WIDE_INT);
static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_unique_section (tree, int)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
					     unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
     ATTRIBUTE_UNUSED;

static void ia64_hpux_add_extern_decl (tree decl)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_file_end (void)
     ATTRIBUTE_UNUSED;
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;

static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
/* Table of valid machine attributes.  */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "syscall_linkage", 0, 0, false, true,  true,  NULL },
  { "model",	       1, 1, true, false, false, ia64_handle_model_attribute },
  { NULL,	       0, 0, false, false, false, NULL }
};
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin

#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer
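/* For illustration (assumed assembler output, not part of the original
   file): with the pseudo-ops above, emitting an aligned 4-byte integer
   produces "\tdata4\t0x12345678", and an unaligned one "\tdata4.ua\t...";
   these are the IA-64 assembler's equivalents of the generic
   .word/.4byte directives.  */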
#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2

#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook

#undef TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE
#define TARGET_SCHED_USE_DFA_PIPELINE_INTERFACE ia64_use_dfa_pipeline_interface

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead

#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
  ia64_first_cycle_multipass_dfa_lookahead_guard

#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#endif

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#endif

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg

struct gcc_target targetm = TARGET_INITIALIZER;
/* Return 1 if OP is a valid operand for the MEM of a CALL insn.  */

int
call_operand (rtx op, enum machine_mode mode)
{
  if (mode != GET_MODE (op) && mode != VOIDmode)
    return 0;

  return (GET_CODE (op) == SYMBOL_REF || GET_CODE (op) == REG
	  || (GET_CODE (op) == SUBREG && GET_CODE (XEXP (op, 0)) == REG));
}

/* Return 1 if OP refers to a symbol in the sdata section.  */

int
sdata_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case CONST:
      if (GET_CODE (XEXP (op, 0)) != PLUS
	  || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF)
	break;
      op = XEXP (XEXP (op, 0), 0);
      /* FALLTHRU */

    case SYMBOL_REF:
      if (CONSTANT_POOL_ADDRESS_P (op))
	return GET_MODE_SIZE (get_pool_mode (op)) <= ia64_section_threshold;
      else
	return SYMBOL_REF_LOCAL_P (op) && SYMBOL_REF_SMALL_P (op);

    default:
      break;
    }

  return 0;
}
/* Return 1 if OP refers to a symbol in the small address area.  */

int
small_addr_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return SYMBOL_REF_SMALL_ADDR_P (op);
}
/* Return 1 if OP refers to a symbol, and is appropriate for a GOT load.  */

int
got_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) != PLUS)
	return 0;
      if (GET_CODE (XEXP (op, 0)) != SYMBOL_REF)
	return 0;
      op = XEXP (op, 1);
      if (GET_CODE (op) != CONST_INT)
	return 0;

      return 1;

      /* Ok if we're not using GOT entries at all.  */
      if (TARGET_NO_PIC || TARGET_AUTO_PIC)
	return 1;

      /* "Ok" while emitting rtl, since otherwise we won't be provided
	 with the entire offset during emission, which makes it very
	 hard to split the offset into high and low parts.  */
      if (rtx_equal_function_value_matters)
	return 1;

      /* Force the low 14 bits of the constant to zero so that we do not
	 use up so many GOT entries.  */
      return (INTVAL (op) & 0x3fff) == 0;
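      /* Worked example (illustrative, not in the original): "sym + 0x8000"
	 passes the test above (0x8000 & 0x3fff == 0), while "sym + 0x8004"
	 fails it and must instead be generated as a GOT load of a rounded
	 address plus a separate add, so that nearby offsets can share a
	 single GOT entry.  */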
    case SYMBOL_REF:
      if (SYMBOL_REF_SMALL_ADDR_P (op))
	return 0;
    case LABEL_REF:
      return 1;

    default:
      break;
    }
  return 0;
}

/* Return 1 if OP refers to a symbol.  */

int
symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;

    default:
      break;
    }
  return 0;
}

/* Return tls_model if OP refers to a TLS symbol.  */

int
tls_symbolic_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) != SYMBOL_REF)
    return 0;
  return SYMBOL_REF_TLS_MODEL (op);
}
/* Return 1 if OP refers to a function.  */

int
function_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (op) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (op))
    return 1;
  else
    return 0;
}
/* Return 1 if OP is setjmp or a similar function.  */

/* ??? This is an unsatisfying solution.  Should rethink.  */

int
setjmp_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  const char *name;
  int retval = 0;

  if (GET_CODE (op) != SYMBOL_REF)
    return 0;

  name = XSTR (op, 0);

  /* The following code is borrowed from special_function_p in calls.c.  */

  /* Disregard prefix _, __ or __x.  */
  if (name[0] == '_')
    {
      if (name[1] == '_' && name[2] == 'x')
	name += 3;
      else if (name[1] == '_')
	name += 2;
      else
	name += 1;
    }

  if (name[0] == 's')
    {
      retval
	= ((name[1] == 'e'
	    && (! strcmp (name, "setjmp")
		|| ! strcmp (name, "setjmp_syscall")))
	   || (name[1] == 'i'
	       && ! strcmp (name, "sigsetjmp"))
	   || (name[1] == 'a'
	       && ! strcmp (name, "savectx")));
    }
  else if ((name[0] == 'q' && name[1] == 's'
	    && ! strcmp (name, "qsetjmp"))
	   || (name[0] == 'v' && name[1] == 'f'
	       && ! strcmp (name, "vfork")))
    retval = 1;

  return retval;
}
/* Return 1 if OP is a general operand, excluding tls symbolic operands.  */

int
move_operand (rtx op, enum machine_mode mode)
{
  return general_operand (op, mode) && !tls_symbolic_operand (op, mode);
}
/* Return 1 if OP is a register operand that is (or could be) a GR reg.  */

int
gr_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a register operand that is (or could be) an FR reg.  */

int
fr_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a register operand that is (or could be) a GR/FR reg.  */

int
grfr_register_operand (rtx op, enum machine_mode mode)
{
  if (! register_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is (or could be) a GR reg.  */

int
gr_nonimmediate_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is (or could be) a FR reg.  */

int
fr_nonimmediate_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return FR_REGNO_P (regno);
    }
  return 1;
}

/* Return 1 if OP is a nonimmediate operand that is a GR/FR reg.  */

int
grfr_nonimmediate_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == REG)
    {
      unsigned int regno = REGNO (op);
      if (regno < FIRST_PSEUDO_REGISTER)
	return GENERAL_REGNO_P (regno) || FR_REGNO_P (regno);
    }
  return 1;
}
/* Return 1 if OP is a GR register operand, or zero.  */

int
gr_reg_or_0_operand (rtx op, enum machine_mode mode)
{
  return (op == const0_rtx || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or a 5 bit immediate operand.  */

int
gr_reg_or_5bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && INTVAL (op) >= 0 && INTVAL (op) < 32)
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or a 6 bit immediate operand.  */

int
gr_reg_or_6bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR register operand, or an 8 bit immediate operand.  */

int
gr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a GR/FR register operand, or an 8 bit immediate.  */

int
grfr_reg_or_8bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op)))
	  || grfr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or an 8 bit adjusted immediate
   operand.  */

int
gr_reg_or_8bit_adjusted_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_L (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or is valid for both an 8 bit
   immediate and an 8 bit adjusted immediate operand.  This is necessary
   because when we emit a compare, we don't know what the condition will be,
   so we need the union of the immediates accepted by GT and LT.  */

int
gr_reg_or_8bit_and_adjusted_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_K (INTVAL (op))
	   && CONST_OK_FOR_L (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or a 14 bit immediate operand.  */

int
gr_reg_or_14bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_I (INTVAL (op)))
	  || gr_register_operand (op, mode));
}

/* Return 1 if OP is a register operand, or a 22 bit immediate operand.  */

int
gr_reg_or_22bit_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_INT && CONST_OK_FOR_J (INTVAL (op)))
	  || gr_register_operand (op, mode));
}
/* Return 1 if OP is a 6 bit immediate operand.  */

int
shift_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (INTVAL (op)));
}

/* Return 1 if OP is a 5 bit immediate operand.  */

int
shift_32bit_count_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) >= 0 && INTVAL (op) < 32));
}

/* Return 1 if OP is a 2, 4, 8, or 16 immediate operand.  */

int
shladd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == 2 || INTVAL (op) == 4
	      || INTVAL (op) == 8 || INTVAL (op) == 16));
}
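/* Illustrative use (assumed syntax, not in the original): the constant is
   the multiplier in addresses like (plus (mult X 4) Y), which the shladd
   instruction computes directly:

	shladd r8 = r32, 2, r33		// r8 = (r32 << 2) + r33

   shift counts 1..4 correspond to the multipliers 2, 4, 8, 16 accepted
   here.  */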
/* Return 1 if OP is a -16, -8, -4, -1, 1, 4, 8, or 16 immediate operand.  */

int
fetchadd_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  return (GET_CODE (op) == CONST_INT
	  && (INTVAL (op) == -16 || INTVAL (op) == -8 ||
	      INTVAL (op) == -4  || INTVAL (op) == -1 ||
	      INTVAL (op) == 1   || INTVAL (op) == 4  ||
	      INTVAL (op) == 8   || INTVAL (op) == 16));
}
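/* Illustrative use (assumed syntax, not in the original): these are
   exactly the increments the fetchadd instruction can encode, e.g.

	fetchadd8.acq r8 = [r32], 16

   Any other constant requires a compare-and-swap loop instead.  */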
/* Return 1 if OP is a floating-point constant zero, one, or a register.  */

int
fr_reg_or_fp01_operand (rtx op, enum machine_mode mode)
{
  return ((GET_CODE (op) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (op))
	  || fr_register_operand (op, mode));
}
/* Like nonimmediate_operand, but don't allow MEMs that try to use a
   POST_MODIFY with a REG as displacement.  */

int
destination_operand (rtx op, enum machine_mode mode)
{
  if (! nonimmediate_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM
      && GET_CODE (XEXP (op, 0)) == POST_MODIFY
      && GET_CODE (XEXP (XEXP (XEXP (op, 0), 1), 1)) == REG)
    return 0;
  return 1;
}

/* Like memory_operand, but don't allow post-increments.  */

int
not_postinc_memory_operand (rtx op, enum machine_mode mode)
{
  return (memory_operand (op, mode)
	  && GET_RTX_CLASS (GET_CODE (XEXP (op, 0))) != RTX_AUTOINC);
}
/* Return 1 if this is a comparison operator, which accepts a normal 8-bit
   signed immediate operand.  */

int
normal_comparison_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == EQ || code == NE
	      || code == GT || code == LE || code == GTU || code == LEU));
}

/* Return 1 if this is a comparison operator, which accepts an adjusted 8-bit
   signed immediate operand.  */

int
adjusted_comparison_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == LT || code == GE || code == LTU || code == GEU));
}

/* Return 1 if this is a signed inequality operator.  */

int
signed_inequality_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((mode == VOIDmode || GET_MODE (op) == mode)
	  && (code == GE || code == GT
	      || code == LE || code == LT));
}

/* Return 1 if this operator is valid for predication.  */

int
predicate_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && (code == EQ || code == NE));
}

/* Return 1 if this operator can be used in a conditional operation.  */

int
condop_operator (register rtx op, enum machine_mode mode)
{
  enum rtx_code code = GET_CODE (op);
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && (code == PLUS || code == MINUS || code == AND
	      || code == IOR || code == XOR));
}
/* Return 1 if this is the ar.lc register.  */

int
ar_lc_reg_operand (register rtx op, enum machine_mode mode)
{
  return (GET_MODE (op) == DImode
	  && (mode == DImode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_LC_REGNUM);
}

/* Return 1 if this is the ar.ccv register.  */

int
ar_ccv_reg_operand (register rtx op, enum machine_mode mode)
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_CCV_REGNUM);
}

/* Return 1 if this is the ar.pfs register.  */

int
ar_pfs_reg_operand (register rtx op, enum machine_mode mode)
{
  return ((GET_MODE (op) == mode || mode == VOIDmode)
	  && GET_CODE (op) == REG
	  && REGNO (op) == AR_PFS_REGNUM);
}
/* Like general_operand, but don't allow (mem (addressof)).  */

int
general_xfmode_operand (rtx op, enum machine_mode mode)
{
  if (! general_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
    return 0;
  return 1;
}

/* Similarly.  */

int
destination_xfmode_operand (rtx op, enum machine_mode mode)
{
  if (! destination_operand (op, mode))
    return 0;
  if (GET_CODE (op) == MEM && GET_CODE (XEXP (op, 0)) == ADDRESSOF)
    return 0;
  return 1;
}

/* Similarly.  */

int
xfreg_or_fp01_operand (rtx op, enum machine_mode mode)
{
  if (GET_CODE (op) == SUBREG)
    return 0;
  return fr_reg_or_fp01_operand (op, mode);
}
/* Return 1 if OP is valid as a base register in a reg + offset address.  */

int
basereg_operand (rtx op, enum machine_mode mode)
{
  /* ??? Should I copy the flag_omit_frame_pointer and cse_not_expected
     checks from pa.c basereg_operand as well?  Seems to be OK without them
     in test runs.  */

  return (register_operand (op, mode) &&
	  REG_POINTER ((GET_CODE (op) == SUBREG) ? SUBREG_REG (op) : op));
}
typedef enum
  {
    ADDR_AREA_NORMAL,	/* normal address area */
    ADDR_AREA_SMALL	/* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
/* Retrieve the address area that has been chosen for the given decl.  */

static ia64_addr_area
ia64_get_addr_area (tree decl)
{
  tree model_attr;

  model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
  if (model_attr)
    {
      tree id;

      init_idents ();
      id = TREE_VALUE (TREE_VALUE (model_attr));
      if (id == small_ident1 || id == small_ident2)
	return ADDR_AREA_SMALL;
    }
  return ADDR_AREA_NORMAL;
}
static tree
ia64_handle_model_attribute (tree *node, tree name, tree args,
			     int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  ia64_addr_area addr_area = ADDR_AREA_NORMAL;
  ia64_addr_area area;
  tree arg, decl = *node;

  init_idents ();
  arg = TREE_VALUE (args);
  if (arg == small_ident1 || arg == small_ident2)
    {
      addr_area = ADDR_AREA_SMALL;
    }
  else
    {
      warning ("invalid argument of `%s' attribute",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  switch (TREE_CODE (decl))
    {
    case VAR_DECL:
      if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
	   == FUNCTION_DECL)
	  && !TREE_STATIC (decl))
	{
	  error ("%Jan address area attribute cannot be specified for "
		 "local variables", decl, decl);
	  *no_add_attrs = true;
	}
      area = ia64_get_addr_area (decl);
      if (area != ADDR_AREA_NORMAL && addr_area != area)
	{
	  error ("%Jaddress area of '%s' conflicts with previous "
		 "declaration", decl, decl);
	  *no_add_attrs = true;
	}
      break;

    case FUNCTION_DECL:
      error ("%Jaddress area attribute cannot be specified for functions",
	     decl, decl);
      *no_add_attrs = true;
      break;

    default:
      warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
      break;
    }

  return NULL_TREE;
}
static void
ia64_encode_addr_area (tree decl, rtx symbol)
{
  int flags;

  flags = SYMBOL_REF_FLAGS (symbol);
  switch (ia64_get_addr_area (decl))
    {
    case ADDR_AREA_NORMAL: break;
    case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
    default: abort ();
    }
  SYMBOL_REF_FLAGS (symbol) = flags;
}
static void
ia64_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (TREE_CODE (decl) == VAR_DECL
      && GET_CODE (DECL_RTL (decl)) == MEM
      && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
    ia64_encode_addr_area (decl, XEXP (rtl, 0));
}
/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (rtx dst, rtx src)
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */
  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
}
int
addp4_optimize_ok (rtx op1, rtx op2)
{
  return (basereg_operand (op1, GET_MODE(op1)) !=
	  basereg_operand (op2, GET_MODE(op2)));
}
/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
   Return the length of the field, or <= 0 on failure.  */

int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
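/* Worked example (illustrative, not in the original): for a mask of 0x0ff0
   with a shift count of 4, op becomes 0x0ff0 >> 4 == 0xff, and
   exact_log2 (0xff + 1) == 8, so the dep.z field is 8 bits wide.
   A mask of 0x0f70 >> 4 == 0xf7 is not of the form 2^n - 1, exact_log2
   returns -1, and the caller rejects the combination.  */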
/* Expand a symbolic constant load.  */

void
ia64_expand_load_address (rtx dest, rtx src)
{
  if (tls_symbolic_operand (src, VOIDmode))
    abort ();
  if (GET_CODE (dest) != REG)
    abort ();

  /* ILP32 mode still loads 64-bits of data from the GOT.  This avoids
     having to pointer-extend the value afterward.  Other forms of address
     computation below are also more natural to compute as 64-bit quantities.
     If we've been given an SImode destination register, change it.  */
  if (GET_MODE (dest) != Pmode)
    dest = gen_rtx_REG (Pmode, REGNO (dest));

  if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest, src));
      return;
    }
  else if (TARGET_AUTO_PIC)
    {
      emit_insn (gen_load_gprel64 (dest, src));
      return;
    }
  else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
    {
      emit_insn (gen_load_fptr (dest, src));
      return;
    }
  else if (sdata_symbolic_operand (src, VOIDmode))
    {
      emit_insn (gen_load_gprel (dest, src));
      return;
    }

  if (GET_CODE (src) == CONST
      && GET_CODE (XEXP (src, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
      && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x1fff) != 0)
    {
      rtx sym = XEXP (XEXP (src, 0), 0);
      HOST_WIDE_INT ofs, hi, lo;

      /* Split the offset into a sign extended 14-bit low part
	 and a complementary high part.  */
      ofs = INTVAL (XEXP (XEXP (src, 0), 1));
      lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
      hi = ofs - lo;
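      /* Numeric example (illustrative, not in the original): ofs == 0x12345
	 gives lo == ((0x2345 ^ 0x2000) - 0x2000) == -0x1cbb and
	 hi == 0x14000; hi + lo == 0x12345, with LO fitting in the signed
	 14-bit immediate of the add emitted below.  */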
      ia64_expand_load_address (dest, plus_constant (sym, hi));
      emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
    }
  else
    {
      rtx tmp;

      tmp = gen_rtx_HIGH (Pmode, src);
      tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));

      tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
      emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
    }
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static GTY(()) rtx thread_pointer_rtx;

static rtx
gen_thread_pointer (void)
{
  if (!thread_pointer_rtx)
    {
      thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
      RTX_UNCHANGING_P (thread_pointer_rtx) = 1;
    }
  return thread_pointer_rtx;
}
static rtx
ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
{
  rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
  rtx orig_op0 = op0;

  switch (tls_kind)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
      RTX_UNCHANGING_P (tga_op1) = 1;

      tga_op2 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
      tga_op2 = gen_rtx_MEM (Pmode, tga_op2);
      RTX_UNCHANGING_P (tga_op2) = 1;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      if (GET_MODE (op0) != Pmode)
	op0 = tga_ret;
      emit_libcall_block (insns, op0, tga_ret, op1);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic
	 If the call to __tls_get_addr is used only by a single symbol,
	 then we should (somehow) move the dtprel to the second arg
	 to avoid the extra add.  */
      start_sequence ();

      tga_op1 = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
      tga_op1 = gen_rtx_MEM (Pmode, tga_op1);
      RTX_UNCHANGING_P (tga_op1) = 1;

      tga_op2 = const0_rtx;

      tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
					 LCT_CONST, Pmode, 2, tga_op1,
					 Pmode, tga_op2, Pmode);

      insns = get_insns ();
      end_sequence ();

      tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_LD_BASE);
      tmp = gen_reg_rtx (Pmode);
      emit_libcall_block (insns, tmp, tga_ret, tga_eqv);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_dtprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, tmp, op0));
	}
      else
	emit_insn (gen_add_dtprel (op0, tmp, op1));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_load_ltoff_tprel (tmp, op1));
      tmp = gen_rtx_MEM (Pmode, tmp);
      RTX_UNCHANGING_P (tmp) = 1;
      tmp = force_reg (Pmode, tmp);

      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      if (!register_operand (op0, Pmode))
	op0 = gen_reg_rtx (Pmode);
      if (TARGET_TLS64)
	{
	  emit_insn (gen_load_tprel (op0, op1));
	  emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
	}
      else
	emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
      break;

    default:
      abort ();
    }

  if (orig_op0 == op0)
    return NULL_RTX;
  if (GET_MODE (orig_op0) == Pmode)
    return op0;
  return gen_lowpart (GET_MODE (orig_op0), op0);
}
rtx
ia64_expand_move (rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);

  if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
    op1 = force_reg (mode, op1);

  if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
    {
      enum tls_model tls_kind;
      if ((tls_kind = tls_symbolic_operand (op1, VOIDmode)))
	return ia64_expand_tls_address (tls_kind, op0, op1);

      if (!TARGET_NO_PIC && reload_completed)
	{
	  ia64_expand_load_address (op0, op1);
	  return NULL_RTX;
	}
    }

  return op1;
}
/* Split a move from OP1 to OP0 conditional on COND.  */

void
ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
{
  rtx insn, first = get_last_insn ();

  emit_move_insn (op0, op1);

  for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
					  PATTERN (insn));
}
/* Split a post-reload TImode or TFmode reference into two DImode
   components.  This is made extra difficult by the fact that we do
   not get any scratch registers to work with, because reload cannot
   be prevented from giving us a scratch that overlaps the register
   pair involved.  So instead, when addressing memory, we tweak the
   pointer register up and back down with POST_INCs.  Or up and not
   back down when we can get away with it.

   REVERSED is true when the loads must be done in reversed order
   (high word first) for correctness.  DEAD is true when the pointer
   dies with the second insn we generate and therefore the second
   address must not carry a postmodify.

   May return an insn which is to be emitted after the moves.  */

static rtx
ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
{
  rtx fixup = 0;

  switch (GET_CODE (in))
    {
    case REG:
      out[reversed] = gen_rtx_REG (DImode, REGNO (in));
      out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      /* Cannot occur reversed.  */
      if (reversed) abort ();

      if (GET_MODE (in) != TFmode)
	split_double (in, &out[0], &out[1]);
      else
	/* split_double does not understand how to split a TFmode
	   quantity into a pair of DImode constants.  */
	{
	  REAL_VALUE_TYPE r;
	  unsigned HOST_WIDE_INT p[2];
	  long l[4];  /* TFmode is 128 bits */

	  REAL_VALUE_FROM_CONST_DOUBLE (r, in);
	  real_to_target (l, &r, TFmode);

	  if (FLOAT_WORDS_BIG_ENDIAN)
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
	      p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
	    }
	  else
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
	      p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
	    }
	  out[0] = GEN_INT (p[0]);
	  out[1] = GEN_INT (p[1]);
	}
      break;

    case MEM:
      {
	rtx base = XEXP (in, 0);
	rtx offset;

	switch (GET_CODE (base))
	  {
	  case REG:
	    if (!reversed)
	      {
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
		out[1] = adjust_automodify_address
		  (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
	      }
	    else
	      {
		/* Reversal requires a pre-increment, which can only
		   be done as a separate insn.  */
		emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
		out[0] = adjust_automodify_address
		  (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
		out[1] = adjust_address (in, DImode, 0);
	      }
	    break;

	  case POST_INC:
	    if (reversed || dead) abort ();
	    /* Just do the increment in two steps.  */
	    out[0] = adjust_automodify_address (in, DImode, 0, 0);
	    out[1] = adjust_automodify_address (in, DImode, 0, 8);
	    break;

	  case POST_DEC:
	    if (reversed || dead) abort ();
	    /* Add 8, subtract 24.  */
	    base = XEXP (base, 0);
	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
	    out[1] = adjust_automodify_address
	      (in, DImode,
	       gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
	       8);
	    break;

	  case POST_MODIFY:
	    if (reversed || dead) abort ();
	    /* Extract and adjust the modification.  This case is
	       trickier than the others, because we might have an
	       index register, or we might have a combined offset that
	       doesn't fit a signed 9-bit displacement field.  We can
	       assume the incoming expression is already legitimate.  */
	    offset = XEXP (base, 1);
	    base = XEXP (base, 0);

	    out[0] = adjust_automodify_address
	      (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);

	    if (GET_CODE (XEXP (offset, 1)) == REG)
	      {
		/* Can't adjust the postmodify to match.  Emit the
		   original, then a separate addition insn.  */
		out[1] = adjust_automodify_address (in, DImode, 0, 8);
		fixup = gen_adddi3 (base, base, GEN_INT (-8));
	      }
	    else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
	      abort ();
	    else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
	      {
		/* Again the postmodify cannot be made to match, but
		   in this case it's more efficient to get rid of the
		   postmodify entirely and fix up with an add insn.  */
		out[1] = adjust_automodify_address (in, DImode, base, 8);
		fixup = gen_adddi3 (base, base,
				    GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
	      }
	    else
	      {
		/* Combined offset still fits in the displacement field.
		   (We cannot overflow it at the high end.)  */
		out[1] = adjust_automodify_address
		  (in, DImode,
		   gen_rtx_POST_MODIFY (Pmode, base,
		     gen_rtx_PLUS (Pmode, base,
				   GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
		   8);
	      }
	    break;

	  default:
	    abort ();
	  }
	break;
      }

    default:
      abort ();
    }

  return fixup;
}
/* Split a TImode or TFmode move instruction after reload.
   This is used by *movtf_internal and *movti_internal.  */
static void
ia64_split_tmode_move (rtx operands[])
{
  rtx in[2], out[2], insn;
  rtx fixup[2];
  bool dead = false;
  bool reversed = false;

  /* It is possible for reload to decide to overwrite a pointer with
     the value it points to.  In that case we have to do the loads in
     the appropriate order so that the pointer is not destroyed too
     early.  Also we must not generate a postmodify for that second
     load, or rws_access_regno will abort.  */
  if (GET_CODE (operands[1]) == MEM
      && reg_overlap_mentioned_p (operands[0], operands[1]))
    {
      rtx base = XEXP (operands[1], 0);
      while (GET_CODE (base) != REG)
	base = XEXP (base, 0);

      if (REGNO (base) == REGNO (operands[0]))
	reversed = true;
      dead = true;
    }
  /* Another reason to do the moves in reversed order is if the first
     element of the target register pair is also the second element of
     the source register pair.  */
  if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
    reversed = true;

  fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
  fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);

#define MAYBE_ADD_REG_INC_NOTE(INSN, EXP)				\
  if (GET_CODE (EXP) == MEM						\
      && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_INC			\
	  || GET_CODE (XEXP (EXP, 0)) == POST_DEC))			\
    REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC,			\
					  XEXP (XEXP (EXP, 0), 0),	\
					  REG_NOTES (INSN))

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[0]);

  insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
  MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
  MAYBE_ADD_REG_INC_NOTE (insn, out[1]);

  if (fixup[0])
    emit_insn (fixup[0]);
  if (fixup[1])
    emit_insn (fixup[1]);

#undef MAYBE_ADD_REG_INC_NOTE
}
/* ??? Fixing GR->FR XFmode moves during reload is hard.  You need to go
   through memory plus an extra GR scratch register.  Except that you can
   either get the first from SECONDARY_MEMORY_NEEDED or the second from
   SECONDARY_RELOAD_CLASS, but not both.

   We got into problems in the first place by allowing a construct like
   (subreg:XF (reg:TI)), which we got from a union containing a long double.
   This solution attempts to prevent this situation from occurring.  When
   we see something like the above, we spill the inner register to memory.  */

rtx
spill_xfmode_operand (rtx in, int force)
{
  if (GET_CODE (in) == SUBREG
      && GET_MODE (SUBREG_REG (in)) == TImode
      && GET_CODE (SUBREG_REG (in)) == REG)
    {
      rtx memt = assign_stack_temp (TImode, 16, 0);
      emit_move_insn (memt, SUBREG_REG (in));
      return adjust_address (memt, XFmode, 0);
    }
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (XFmode, 16, 0);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
/* Emit comparison instruction if necessary, returning the expression
   that holds the compare result in the proper mode.  */

static GTY(()) rtx cmptf_libfunc;

rtx
ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
{
  rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
  rtx cmp;

  /* If we have a BImode input, then we already have a compare result, and
     do not need to emit another comparison.  */
  if (GET_MODE (op0) == BImode)
    {
      if ((code == NE || code == EQ) && op1 == const0_rtx)
	cmp = op0;
      else
	abort ();
    }
  /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
     magic number as its third argument, that indicates what to do.
     The return value is an integer to be compared against zero.  */
  else if (GET_MODE (op0) == TFmode)
    {
      enum qfcmp_magic {
	QCMP_INV = 1,	/* Raise FP_INVALID on SNaN as a side effect.  */
	QCMP_UNORD = 2,
	QCMP_EQ = 4,
	QCMP_LT = 8,
	QCMP_GT = 16
      } magic;
      enum rtx_code ncode;
      rtx ret, insns;
      if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
	abort ();
      switch (code)
	{
	  /* 1 = equal, 0 = not equal.  Equality operators do
	     not raise FP_INVALID when given an SNaN operand.  */
	case EQ:        magic = QCMP_EQ;                  ncode = NE; break;
	case NE:        magic = QCMP_EQ;                  ncode = EQ; break;
	  /* isunordered() from C99.  */
	case UNORDERED: magic = QCMP_UNORD;               ncode = NE; break;
	  /* Relational operators raise FP_INVALID when given
	     an SNaN operand.  */
	case LT:        magic = QCMP_LT        |QCMP_INV; ncode = NE; break;
	case LE:        magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	case GT:        magic = QCMP_GT        |QCMP_INV; ncode = NE; break;
	case GE:        magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
	  /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
	     Expanders for buneq etc. would have to be added to ia64.md
	     for this to be useful.  */
	default: abort ();
	}

      start_sequence ();

      ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
				     op0, TFmode, op1, TFmode,
				     GEN_INT (magic), DImode);
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (ncode, BImode,
					      ret, const0_rtx)));

      insns = get_insns ();
      end_sequence ();

      emit_libcall_block (insns, cmp, cmp,
			  gen_rtx_fmt_ee (code, BImode, op0, op1));
      code = NE;
    }
  else
    {
      cmp = gen_reg_rtx (BImode);
      emit_insn (gen_rtx_SET (VOIDmode, cmp,
			      gen_rtx_fmt_ee (code, BImode, op0, op1)));
      code = NE;
    }

  return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
}
/* Emit the appropriate sequence for a call.  */

void
ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
		  int sibcall_p)
{
  rtx insn, b0;

  addr = XEXP (addr, 0);
  addr = convert_memory_address (DImode, addr);
  b0 = gen_rtx_REG (DImode, R_BR (0));

  /* ??? Should do this for functions known to bind local too.  */
  if (TARGET_NO_PIC || TARGET_AUTO_PIC)
    {
      if (sibcall_p)
	insn = gen_sibcall_nogp (addr);
      else if (! retval)
	insn = gen_call_nogp (addr, b0);
      else
	insn = gen_call_value_nogp (retval, addr, b0);
      insn = emit_call_insn (insn);
    }
  else
    {
      if (sibcall_p)
	insn = gen_sibcall_gp (addr);
      else if (! retval)
	insn = gen_call_gp (addr, b0);
      else
	insn = gen_call_value_gp (retval, addr, b0);
      insn = emit_call_insn (insn);

      use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
    }

  if (sibcall_p)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
}
void
ia64_reload_gp (void)
{
  rtx tmp;

  if (current_frame_info.reg_save_gp)
    tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
  else
    {
      HOST_WIDE_INT offset;

      offset = (current_frame_info.spill_cfa_off
		+ current_frame_info.spill_size);
      if (frame_pointer_needed)
	{
	  tmp = hard_frame_pointer_rtx;
	  offset = -offset;
	}
      else
	{
	  tmp = stack_pointer_rtx;
	  offset = current_frame_info.total_size - offset;
	}

      if (CONST_OK_FOR_I (offset))
	emit_insn (gen_adddi3 (pic_offset_table_rtx,
			       tmp, GEN_INT (offset)));
      else
	{
	  emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
	  emit_insn (gen_adddi3 (pic_offset_table_rtx,
				 pic_offset_table_rtx, tmp));
	}

      tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
    }

  emit_move_insn (pic_offset_table_rtx, tmp);
}
void
ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
		 rtx scratch_b, int noreturn_p, int sibcall_p)
{
  rtx insn;
  bool is_desc = false;

  /* If we find we're calling through a register, then we're actually
     calling through a descriptor, so load up the values.  */
  if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
    {
      rtx tmp;
      bool addr_dead_p;

      /* ??? We are currently constrained to *not* use peep2, because
	 we can legitimately change the global lifetime of the GP
	 (in the form of killing where previously live).  This is
	 because a call through a descriptor doesn't use the previous
	 value of the GP, while a direct call does, and we do not
	 commit to either form until the split here.

	 That said, this means that we lack precise life info for
	 whether ADDR is dead after this call.  This is not terribly
	 important, since we can fix things up essentially for free
	 with the POST_DEC below, but it's nice to not use it when we
	 can immediately tell it's not necessary.  */
      addr_dead_p = ((noreturn_p || sibcall_p
		      || TEST_HARD_REG_BIT (regs_invalidated_by_call,
					    REGNO (addr)))
		     && !FUNCTION_ARG_REGNO_P (REGNO (addr)));

      /* Load the code address into scratch_b.  */
      tmp = gen_rtx_POST_INC (Pmode, addr);
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (scratch_r, tmp);
      emit_move_insn (scratch_b, scratch_r);

      /* Load the GP address.  If ADDR is not dead here, then we must
	 revert the change made above via the POST_INCREMENT.  */
      if (!addr_dead_p)
	tmp = gen_rtx_POST_DEC (Pmode, addr);
      else
	tmp = addr;
      tmp = gen_rtx_MEM (Pmode, tmp);
      emit_move_insn (pic_offset_table_rtx, tmp);

      is_desc = true;
      addr = scratch_b;
    }

  if (sibcall_p)
    insn = gen_sibcall_nogp (addr);
  else if (retval)
    insn = gen_call_value_nogp (retval, addr, retaddr);
  else
    insn = gen_call_nogp (addr, retaddr);
  emit_call_insn (insn);

  if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
    ia64_reload_gp ();
}
/* Begin the assembly file.  */

static void
ia64_file_start (void)
{
  default_file_start ();
  emit_safe_across_calls ();
}

void
emit_safe_across_calls (void)
{
  unsigned int rs, re;
  int out_state;

  rs = 1;
  out_state = 0;
  while (1)
    {
      while (rs < 64 && call_used_regs[PR_REG (rs)])
	rs++;
      if (rs >= 64)
	break;
      for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
	continue;
      if (out_state == 0)
	{
	  fputs ("\t.pred.safe_across_calls ", asm_out_file);
	  out_state = 1;
	}
      else
	fputc (',', asm_out_file);
      if (re == rs + 1)
	fprintf (asm_out_file, "p%u", rs);
      else
	fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
      rs = re + 1;
    }
  if (out_state)
    fputc ('\n', asm_out_file);
}
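/* Example of the emitted directive (illustrative, not in the original):
   if p1-p5 and p16-p63 are not call-used, the loop above prints

	.pred.safe_across_calls p1-p5,p16-p63

   telling the assembler which predicate registers are preserved across
   call instructions.  */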
/* Helper function for ia64_compute_frame_size: find an appropriate general
   register to spill some special register to.  SPECIAL_SPILL_MASK contains
   bits in GR0 to GR31 that have already been allocated by this routine.
   TRY_LOCALS is true if we should attempt to locate a local regnum.  */

static int
find_gr_spill (int try_locals)
{
  int regno;

  /* If this is a leaf function, first try an otherwise unused
     call-clobbered register.  */
  if (current_function_is_leaf)
    {
      for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
	if (! regs_ever_live[regno]
	    && call_used_regs[regno]
	    && ! fixed_regs[regno]
	    && ! global_regs[regno]
	    && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	  {
	    current_frame_info.gr_used_mask |= 1 << regno;
	    return regno;
	  }
    }

  if (try_locals)
    {
      regno = current_frame_info.n_local_regs;
      /* If there is a frame pointer, then we can't use loc79, because
	 that is HARD_FRAME_POINTER_REGNUM.  In particular, see the
	 reg_name switching code in ia64_expand_prologue.  */
      if (regno < (80 - frame_pointer_needed))
	{
	  current_frame_info.n_local_regs = regno + 1;
	  return LOC_REG (0) + regno;
	}
    }

  /* Failed to find a general register to spill to.  Must use stack.  */
  return 0;
}
/* In order to make for nice schedules, we try to allocate every temporary
   to a different register.  We must of course stay away from call-saved,
   fixed, and global registers.  We must also stay away from registers
   allocated in current_frame_info.gr_used_mask, since those include regs
   used all through the prologue.

   Any register allocated here must be used immediately.  The idea is to
   aid scheduling, not to solve data flow problems.  */

static int last_scratch_gr_reg;

static int
next_scratch_gr_reg (void)
{
  int i, regno;

  for (i = 0; i < 32; ++i)
    {
      regno = (last_scratch_gr_reg + i + 1) & 31;
      if (call_used_regs[regno]
	  && ! fixed_regs[regno]
	  && ! global_regs[regno]
	  && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
	{
	  last_scratch_gr_reg = regno;
	  return regno;
	}
    }

  /* There must be _something_ available.  */
  abort ();
}
/* Helper function for ia64_compute_frame_size, called through
   diddle_return_value.  Mark REG in current_frame_info.gr_used_mask.  */

static void
mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
{
  unsigned int regno = REGNO (reg);
  if (regno < 32)
    {
      unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
      for (i = 0; i < n; ++i)
	current_frame_info.gr_used_mask |= 1 << (regno + i);
    }
}
/* Returns the number of bytes offset between the frame pointer and the stack
   pointer for the current function.  SIZE is the number of bytes of space
   needed for local variables.  */

static void
ia64_compute_frame_size (HOST_WIDE_INT size)
{
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT spill_size = 0;
  HOST_WIDE_INT extra_spill_size = 0;
  HOST_WIDE_INT pretend_args_size;
  HARD_REG_SET mask;
  int n_spilled = 0;
  int spilled_gr_p = 0;
  int spilled_fr_p = 0;
  unsigned int regno;
  int i;

  if (current_frame_info.initialized)
    return;

  memset (&current_frame_info, 0, sizeof current_frame_info);
  CLEAR_HARD_REG_SET (mask);

  /* Don't allocate scratches to the return register.  */
  diddle_return_value (mark_reg_gr_used_mask, NULL);

  /* Don't allocate scratches to the EH scratch registers.  */
  if (cfun->machine->ia64_eh_epilogue_sp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
  if (cfun->machine->ia64_eh_epilogue_bsp)
    mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);

  /* Find the size of the register stack frame.  We have only 80 local
     registers, because we reserve 8 for the inputs and 8 for the
     outputs.  */

  /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
     since we'll be adjusting that down later.  */
  regno = LOC_REG (78) + ! frame_pointer_needed;
  for (; regno >= LOC_REG (0); regno--)
    if (regs_ever_live[regno])
      break;
  current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
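  /* Example (illustrative, not in the original): if the highest live
     stacked local is loc5, the loop stops there and n_local_regs becomes
     6, which is the local count the prologue's alloc instruction will
     request.  */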
2057 /* For functions marked with the syscall_linkage attribute, we must mark
2058 all eight input registers as in use, so that locals aren't visible to
2061 if (cfun->machine->n_varargs > 0
2062 || lookup_attribute ("syscall_linkage",
2063 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2064 current_frame_info.n_input_regs = 8;
2067 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2068 if (regs_ever_live[regno])
2070 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2073 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2074 if (regs_ever_live[regno])
2076 i = regno - OUT_REG (0) + 1;
2078 /* When -p profiling, we need one output register for the mcount argument.
2079 Likewise for -a profiling for the bb_init_func argument. For -ax
2080 profiling, we need two output registers for the two bb_init_trace_func
2082 if (current_function_profile)
2084 current_frame_info.n_output_regs = i;
2086 /* ??? No rotating register support yet. */
2087 current_frame_info.n_rotate_regs = 0;
2089 /* Discover which registers need spilling, and how much room that
2090 will take. Begin with floating point and general registers,
2091 which will always wind up on the stack. */
2093 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2094 if (regs_ever_live[regno] && ! call_used_regs[regno])
2096 SET_HARD_REG_BIT (mask, regno);
2102 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2103 if (regs_ever_live[regno] && ! call_used_regs[regno])
2105 SET_HARD_REG_BIT (mask, regno);
2111 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2112 if (regs_ever_live[regno] && ! call_used_regs[regno])
2114 SET_HARD_REG_BIT (mask, regno);
2119 /* Now come all special registers that might get saved in other
2120 general registers. */
2122 if (frame_pointer_needed)
2124 current_frame_info.reg_fp = find_gr_spill (1);
2125 /* If we did not get a register, then we take LOC79. This is guaranteed
2126 to be free, even if regs_ever_live is already set, because this is
2127 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2128 as we don't count loc79 above. */
2129 if (current_frame_info.reg_fp == 0)
2131 current_frame_info.reg_fp = LOC_REG (79);
2132 current_frame_info.n_local_regs++;
2136 if (! current_function_is_leaf)
2138 /* Emit a save of BR0 if we call other functions. Do this even
2139 if this function doesn't return, as EH depends on this to be
2140 able to unwind the stack. */
2141 SET_HARD_REG_BIT (mask, BR_REG (0));
2143 current_frame_info.reg_save_b0 = find_gr_spill (1);
2144 if (current_frame_info.reg_save_b0 == 0)
2150 /* Similarly for ar.pfs. */
2151 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2152 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2153 if (current_frame_info.reg_save_ar_pfs == 0)
2155 extra_spill_size += 8;
2159 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2160 registers are clobbered, so we fall back to the stack. */
2161 current_frame_info.reg_save_gp
2162 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
2163 if (current_frame_info.reg_save_gp == 0)
2165 SET_HARD_REG_BIT (mask, GR_REG (1));
2172 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
2174 SET_HARD_REG_BIT (mask, BR_REG (0));
2179 if (regs_ever_live[AR_PFS_REGNUM])
2181 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2182 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
2183 if (current_frame_info.reg_save_ar_pfs == 0)
2185 extra_spill_size += 8;
2191 /* Unwind descriptor hackery: things are most efficient if we allocate
2192 consecutive GR save registers for RP, PFS, FP in that order. However,
2193 it is absolutely critical that FP get the only hard register that's
2194 guaranteed to be free, so we allocated it first. If all three did
2195 happen to be allocated hard regs, and are consecutive, rearrange them
2196 into the preferred order now. */
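/* Illustrative example (hypothetical register numbers): if on entry
   reg_fp = r35, reg_save_b0 = r36 and reg_save_ar_pfs = r37, the swap
   below leaves reg_save_b0 = r35, reg_save_ar_pfs = r36 and
   reg_fp = r37, i.e. the RP, PFS, FP order the unwind descriptors
   encode most compactly. */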
2197 if (current_frame_info.reg_fp != 0
2198 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
2199 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
2201 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
2202 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
2203 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
2206 /* See if we need to store the predicate register block. */
2207 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2208 if (regs_ever_live[regno] && ! call_used_regs[regno])
2210 if (regno <= PR_REG (63))
2212 SET_HARD_REG_BIT (mask, PR_REG (0));
2213 current_frame_info.reg_save_pr = find_gr_spill (1);
2214 if (current_frame_info.reg_save_pr == 0)
2216 extra_spill_size += 8;
2220 /* ??? Mark them all as used so that register renaming and such
2221 are free to use them. */
2222 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2223 regs_ever_live[regno] = 1;
2226 /* If we're forced to use st8.spill, we're forced to save and restore
2227 ar.unat as well. The check for existing liveness allows inline asm
2228 to touch ar.unat. */
2229 if (spilled_gr_p || cfun->machine->n_varargs
2230 || regs_ever_live[AR_UNAT_REGNUM])
2232 regs_ever_live[AR_UNAT_REGNUM] = 1;
2233 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2234 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2235 if (current_frame_info.reg_save_ar_unat == 0)
2237 extra_spill_size += 8;
2242 if (regs_ever_live[AR_LC_REGNUM])
2244 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2245 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2246 if (current_frame_info.reg_save_ar_lc == 0)
2248 extra_spill_size += 8;
2253 /* If we have an odd number of words of pretend arguments written to
2254 the stack, then the FR save area will be unaligned. We round the
2255 size of this area up to keep things 16 byte aligned. */
2256 if (spilled_fr_p)
2257 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2258 else
2259 pretend_args_size = current_function_pretend_args_size;
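/* Worked example (editorial, assuming IA64_STACK_ALIGN rounds up to a
   multiple of 16): three words (24 bytes) of pretend arguments round
   up to 32 bytes, so the FR save area stays 16-byte aligned. */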
2261 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2262 + current_function_outgoing_args_size);
2263 total_size = IA64_STACK_ALIGN (total_size);
2265 /* We always use the 16-byte scratch area provided by the caller, but
2266 if we are a leaf function, there's no one to which we need to provide
2267 a scratch area. */
2268 if (current_function_is_leaf)
2269 total_size = MAX (0, total_size - 16);
2271 current_frame_info.total_size = total_size;
2272 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2273 current_frame_info.spill_size = spill_size;
2274 current_frame_info.extra_spill_size = extra_spill_size;
2275 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2276 current_frame_info.n_spilled = n_spilled;
2277 current_frame_info.initialized = reload_completed;
2280 /* Compute the initial difference between the specified pair of registers. */
2283 ia64_initial_elimination_offset (int from, int to)
2285 HOST_WIDE_INT offset;
2287 ia64_compute_frame_size (get_frame_size ());
2290 case FRAME_POINTER_REGNUM:
2291 if (to == HARD_FRAME_POINTER_REGNUM)
2293 if (current_function_is_leaf)
2294 offset = -current_frame_info.total_size;
2296 offset = -(current_frame_info.total_size
2297 - current_function_outgoing_args_size - 16);
2299 else if (to == STACK_POINTER_REGNUM)
2301 if (current_function_is_leaf)
2304 offset = 16 + current_function_outgoing_args_size;
2310 case ARG_POINTER_REGNUM:
2311 /* Arguments start above the 16 byte save area, unless stdarg
2312 in which case we store through the 16 byte save area. */
2313 if (to == HARD_FRAME_POINTER_REGNUM)
2314 offset = 16 - current_function_pretend_args_size;
2315 else if (to == STACK_POINTER_REGNUM)
2316 offset = (current_frame_info.total_size
2317 + 16 - current_function_pretend_args_size);
2329 /* If there are more than a trivial number of register spills, we use
2330 two interleaved iterators so that we can get two memory references
2331 per insn group.
2333 In order to simplify things in the prologue and epilogue expanders,
2334 we use helper functions to fix up the memory references after the
2335 fact with the appropriate offsets to a POST_MODIFY memory mode.
2336 The following data structure tracks the state of the two iterators
2337 while insns are being emitted. */
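/* Editorial sketch (not in the original source): with two iterators
   the spills alternate between them, so a sequence of four 8-byte
   saves comes out roughly as

     st8 [iter0] = rA, 16    // each iterator skips the slot
     st8 [iter1] = rB, 16    // the other one just filled
     st8 [iter0] = rC, 16
     st8 [iter1] = rD, 16

   letting two memory references issue per insn group. */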
2339 struct spill_fill_data
2341 rtx init_after; /* point at which to emit initializations */
2342 rtx init_reg[2]; /* initial base register */
2343 rtx iter_reg[2]; /* the iterator registers */
2344 rtx *prev_addr[2]; /* address of last memory use */
2345 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2346 HOST_WIDE_INT prev_off[2]; /* last offset */
2347 int n_iter; /* number of iterators in use */
2348 int next_iter; /* next iterator to use */
2349 unsigned int save_gr_used_mask;
2352 static struct spill_fill_data spill_fill_data;
2355 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2359 spill_fill_data.init_after = get_last_insn ();
2360 spill_fill_data.init_reg[0] = init_reg;
2361 spill_fill_data.init_reg[1] = init_reg;
2362 spill_fill_data.prev_addr[0] = NULL;
2363 spill_fill_data.prev_addr[1] = NULL;
2364 spill_fill_data.prev_insn[0] = NULL;
2365 spill_fill_data.prev_insn[1] = NULL;
2366 spill_fill_data.prev_off[0] = cfa_off;
2367 spill_fill_data.prev_off[1] = cfa_off;
2368 spill_fill_data.next_iter = 0;
2369 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2371 spill_fill_data.n_iter = 1 + (n_spills > 2);
2372 for (i = 0; i < spill_fill_data.n_iter; ++i)
2374 int regno = next_scratch_gr_reg ();
2375 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2376 current_frame_info.gr_used_mask |= 1 << regno;
2381 finish_spill_pointers (void)
2383 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2387 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2389 int iter = spill_fill_data.next_iter;
2390 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2391 rtx disp_rtx = GEN_INT (disp);
2394 if (spill_fill_data.prev_addr[iter])
2396 if (CONST_OK_FOR_N (disp))
2398 *spill_fill_data.prev_addr[iter]
2399 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2400 gen_rtx_PLUS (DImode,
2401 spill_fill_data.iter_reg[iter],
2403 REG_NOTES (spill_fill_data.prev_insn[iter])
2404 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2405 REG_NOTES (spill_fill_data.prev_insn[iter]));
2409 /* ??? Could use register post_modify for loads. */
2410 if (! CONST_OK_FOR_I (disp))
2412 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2413 emit_move_insn (tmp, disp_rtx);
2416 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2417 spill_fill_data.iter_reg[iter], disp_rtx));
2420 /* Micro-optimization: if we've created a frame pointer, it's at
2421 CFA 0, which may allow the real iterator to be initialized lower,
2422 slightly increasing parallelism. Also, if there are few saves
2423 it may eliminate the iterator entirely. */
2425 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2426 && frame_pointer_needed)
2428 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2429 set_mem_alias_set (mem, get_varargs_alias_set ());
2437 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2438 spill_fill_data.init_reg[iter]);
2443 if (! CONST_OK_FOR_I (disp))
2445 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2446 emit_move_insn (tmp, disp_rtx);
2450 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2451 spill_fill_data.init_reg[iter],
2458 /* Careful for being the first insn in a sequence. */
2459 if (spill_fill_data.init_after)
2460 insn = emit_insn_after (seq, spill_fill_data.init_after);
2463 rtx first = get_insns ();
2465 insn = emit_insn_before (seq, first);
2467 insn = emit_insn (seq);
2469 spill_fill_data.init_after = insn;
2471 /* If DISP is 0, we may or may not have a further adjustment
2472 afterward. If we do, then the load/store insn may be modified
2473 to be a post-modify. If we don't, then this copy may be
2474 eliminated by copyprop_hardreg_forward, which makes this
2475 insn garbage, which runs afoul of the sanity check in
2476 propagate_one_insn. So mark this insn as legal to delete. */
2478 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2482 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2484 /* ??? Not all of the spills are for varargs, but some of them are.
2485 The rest of the spills belong in an alias set of their own. But
2486 it doesn't actually hurt to include them here. */
2487 set_mem_alias_set (mem, get_varargs_alias_set ());
2489 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2490 spill_fill_data.prev_off[iter] = cfa_off;
2492 if (++iter >= spill_fill_data.n_iter)
2494 spill_fill_data.next_iter = iter;
2500 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2503 int iter = spill_fill_data.next_iter;
2506 mem = spill_restore_mem (reg, cfa_off);
2507 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2508 spill_fill_data.prev_insn[iter] = insn;
2515 RTX_FRAME_RELATED_P (insn) = 1;
2517 /* Don't even pretend that the unwind code can intuit its way
2518 through a pair of interleaved post_modify iterators. Just
2519 provide the correct answer. */
2521 if (frame_pointer_needed)
2523 base = hard_frame_pointer_rtx;
2528 base = stack_pointer_rtx;
2529 off = current_frame_info.total_size - cfa_off;
2533 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2534 gen_rtx_SET (VOIDmode,
2535 gen_rtx_MEM (GET_MODE (reg),
2536 plus_constant (base, off)),
2543 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2545 int iter = spill_fill_data.next_iter;
2548 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2549 GEN_INT (cfa_off)));
2550 spill_fill_data.prev_insn[iter] = insn;
2553 /* Wrapper functions that discard the CONST_INT spill offset. These
2554 exist so that we can give gr_spill/gr_fill the offset they need and
2555 use a consistent function interface. */
2558 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2560 return gen_movdi (dest, src);
2564 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2566 return gen_fr_spill (dest, src);
2570 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2572 return gen_fr_restore (dest, src);
2575 /* Called after register allocation to add any instructions needed for the
2576 prologue. Using a prologue insn is favored compared to putting all of the
2577 instructions in output_function_prologue(), since it allows the scheduler
2578 to intermix instructions with the saves of the caller saved registers. In
2579 some cases, it might be necessary to emit a barrier instruction as the last
2580 insn to prevent such scheduling.
2582 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2583 so that the debug info generation code can handle them properly.
2585 The register save area is laid out like so:
2587 [ varargs spill area ]
2588 [ fr register spill area ]
2589 [ br register spill area ]
2590 [ ar register spill area ]
2591 [ pr register spill area ]
2592 [ gr register spill area ] */
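/* Editorial note (not in the original source): in the expander below,
   cfa_off tracks the CFA-relative offset of the next save slot. It
   starts at spill_cfa_off + spill_size + extra_spill_size for the
   pr/ar saves, must equal spill_cfa_off + spill_size when the
   gr/br/fr spills begin, and must reach spill_cfa_off when they
   finish; the consistency checks below enforce exactly that. */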
2594 /* ??? Get inefficient code when the frame size is larger than can fit in an
2595 adds instruction. */
2598 ia64_expand_prologue (void)
2600 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2601 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2604 ia64_compute_frame_size (get_frame_size ());
2605 last_scratch_gr_reg = 15;
2607 /* If there is no epilogue, then we don't need some prologue insns.
2608 We need to avoid emitting the dead prologue insns, because flow
2609 will complain about them. */
2614 for (e = EXIT_BLOCK_PTR->pred; e ; e = e->pred_next)
2615 if ((e->flags & EDGE_FAKE) == 0
2616 && (e->flags & EDGE_FALLTHRU) != 0)
2618 epilogue_p = (e != NULL);
2623 /* Set the local, input, and output register names. We need to do this
2624 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2625 half. If we use in/loc/out register names, then we get assembler errors
2626 in crtn.S because there is no alloc insn or regstk directive in there. */
2627 if (! TARGET_REG_NAMES)
2629 int inputs = current_frame_info.n_input_regs;
2630 int locals = current_frame_info.n_local_regs;
2631 int outputs = current_frame_info.n_output_regs;
2633 for (i = 0; i < inputs; i++)
2634 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2635 for (i = 0; i < locals; i++)
2636 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2637 for (i = 0; i < outputs; i++)
2638 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2641 /* Set the frame pointer register name. The regnum is logically loc79,
2642 but of course we'll not have allocated that many locals. Rather than
2643 worrying about renumbering the existing rtxs, we adjust the name. */
2644 /* ??? This code means that we can never use one local register when
2645 there is a frame pointer. loc79 gets wasted in this case, as it is
2646 renamed to a register that will never be used. See also the try_locals
2647 code in find_gr_spill. */
2648 if (current_frame_info.reg_fp)
2650 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2651 reg_names[HARD_FRAME_POINTER_REGNUM]
2652 = reg_names[current_frame_info.reg_fp];
2653 reg_names[current_frame_info.reg_fp] = tmp;
2656 /* We don't need an alloc instruction if we've used no outputs or locals. */
2657 if (current_frame_info.n_local_regs == 0
2658 && current_frame_info.n_output_regs == 0
2659 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2660 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2662 /* If there is no alloc, but there are input registers used, then we
2663 need a .regstk directive. */
2664 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2665 ar_pfs_save_reg = NULL_RTX;
2669 current_frame_info.need_regstk = 0;
2671 if (current_frame_info.reg_save_ar_pfs)
2672 regno = current_frame_info.reg_save_ar_pfs;
2674 regno = next_scratch_gr_reg ();
2675 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2677 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2678 GEN_INT (current_frame_info.n_input_regs),
2679 GEN_INT (current_frame_info.n_local_regs),
2680 GEN_INT (current_frame_info.n_output_regs),
2681 GEN_INT (current_frame_info.n_rotate_regs)));
2682 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2685 /* Set up frame pointer, stack pointer, and spill iterators. */
2687 n_varargs = cfun->machine->n_varargs;
2688 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2689 stack_pointer_rtx, 0);
2691 if (frame_pointer_needed)
2693 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2694 RTX_FRAME_RELATED_P (insn) = 1;
2697 if (current_frame_info.total_size != 0)
2699 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2702 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2703 offset = frame_size_rtx;
2706 regno = next_scratch_gr_reg ();
2707 offset = gen_rtx_REG (DImode, regno);
2708 emit_move_insn (offset, frame_size_rtx);
2711 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2712 stack_pointer_rtx, offset));
2714 if (! frame_pointer_needed)
2716 RTX_FRAME_RELATED_P (insn) = 1;
2717 if (GET_CODE (offset) != CONST_INT)
2720 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2721 gen_rtx_SET (VOIDmode,
2723 gen_rtx_PLUS (DImode,
2730 /* ??? At this point we must generate a magic insn that appears to
2731 modify the stack pointer, the frame pointer, and all spill
2732 iterators. This would allow the most scheduling freedom. For
2733 now, just hard stop. */
2734 emit_insn (gen_blockage ());
2737 /* Must copy out ar.unat before doing any integer spills. */
2738 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2740 if (current_frame_info.reg_save_ar_unat)
2742 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2745 alt_regno = next_scratch_gr_reg ();
2746 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2747 current_frame_info.gr_used_mask |= 1 << alt_regno;
2750 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2751 insn = emit_move_insn (ar_unat_save_reg, reg);
2752 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2754 /* Even if we're not going to generate an epilogue, we still
2755 need to save the register so that EH works. */
2756 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2757 emit_insn (gen_prologue_use (ar_unat_save_reg));
2760 ar_unat_save_reg = NULL_RTX;
2762 /* Spill all varargs registers. Do this before spilling any GR registers,
2763 since we want the UNAT bits for the GR registers to override the UNAT
2764 bits from varargs, which we don't care about. */
2767 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2769 reg = gen_rtx_REG (DImode, regno);
2770 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2773 /* Locate the bottom of the register save area. */
2774 cfa_off = (current_frame_info.spill_cfa_off
2775 + current_frame_info.spill_size
2776 + current_frame_info.extra_spill_size);
2778 /* Save the predicate register block either in a register or in memory. */
2779 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2781 reg = gen_rtx_REG (DImode, PR_REG (0));
2782 if (current_frame_info.reg_save_pr != 0)
2784 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2785 insn = emit_move_insn (alt_reg, reg);
2787 /* ??? Denote pr spill/fill by a DImode move that modifies all
2788 64 hard registers. */
2789 RTX_FRAME_RELATED_P (insn) = 1;
2791 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2792 gen_rtx_SET (VOIDmode, alt_reg, reg),
2795 /* Even if we're not going to generate an epilogue, we still
2796 need to save the register so that EH works. */
2798 emit_insn (gen_prologue_use (alt_reg));
2802 alt_regno = next_scratch_gr_reg ();
2803 alt_reg = gen_rtx_REG (DImode, alt_regno);
2804 insn = emit_move_insn (alt_reg, reg);
2805 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2810 /* Handle AR regs in numerical order. All of them get special handling. */
2811 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2812 && current_frame_info.reg_save_ar_unat == 0)
2814 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2815 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2819 /* The alloc insn already copied ar.pfs into a general register. The
2820 only thing we have to do now is copy that register to a stack slot
2821 if we'd not allocated a local register for the job. */
2822 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2823 && current_frame_info.reg_save_ar_pfs == 0)
2825 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2826 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2830 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2832 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2833 if (current_frame_info.reg_save_ar_lc != 0)
2835 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2836 insn = emit_move_insn (alt_reg, reg);
2837 RTX_FRAME_RELATED_P (insn) = 1;
2839 /* Even if we're not going to generate an epilogue, we still
2840 need to save the register so that EH works. */
2842 emit_insn (gen_prologue_use (alt_reg));
2846 alt_regno = next_scratch_gr_reg ();
2847 alt_reg = gen_rtx_REG (DImode, alt_regno);
2848 emit_move_insn (alt_reg, reg);
2849 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2854 if (current_frame_info.reg_save_gp)
2856 insn = emit_move_insn (gen_rtx_REG (DImode,
2857 current_frame_info.reg_save_gp),
2858 pic_offset_table_rtx);
2859 /* We don't know for sure yet if this is actually needed, since
2860 we've not split the PIC call patterns. If all of the calls
2861 are indirect, and not followed by any uses of the gp, then
2862 this save is dead. Allow it to go away. */
2864 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2867 /* We should now be at the base of the gr/br/fr spill area. */
2868 if (cfa_off != (current_frame_info.spill_cfa_off
2869 + current_frame_info.spill_size))
2872 /* Spill all general registers. */
2873 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2874 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2876 reg = gen_rtx_REG (DImode, regno);
2877 do_spill (gen_gr_spill, reg, cfa_off, reg);
2881 /* Handle BR0 specially -- it may be getting stored permanently in
2882 some GR register. */
2883 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2885 reg = gen_rtx_REG (DImode, BR_REG (0));
2886 if (current_frame_info.reg_save_b0 != 0)
2888 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2889 insn = emit_move_insn (alt_reg, reg);
2890 RTX_FRAME_RELATED_P (insn) = 1;
2892 /* Even if we're not going to generate an epilogue, we still
2893 need to save the register so that EH works. */
2895 emit_insn (gen_prologue_use (alt_reg));
2899 alt_regno = next_scratch_gr_reg ();
2900 alt_reg = gen_rtx_REG (DImode, alt_regno);
2901 emit_move_insn (alt_reg, reg);
2902 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2907 /* Spill the rest of the BR registers. */
2908 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2909 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2911 alt_regno = next_scratch_gr_reg ();
2912 alt_reg = gen_rtx_REG (DImode, alt_regno);
2913 reg = gen_rtx_REG (DImode, regno);
2914 emit_move_insn (alt_reg, reg);
2915 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2919 /* Align the frame and spill all FR registers. */
2920 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2921 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2925 reg = gen_rtx_REG (XFmode, regno);
2926 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2930 if (cfa_off != current_frame_info.spill_cfa_off)
2933 finish_spill_pointers ();
2936 /* Called after register allocation to add any instructions needed for the
2937 epilogue. Using an epilogue insn is favored compared to putting all of the
2938 instructions in output_function_epilogue(), since it allows the scheduler
2939 to intermix instructions with the restores of the caller saved registers. In
2940 some cases, it might be necessary to emit a barrier instruction as the last
2941 insn to prevent such scheduling. */
2944 ia64_expand_epilogue (int sibcall_p)
2946 rtx insn, reg, alt_reg, ar_unat_save_reg;
2947 int regno, alt_regno, cfa_off;
2949 ia64_compute_frame_size (get_frame_size ());
2951 /* If there is a frame pointer, then we use it instead of the stack
2952 pointer, so that the stack pointer does not need to be valid when
2953 the epilogue starts. See EXIT_IGNORE_STACK. */
2954 if (frame_pointer_needed)
2955 setup_spill_pointers (current_frame_info.n_spilled,
2956 hard_frame_pointer_rtx, 0);
2958 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2959 current_frame_info.total_size);
2961 if (current_frame_info.total_size != 0)
2963 /* ??? At this point we must generate a magic insn that appears to
2964 modify the spill iterators and the frame pointer. This would
2965 allow the most scheduling freedom. For now, just hard stop. */
2966 emit_insn (gen_blockage ());
2969 /* Locate the bottom of the register save area. */
2970 cfa_off = (current_frame_info.spill_cfa_off
2971 + current_frame_info.spill_size
2972 + current_frame_info.extra_spill_size);
2974 /* Restore the predicate registers. */
2975 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2977 if (current_frame_info.reg_save_pr != 0)
2978 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2981 alt_regno = next_scratch_gr_reg ();
2982 alt_reg = gen_rtx_REG (DImode, alt_regno);
2983 do_restore (gen_movdi_x, alt_reg, cfa_off);
2986 reg = gen_rtx_REG (DImode, PR_REG (0));
2987 emit_move_insn (reg, alt_reg);
2990 /* Restore the application registers. */
2992 /* Load the saved unat from the stack, but do not restore it until
2993 after the GRs have been restored. */
2994 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2996 if (current_frame_info.reg_save_ar_unat != 0)
2998 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
3001 alt_regno = next_scratch_gr_reg ();
3002 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3003 current_frame_info.gr_used_mask |= 1 << alt_regno;
3004 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3009 ar_unat_save_reg = NULL_RTX;
3011 if (current_frame_info.reg_save_ar_pfs != 0)
3013 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
3014 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3015 emit_move_insn (reg, alt_reg);
3017 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3019 alt_regno = next_scratch_gr_reg ();
3020 alt_reg = gen_rtx_REG (DImode, alt_regno);
3021 do_restore (gen_movdi_x, alt_reg, cfa_off);
3023 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3024 emit_move_insn (reg, alt_reg);
3027 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3029 if (current_frame_info.reg_save_ar_lc != 0)
3030 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
3033 alt_regno = next_scratch_gr_reg ();
3034 alt_reg = gen_rtx_REG (DImode, alt_regno);
3035 do_restore (gen_movdi_x, alt_reg, cfa_off);
3038 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3039 emit_move_insn (reg, alt_reg);
3042 /* We should now be at the base of the gr/br/fr spill area. */
3043 if (cfa_off != (current_frame_info.spill_cfa_off
3044 + current_frame_info.spill_size))
3047 /* The GP may be stored on the stack in the prologue, but it's
3048 never restored in the epilogue. Skip the stack slot. */
3049 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3052 /* Restore all general registers. */
3053 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3054 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3056 reg = gen_rtx_REG (DImode, regno);
3057 do_restore (gen_gr_restore, reg, cfa_off);
3061 /* Restore the branch registers. Handle B0 specially, as it may
3062 have gotten stored in some GR register. */
3063 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3065 if (current_frame_info.reg_save_b0 != 0)
3066 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3069 alt_regno = next_scratch_gr_reg ();
3070 alt_reg = gen_rtx_REG (DImode, alt_regno);
3071 do_restore (gen_movdi_x, alt_reg, cfa_off);
3074 reg = gen_rtx_REG (DImode, BR_REG (0));
3075 emit_move_insn (reg, alt_reg);
3078 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3079 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3081 alt_regno = next_scratch_gr_reg ();
3082 alt_reg = gen_rtx_REG (DImode, alt_regno);
3083 do_restore (gen_movdi_x, alt_reg, cfa_off);
3085 reg = gen_rtx_REG (DImode, regno);
3086 emit_move_insn (reg, alt_reg);
3089 /* Restore floating point registers. */
3090 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3091 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3095 reg = gen_rtx_REG (XFmode, regno);
3096 do_restore (gen_fr_restore_x, reg, cfa_off);
3100 /* Restore ar.unat for real. */
3101 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3103 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3104 emit_move_insn (reg, ar_unat_save_reg);
3107 if (cfa_off != current_frame_info.spill_cfa_off)
3110 finish_spill_pointers ();
3112 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
3114 /* ??? At this point we must generate a magic insn that appears to
3115 modify the spill iterators, the stack pointer, and the frame
3116 pointer. This would allow the most scheduling freedom. For now,
3118 emit_insn (gen_blockage ());
3121 if (cfun->machine->ia64_eh_epilogue_sp)
3122 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3123 else if (frame_pointer_needed)
3125 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3126 RTX_FRAME_RELATED_P (insn) = 1;
3128 else if (current_frame_info.total_size)
3130 rtx offset, frame_size_rtx;
3132 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3133 if (CONST_OK_FOR_I (current_frame_info.total_size))
3134 offset = frame_size_rtx;
3137 regno = next_scratch_gr_reg ();
3138 offset = gen_rtx_REG (DImode, regno);
3139 emit_move_insn (offset, frame_size_rtx);
3142 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3145 RTX_FRAME_RELATED_P (insn) = 1;
3146 if (GET_CODE (offset) != CONST_INT)
3149 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3150 gen_rtx_SET (VOIDmode,
3152 gen_rtx_PLUS (DImode,
3159 if (cfun->machine->ia64_eh_epilogue_bsp)
3160 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3163 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3166 int fp = GR_REG (2);
3167 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
3168 first available call clobbered register. If there was a frame_pointer
3169 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3170 so we have to make sure we're using the string "r2" when emitting
3171 the register name for the assembler. */
3172 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
3173 fp = HARD_FRAME_POINTER_REGNUM;
3175 /* We must emit an alloc to force the input registers to become output
3176 registers. Otherwise, if the callee tries to pass its parameters
3177 through to another call without an intervening alloc, then these
3178 values are lost. */
3179 /* ??? We don't need to preserve all input registers. We only need to
3180 preserve those input registers used as arguments to the sibling call.
3181 It is unclear how to compute that number here. */
3182 if (current_frame_info.n_input_regs != 0)
3183 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3184 const0_rtx, const0_rtx,
3185 GEN_INT (current_frame_info.n_input_regs),
3190 /* Return 1 if br.ret can do all the work required to return from a
3191 function. */
3194 ia64_direct_return (void)
3196 if (reload_completed && ! frame_pointer_needed)
3198 ia64_compute_frame_size (get_frame_size ());
3200 return (current_frame_info.total_size == 0
3201 && current_frame_info.n_spilled == 0
3202 && current_frame_info.reg_save_b0 == 0
3203 && current_frame_info.reg_save_pr == 0
3204 && current_frame_info.reg_save_ar_pfs == 0
3205 && current_frame_info.reg_save_ar_unat == 0
3206 && current_frame_info.reg_save_ar_lc == 0);
3211 /* Return the magic cookie that we use to hold the return address
3212 during early compilation. */
3215 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3219 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3222 /* Split this value after reload, now that we know where the return
3223 address is saved. */
3226 ia64_split_return_addr_rtx (rtx dest)
3230 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3232 if (current_frame_info.reg_save_b0 != 0)
3233 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3239 /* Compute offset from CFA for BR0. */
3240 /* ??? Must be kept in sync with ia64_expand_prologue. */
3241 off = (current_frame_info.spill_cfa_off
3242 + current_frame_info.spill_size);
3243 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3244 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3247 /* Convert CFA offset to a register based offset. */
3248 if (frame_pointer_needed)
3249 src = hard_frame_pointer_rtx;
3252 src = stack_pointer_rtx;
3253 off += current_frame_info.total_size;
3256 /* Load address into scratch register. */
3257 if (CONST_OK_FOR_I (off))
3258 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3261 emit_move_insn (dest, GEN_INT (off));
3262 emit_insn (gen_adddi3 (dest, src, dest));
3265 src = gen_rtx_MEM (Pmode, dest);
3269 src = gen_rtx_REG (DImode, BR_REG (0));
3271 emit_move_insn (dest, src);
3275 ia64_hard_regno_rename_ok (int from, int to)
3277 /* Don't clobber any of the registers we reserved for the prologue. */
3278 if (to == current_frame_info.reg_fp
3279 || to == current_frame_info.reg_save_b0
3280 || to == current_frame_info.reg_save_pr
3281 || to == current_frame_info.reg_save_ar_pfs
3282 || to == current_frame_info.reg_save_ar_unat
3283 || to == current_frame_info.reg_save_ar_lc)
3286 if (from == current_frame_info.reg_fp
3287 || from == current_frame_info.reg_save_b0
3288 || from == current_frame_info.reg_save_pr
3289 || from == current_frame_info.reg_save_ar_pfs
3290 || from == current_frame_info.reg_save_ar_unat
3291 || from == current_frame_info.reg_save_ar_lc)
3294 /* Don't use output registers outside the register frame. */
3295 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3298 /* Retain even/oddness on predicate register pairs. */
3299 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3300 return (from & 1) == (to & 1);
3305 /* Target hook for assembling integer objects. Handle word-sized
3306 aligned objects and detect the cases when @fptr is needed. */
3309 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3311 if (size == POINTER_SIZE / BITS_PER_UNIT
3313 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3314 && GET_CODE (x) == SYMBOL_REF
3315 && SYMBOL_REF_FUNCTION_P (x))
3317 if (POINTER_SIZE == 32)
3318 fputs ("\tdata4\t@fptr(", asm_out_file);
3320 fputs ("\tdata8\t@fptr(", asm_out_file);
3321 output_addr_const (asm_out_file, x);
3322 fputs (")\n", asm_out_file);
3325 return default_assemble_integer (x, size, aligned_p);
3328 /* Emit the function prologue. */
3331 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3333 int mask, grsave, grsave_prev;
3335 if (current_frame_info.need_regstk)
3336 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3337 current_frame_info.n_input_regs,
3338 current_frame_info.n_local_regs,
3339 current_frame_info.n_output_regs,
3340 current_frame_info.n_rotate_regs);
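/* Illustration (hypothetical counts): a function with 2 inputs, 3
   locals, 1 output and no rotating registers emits
   ".regstk 2, 3, 1, 0" from the fprintf above. */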
3342 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3345 /* Emit the .prologue directive. */
3348 grsave = grsave_prev = 0;
3349 if (current_frame_info.reg_save_b0 != 0)
3352 grsave = grsave_prev = current_frame_info.reg_save_b0;
3354 if (current_frame_info.reg_save_ar_pfs != 0
3355 && (grsave_prev == 0
3356 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3359 if (grsave_prev == 0)
3360 grsave = current_frame_info.reg_save_ar_pfs;
3361 grsave_prev = current_frame_info.reg_save_ar_pfs;
3363 if (current_frame_info.reg_fp != 0
3364 && (grsave_prev == 0
3365 || current_frame_info.reg_fp == grsave_prev + 1))
3368 if (grsave_prev == 0)
3369 grsave = HARD_FRAME_POINTER_REGNUM;
3370 grsave_prev = current_frame_info.reg_fp;
3372 if (current_frame_info.reg_save_pr != 0
3373 && (grsave_prev == 0
3374 || current_frame_info.reg_save_pr == grsave_prev + 1))
3377 if (grsave_prev == 0)
3378 grsave = current_frame_info.reg_save_pr;
3381 if (mask && TARGET_GNU_AS)
3382 fprintf (file, "\t.prologue %d, %d\n", mask,
3383 ia64_dbx_register_number (grsave));
3385 fputs ("\t.prologue\n", file);
3387 /* Emit a .spill directive, if necessary, to relocate the base of
3388 the register spill area. */
3389 if (current_frame_info.spill_cfa_off != -16)
3390 fprintf (file, "\t.spill %ld\n",
3391 (long) (current_frame_info.spill_cfa_off
3392 + current_frame_info.spill_size));
3395 /* Emit the .body directive at the scheduled end of the prologue. */
3398 ia64_output_function_end_prologue (FILE *file)
3400 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3403 fputs ("\t.body\n", file);
3406 /* Emit the function epilogue. */
3409 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3410 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3414 if (current_frame_info.reg_fp)
3416 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3417 reg_names[HARD_FRAME_POINTER_REGNUM]
3418 = reg_names[current_frame_info.reg_fp];
3419 reg_names[current_frame_info.reg_fp] = tmp;
3421 if (! TARGET_REG_NAMES)
3423 for (i = 0; i < current_frame_info.n_input_regs; i++)
3424 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3425 for (i = 0; i < current_frame_info.n_local_regs; i++)
3426 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3427 for (i = 0; i < current_frame_info.n_output_regs; i++)
3428 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3431 current_frame_info.initialized = 0;
3435 ia64_dbx_register_number (int regno)
3437 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3438 from its home at loc79 to something inside the register frame. We
3439 must perform the same renumbering here for the debug info. */
3440 if (current_frame_info.reg_fp)
3442 if (regno == HARD_FRAME_POINTER_REGNUM)
3443 regno = current_frame_info.reg_fp;
3444 else if (regno == current_frame_info.reg_fp)
3445 regno = HARD_FRAME_POINTER_REGNUM;
3448 if (IN_REGNO_P (regno))
3449 return 32 + regno - IN_REG (0);
3450 else if (LOC_REGNO_P (regno))
3451 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3452 else if (OUT_REGNO_P (regno))
3453 return (32 + current_frame_info.n_input_regs
3454 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3460 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3462 rtx addr_reg, eight = GEN_INT (8);
3464 /* The Intel assembler requires that the global __ia64_trampoline symbol
3465 be declared explicitly */
3468 static bool declared_ia64_trampoline = false;
3470 if (!declared_ia64_trampoline)
3472 declared_ia64_trampoline = true;
3473 (*targetm.asm_out.globalize_label) (asm_out_file,
3474 "__ia64_trampoline");
3478 /* Load up our iterator. */
3479 addr_reg = gen_reg_rtx (Pmode);
3480 emit_move_insn (addr_reg, addr);
3482 /* The first two words are the fake descriptor:
3483 __ia64_trampoline, ADDR+16. */
3484 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3485 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3486 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3488 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3489 copy_to_reg (plus_constant (addr, 16)));
3490 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3492 /* The third word is the target descriptor. */
3493 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3494 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3496 /* The fourth word is the static chain. */
3497 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
3500 /* Do any needed setup for a variadic function. CUM has not been updated
3501 for the last named argument which has type TYPE and mode MODE.
3503 We generate the actual spill instructions during prologue generation. */
3506 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3507 tree type, int * pretend_size,
3508 int second_time ATTRIBUTE_UNUSED)
3510 CUMULATIVE_ARGS next_cum = *cum;
3512 /* Skip the current argument. */
3513 ia64_function_arg_advance (&next_cum, mode, type, 1);
3515 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3517 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3518 *pretend_size = n * UNITS_PER_WORD;
3519 cfun->machine->n_varargs = n;
3523 /* Check whether TYPE is a homogeneous floating point aggregate. If
3524 it is, return the mode of the floating point type that appears
3525 in all leaves. If it is not, return VOIDmode.
3527 An aggregate is a homogeneous floating point aggregate if all
3528 fields/elements in it have the same floating point type (e.g.,
3529 SFmode). 128-bit quad-precision floats are excluded. */
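/* Editorial examples (not in the original source):
   struct { float x, y, z; } is an HFA with element mode SFmode;
   struct { double d[4]; } is an HFA with element mode DFmode;
   struct { float f; double d; } mixes element types and is not an
   HFA, and neither is an aggregate containing a quad-precision
   float. */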
3531 static enum machine_mode
3532 hfa_element_mode (tree type, int nested)
3534 enum machine_mode element_mode = VOIDmode;
3535 enum machine_mode mode;
3536 enum tree_code code = TREE_CODE (type);
3537 int know_element_mode = 0;
3542 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3543 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3544 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3545 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
3549 /* Fortran complex types are supposed to be HFAs, so we need to handle
3550 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3553 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3554 && TYPE_MODE (type) != TCmode)
3555 return GET_MODE_INNER (TYPE_MODE (type));
3560 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3561 mode if this is contained within an aggregate. */
3562 if (nested && TYPE_MODE (type) != TFmode)
3563 return TYPE_MODE (type);
3568 return hfa_element_mode (TREE_TYPE (type), 1);
3572 case QUAL_UNION_TYPE:
3573 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3575 if (TREE_CODE (t) != FIELD_DECL)
3578 mode = hfa_element_mode (TREE_TYPE (t), 1);
3579 if (know_element_mode)
3581 if (mode != element_mode)
3584 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3588 know_element_mode = 1;
3589 element_mode = mode;
3592 return element_mode;
3595 /* If we reach here, we probably have some front-end specific type
3596 that the backend doesn't know about. This can happen via the
3597 aggregate_value_p call in init_function_start. All we can do is
3598 ignore unknown tree types. */
3605 /* Return the number of words required to hold a quantity of TYPE and MODE
3606 when passed as an argument. */
3608 ia64_function_arg_words (tree type, enum machine_mode mode)
3612 if (mode == BLKmode)
3613 words = int_size_in_bytes (type);
3615 words = GET_MODE_SIZE (mode);
3617 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
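/* Worked example (editorial): a BLKmode aggregate of 12 bytes takes
   (12 + 8 - 1) / 8 = 2 argument words; a DImode scalar takes 1. */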
3620 /* Return the number of registers that should be skipped so the current
3621 argument (described by TYPE and WORDS) will be properly aligned.
3623 Integer and float arguments larger than 8 bytes start at the next
3624 even boundary. Aggregates larger than 8 bytes start at the next
3625 even boundary if the aggregate has 16 byte alignment. Note that
3626 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3627 but are still to be aligned in registers.
3629 ??? The ABI does not specify how to handle aggregates with
3630 alignment from 9 to 15 bytes, or greater than 16. We handle them
3631 all as if they had 16 byte alignment. Such aggregates can occur
3632 only if gcc extensions are used. */
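/* Worked example (editorial): with cum->words == 3 (an odd slot), a
   16-byte-aligned aggregate gets offset 1 from the function below, so
   it starts in slot 4, the next even boundary; from an even slot the
   offset is 0. */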
3634 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3636 if ((cum->words & 1) == 0)
3640 && TREE_CODE (type) != INTEGER_TYPE
3641 && TREE_CODE (type) != REAL_TYPE)
3642 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3647 /* Return rtx for register where argument is passed, or zero if it is passed
3649 /* ??? 128-bit quad-precision floats are always passed in general
3653 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3654 int named, int incoming)
3656 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3657 int words = ia64_function_arg_words (type, mode);
3658 int offset = ia64_function_arg_offset (cum, type, words);
3659 enum machine_mode hfa_mode = VOIDmode;
3661 /* If all argument slots are used, then it must go on the stack. */
3662 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3665 /* Check for and handle homogeneous FP aggregates. */
3667 hfa_mode = hfa_element_mode (type, 0);
3669 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3670 and unprototyped hfas are passed specially. */
3671 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3675 int fp_regs = cum->fp_regs;
3676 int int_regs = cum->words + offset;
3677 int hfa_size = GET_MODE_SIZE (hfa_mode);
3681 /* If prototyped, pass it in FR regs then GR regs.
3682 If not prototyped, pass it in both FR and GR regs.
3684 If this is an SFmode aggregate, then it is possible to run out of
3685 FR regs while GR regs are still left. In that case, we pass the
3686 remaining part in the GR regs. */
3688 /* Fill the FP regs. We do this always. We stop if we reach the end
3689 of the argument, the last FP register, or the last argument slot. */
3691 byte_size = ((mode == BLKmode)
3692 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3693 args_byte_size = int_regs * UNITS_PER_WORD;
3695 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3696 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3698 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3699 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3703 args_byte_size += hfa_size;
3707 /* If no prototype, then the whole thing must go in GR regs. */
3708 if (! cum->prototype)
3710 /* If this is an SFmode aggregate, then we might have some left over
3711 that needs to go in GR regs. */
3712 else if (byte_size != offset)
3713 int_regs += offset / UNITS_PER_WORD;
3715 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3717 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3719 enum machine_mode gr_mode = DImode;
3720 unsigned int gr_size;
3722 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3723 then this goes in a GR reg left adjusted/little endian, right
3724 adjusted/big endian. */
3725 /* ??? Currently this is handled wrong, because 4-byte hunks are
3726 always right adjusted/little endian. */
3729 /* If we have an even 4 byte hunk because the aggregate is a
3730 multiple of 4 bytes in size, then this goes in a GR reg right
3731 adjusted/little endian. */
3732 else if (byte_size - offset == 4)
3735 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3736 gen_rtx_REG (gr_mode, (basereg
3740 gr_size = GET_MODE_SIZE (gr_mode);
3742 if (gr_size == UNITS_PER_WORD
3743 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3745 else if (gr_size > UNITS_PER_WORD)
3746 int_regs += gr_size / UNITS_PER_WORD;
3749 /* If we ended up using just one location, just return that one loc, but
3750 change the mode back to the argument mode. */
3752 return gen_rtx_REG (mode, REGNO (XEXP (loc[0], 0)));
3754 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3757 /* Integral and aggregates go in general registers. If we have run out of
3758 FR registers, then FP values must also go in general registers. This can
3759 happen when we have a SFmode HFA. */
3760 else if (mode == TFmode || mode == TCmode
3761 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3763 int byte_size = ((mode == BLKmode)
3764 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3765 if (BYTES_BIG_ENDIAN
3766 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3767 && byte_size < UNITS_PER_WORD
3770 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3771 gen_rtx_REG (DImode,
3772 (basereg + cum->words
3775 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3778 return gen_rtx_REG (mode, basereg + cum->words + offset);
3782 /* If there is a prototype, then FP values go in a FR register when
3783 named, and in a GR register when unnamed. */
3784 else if (cum->prototype)
3787 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3788 /* In big-endian mode, an anonymous SFmode value must be represented
3789 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3790 the value into the high half of the general register. */
3791 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3792 return gen_rtx_PARALLEL (mode,
3794 gen_rtx_EXPR_LIST (VOIDmode,
3795 gen_rtx_REG (DImode, basereg + cum->words + offset),
3798 return gen_rtx_REG (mode, basereg + cum->words + offset);
3800 /* If there is no prototype, then FP values go in both FR and GR
3804 /* See comment above. */
3805 enum machine_mode inner_mode =
3806 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3808 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3809 gen_rtx_REG (mode, (FR_ARG_FIRST
3812 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3813 gen_rtx_REG (inner_mode,
3814 (basereg + cum->words
3818 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3822 /* Return number of words, at the beginning of the argument, that must be
3823 put in registers. 0 if the argument is entirely in registers or entirely
3824 on the stack. */
3827 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3828 tree type, int named ATTRIBUTE_UNUSED)
3830 int words = ia64_function_arg_words (type, mode);
3831 int offset = ia64_function_arg_offset (cum, type, words);
3833 /* If all argument slots are used, then it must go on the stack. */
3834 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3837 /* It doesn't matter whether the argument goes in FR or GR regs. If
3838 it fits within the 8 argument slots, then it goes entirely in
3839 registers. If it extends past the last argument slot, then the rest
3840 goes on the stack. */
3842 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3845 return MAX_ARGUMENT_SLOTS - cum->words - offset;
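/* Worked example (editorial): a 4-word aggregate arriving with
   cum->words == 6 and offset 0 spills past the 8 argument slots, so
   this returns 8 - 6 - 0 = 2 words in registers; the remaining two
   words go on the stack. */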
3848 /* Update CUM to point after this argument. This is patterned after
3849 ia64_function_arg. */
3852 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3853 tree type, int named)
3855 int words = ia64_function_arg_words (type, mode);
3856 int offset = ia64_function_arg_offset (cum, type, words);
3857 enum machine_mode hfa_mode = VOIDmode;
3859 /* If all arg slots are already full, then there is nothing to do. */
3860 if (cum->words >= MAX_ARGUMENT_SLOTS)
3863 cum->words += words + offset;
3865 /* Check for and handle homogeneous FP aggregates. */
3867 hfa_mode = hfa_element_mode (type, 0);
3869 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3870 and unprototyped hfas are passed specially. */
3871 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3873 int fp_regs = cum->fp_regs;
3874 /* This is the original value of cum->words + offset. */
3875 int int_regs = cum->words - words;
3876 int hfa_size = GET_MODE_SIZE (hfa_mode);
3880 /* If prototyped, pass it in FR regs then GR regs.
3881 If not prototyped, pass it in both FR and GR regs.
3883 If this is an SFmode aggregate, then it is possible to run out of
3884 FR regs while GR regs are still left. In that case, we pass the
3885 remaining part in the GR regs. */
3887 /* Fill the FP regs. We do this always. We stop if we reach the end
3888 of the argument, the last FP register, or the last argument slot. */
3890 byte_size = ((mode == BLKmode)
3891 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3892 args_byte_size = int_regs * UNITS_PER_WORD;
3894 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3895 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3898 args_byte_size += hfa_size;
3902 cum->fp_regs = fp_regs;
3905 /* Integral and aggregates go in general registers. If we have run out of
3906 FR registers, then FP values must also go in general registers. This can
3907 happen when we have a SFmode HFA. */
3908 else if (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS)
3909 cum->int_regs = cum->words;
3911 /* If there is a prototype, then FP values go in a FR register when
3912 named, and in a GR register when unnamed. */
3913 else if (cum->prototype)
3916 cum->int_regs = cum->words;
3918 /* ??? Complex types should not reach here. */
3919 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3921 /* If there is no prototype, then FP values go in both FR and GR
3925 /* ??? Complex types should not reach here. */
3926 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3927 cum->int_regs = cum->words;
3931 /* Variable sized types are passed by reference. */
3932 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3935 ia64_function_arg_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3936 enum machine_mode mode ATTRIBUTE_UNUSED,
3937 tree type, int named ATTRIBUTE_UNUSED)
3939 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
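/* Editorial example: an argument whose type is a C99 variable length
   array has a non-INTEGER_CST TYPE_SIZE, so it is passed by
   reference under this rule. */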
3942 /* True if it is OK to do sibling call optimization for the specified
3943 call expression EXP. DECL will be the called function, or NULL if
3944 this is an indirect call. */
3946 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3948 /* We must always return with our current GP. This means we can
3949 only sibcall to functions defined in the current module. */
3950 return decl && (*targetm.binds_local_p) (decl);
3954 /* Implement va_arg. */
3957 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3959 /* Variable sized types are passed by reference. */
3960 if (TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
3962 tree ptrtype = build_pointer_type (type);
3963 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3964 return build_fold_indirect_ref (addr);
3967 /* Aggregate arguments with alignment larger than 8 bytes start at
3968 the next even boundary. Integer and floating point arguments
3969 do so if they are larger than 8 bytes, whether or not they are
3970 also aligned larger than 8 bytes. */
3971 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3972 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3974 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3975 build_int_2 (2 * UNITS_PER_WORD - 1, 0));
3976 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3977 build_int_2 (-2 * UNITS_PER_WORD, -1));
3978 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3979 gimplify_and_add (t, pre_p);
3982 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
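/* Editorial note: for a 16-byte-aligned type the statements built
   above compute, in effect, valist = (valist + 15) & -16, mirroring
   the even-slot rounding the register path applies. */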
3985 /* Return 1 if the function return value is returned in memory. Return 0 if
3986 it is in a register. */
3989 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3991 enum machine_mode mode;
3992 enum machine_mode hfa_mode;
3993 HOST_WIDE_INT byte_size;
3995 mode = TYPE_MODE (valtype);
3996 byte_size = GET_MODE_SIZE (mode);
3997 if (mode == BLKmode)
3999 byte_size = int_size_in_bytes (valtype);
4004 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4006 hfa_mode = hfa_element_mode (valtype, 0);
4007 if (hfa_mode != VOIDmode)
4009 int hfa_size = GET_MODE_SIZE (hfa_mode);
4011 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4016 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4022 /* Return rtx for register that holds the function return value. */
4025 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
4027 enum machine_mode mode;
4028 enum machine_mode hfa_mode;
4030 mode = TYPE_MODE (valtype);
4031 hfa_mode = hfa_element_mode (valtype, 0);
4033 if (hfa_mode != VOIDmode)
4041 hfa_size = GET_MODE_SIZE (hfa_mode);
4042 byte_size = ((mode == BLKmode)
4043 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4045 for (i = 0; offset < byte_size; i++)
4047 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4048 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4054 return XEXP (loc[0], 0);
4056 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4058 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4059 return gen_rtx_REG (mode, FR_ARG_FIRST);
4062 if (BYTES_BIG_ENDIAN
4063 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4071 bytesize = int_size_in_bytes (valtype);
4072 for (i = 0; offset < bytesize; i++)
4074 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4075 gen_rtx_REG (DImode,
4078 offset += UNITS_PER_WORD;
4080 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4083 return gen_rtx_REG (mode, GR_RET_FIRST);
4087 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
4088 We need to emit DTP-relative relocations. */
4091 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4095 fputs ("\tdata8.ua\t@dtprel(", file);
4096 output_addr_const (file, x);
4100 /* Print a memory address as an operand to reference that memory location. */
4102 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4103 also call this from ia64_print_operand for memory addresses. */
4106 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4107 rtx address ATTRIBUTE_UNUSED)
4111 /* Print an operand to an assembler instruction.
4112 C Swap and print a comparison operator.
4113 D Print an FP comparison operator.
4114 E Print 32 - constant, for SImode shifts as extract.
4115 e Print 64 - constant, for DImode rotates.
4116 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4117 a floating point register emitted normally.
4118 I Invert a predicate register by adding 1.
4119 J Select the proper predicate register for a condition.
4120 j Select the inverse predicate register for a condition.
4121 O Append .acq for volatile load.
4122 P Postincrement of a MEM.
4123 Q Append .rel for volatile store.
4124 S Shift amount for shladd instruction.
4125 T Print an 8-bit sign-extended number (K) as a 32-bit unsigned number
4126 for the Intel assembler.
4127 U Print an 8-bit sign-extended number (K) as a 64-bit unsigned number
4128 for the Intel assembler.
4129 r Print register name, or constant 0 as r0. HP compatibility for
4132 ia64_print_operand (FILE * file, rtx x, int code)
4139 /* Handled below. */
4144 enum rtx_code c = swap_condition (GET_CODE (x));
4145 fputs (GET_RTX_NAME (c), file);
4150 switch (GET_CODE (x))
4162 str = GET_RTX_NAME (GET_CODE (x));
4169 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4173 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4177 if (x == CONST0_RTX (GET_MODE (x)))
4178 str = reg_names [FR_REG (0)];
4179 else if (x == CONST1_RTX (GET_MODE (x)))
4180 str = reg_names [FR_REG (1)];
4181 else if (GET_CODE (x) == REG)
4182 str = reg_names [REGNO (x)];
4189 fputs (reg_names [REGNO (x) + 1], file);
4195 unsigned int regno = REGNO (XEXP (x, 0));
4196 if (GET_CODE (x) == EQ)
4200 fputs (reg_names [regno], file);
4205 if (MEM_VOLATILE_P (x))
4206 fputs (".acq", file);
4211 HOST_WIDE_INT value;
4213 switch (GET_CODE (XEXP (x, 0)))
4219 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4220 if (GET_CODE (x) == CONST_INT)
4222 else if (GET_CODE (x) == REG)
4224 fprintf (file, ", %s", reg_names[REGNO (x)]);
4232 value = GET_MODE_SIZE (GET_MODE (x));
4236 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4240 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4245 if (MEM_VOLATILE_P (x))
4246 fputs (".rel", file);
4250 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4254 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4256 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4262 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4264 const char *prefix = "0x";
4265 if (INTVAL (x) & 0x80000000)
4267 fprintf (file, "0xffffffff");
4270 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4276 /* If this operand is the constant zero, write it as register zero.
4277 Any register, zero, or CONST_INT value is OK here. */
4278 if (GET_CODE (x) == REG)
4279 fputs (reg_names[REGNO (x)], file);
4280 else if (x == CONST0_RTX (GET_MODE (x)))
4282 else if (GET_CODE (x) == CONST_INT)
4283 output_addr_const (file, x);
4285 output_operand_lossage ("invalid %%r value");
4292 /* For conditional branches, returns, or calls, substitute
4293 sptk, dptk, dpnt, or spnt for %s. */
4294 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4297 int pred_val = INTVAL (XEXP (x, 0));
4299 /* Guess that the bottom 2% and top 2% are statically predicted,
per the thresholds below. */
4300 if (pred_val < REG_BR_PROB_BASE / 50)
4302 else if (pred_val < REG_BR_PROB_BASE / 2)
4304 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4309 else if (GET_CODE (current_output_insn) == CALL_INSN)
4314 fputs (which, file);
4319 x = current_insn_predicate;
4322 unsigned int regno = REGNO (XEXP (x, 0));
4323 if (GET_CODE (x) == EQ)
4325 fprintf (file, "(%s) ", reg_names [regno]);
4330 output_operand_lossage ("ia64_print_operand: unknown code");
4334 switch (GET_CODE (x))
4336 /* This happens for the spill/restore instructions. */
4341 /* ... fall through ... */
4344 fputs (reg_names [REGNO (x)], file);
4349 rtx addr = XEXP (x, 0);
4350 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4351 addr = XEXP (addr, 0);
4352 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4357 output_addr_const (file, x);
4364 /* Compute a (partial) cost for rtx X. Return true if the complete
4365 cost has been computed, and false if subexpressions should be
4366 scanned. In either case, *TOTAL contains the cost result. */
4367 /* ??? This is incomplete. */
4370 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4378 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4381 if (CONST_OK_FOR_I (INTVAL (x)))
4383 else if (CONST_OK_FOR_J (INTVAL (x)))
4386 *total = COSTS_N_INSNS (1);
4389 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4392 *total = COSTS_N_INSNS (1);
4397 *total = COSTS_N_INSNS (1);
4403 *total = COSTS_N_INSNS (3);
4407 /* For multiplies wider than HImode, we have to go to the FPU,
4408 which normally involves copies. Plus there's the latency
4409 of the multiply itself, and the latency of the instructions to
4410 transfer integer regs to FP regs. */
4411 /* ??? Check for FP mode. */
4412 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4413 *total = COSTS_N_INSNS (10);
4415 *total = COSTS_N_INSNS (2);
4423 *total = COSTS_N_INSNS (1);
4430 /* We make division expensive, so that division by a constant will be
4431 optimized into a multiply. */
4432 *total = COSTS_N_INSNS (60);
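/* Illustrative effect (a sketch, not from the original source): with the
   cost set this high, an expression like

     unsigned q = n / 10;

   is expanded as a multiply by a precomputed reciprocal constant plus
   shifts instead of a runtime divide. */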
4440 /* Calculate the cost of moving data from a register in class FROM to
4441 one in class TO, using MODE. */
4444 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4447 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4448 if (to == ADDL_REGS)
4450 if (from == ADDL_REGS)
4453 /* All costs are symmetric, so reduce the number of cases by putting the
4454 lower-numbered class as the destination. */
4457 enum reg_class tmp = to;
4458 to = from, from = tmp;
4461 /* Moving between FR and GR registers in XFmode must be more expensive
4462 than 2, so that we get secondary memory reloads. Between FR_REGS,
4463 we have to make this at least as expensive as MEMORY_MOVE_COST
4464 to avoid spectacularly poor register class preferencing. */
4467 if (to != GR_REGS || from != GR_REGS)
4468 return MEMORY_MOVE_COST (mode, to, 0);
4476 /* Moving between PR registers takes two insns. */
4477 if (from == PR_REGS)
4479 /* Moving between PR and anything but GR is impossible. */
4480 if (from != GR_REGS)
4481 return MEMORY_MOVE_COST (mode, to, 0);
4485 /* Moving between BR and anything but GR is impossible. */
4486 if (from != GR_REGS && from != GR_AND_BR_REGS)
4487 return MEMORY_MOVE_COST (mode, to, 0);
4492 /* Moving between AR and anything but GR is impossible. */
4493 if (from != GR_REGS)
4494 return MEMORY_MOVE_COST (mode, to, 0);
4499 case GR_AND_FR_REGS:
4500 case GR_AND_BR_REGS:
4511 /* This function returns the register class required for a secondary
4512 register when copying between one of the registers in CLASS and X,
4513 using MODE. A return value of NO_REGS means that no secondary register
4517 ia64_secondary_reload_class (enum reg_class class,
4518 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4522 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4523 regno = true_regnum (x);
4530 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4531 interaction. We end up with two pseudos with overlapping lifetimes
4532 both of which are equiv to the same constant, and both of which need
4533 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4534 changes depending on the path length, which means the qty_first_reg
4535 check in make_regs_eqv can give different answers at different times.
4536 At some point I'll probably need a reload_indi pattern to handle
4539 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4540 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4541 non-general registers for good measure. */
4542 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4545 /* This is needed if a pseudo used as a call_operand gets spilled to a
4547 if (GET_CODE (x) == MEM)
4552 /* Need to go through general registers to get to other class regs. */
4553 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4556 /* This can happen when a paradoxical subreg is an operand to the
4558 /* ??? This shouldn't be necessary after instruction scheduling is
4559 enabled, because paradoxical subregs are not accepted by
4560 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4561 stop the paradoxical subreg stupidity in the *_operand functions
4563 if (GET_CODE (x) == MEM
4564 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4565 || GET_MODE (x) == QImode))
4568 /* This can happen because of the ior/and/etc patterns that accept FP
4569 registers as operands. If the third operand is a constant, then it
4570 needs to be reloaded into a FP register. */
4571 if (GET_CODE (x) == CONST_INT)
4574 /* This can happen because of register elimination in a muldi3 insn.
4575 E.g. `26107 * (unsigned long)&u'. */
4576 if (GET_CODE (x) == PLUS)
4581 /* ??? This happens if we cse/gcse a BImode value across a call,
4582 and the function has a nonlocal goto. This is because global
4583 does not allocate call crossing pseudos to hard registers when
4584 current_function_has_nonlocal_goto is true. This is relatively
4585 common for C++ programs that use exceptions. To reproduce,
4586 return NO_REGS and compile libstdc++. */
4587 if (GET_CODE (x) == MEM)
4590 /* This can happen when we take a BImode subreg of a DImode value,
4591 and that DImode value winds up in some non-GR register. */
4592 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4604 /* Emit text to declare externally defined variables and functions, because
4605 the Intel assembler does not support undefined externals. */
4608 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4610 int save_referenced;
4612 /* GNU as does not need anything here, but the HP linker does need
4613 something for external functions. */
4617 || TREE_CODE (decl) != FUNCTION_DECL
4618 || strstr (name, "__builtin_") == name))
4621 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4622 the linker when we do this, so we need to be careful not to do this for
4623 builtin functions which have no library equivalent. Unfortunately, we
4624 can't tell here whether or not a function will actually be called by
4625 expand_expr, so we pull in library functions even if we may not need
4627 if (! strcmp (name, "__builtin_next_arg")
4628 || ! strcmp (name, "alloca")
4629 || ! strcmp (name, "__builtin_constant_p")
4630 || ! strcmp (name, "__builtin_args_info"))
4634 ia64_hpux_add_extern_decl (decl);
4637 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4639 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4640 if (TREE_CODE (decl) == FUNCTION_DECL)
4641 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4642 (*targetm.asm_out.globalize_label) (file, name);
4643 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4647 /* Parse the -mfixed-range= option string. */
4650 fix_range (const char *const_str)
4653 char *str, *dash, *comma;
4655 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4656 REG2 are either register names or register numbers. The effect
4657 of this option is to mark the registers in the range from REG1 to
4658 REG2 as ``fixed'' so they won't be used by the compiler. This is
4659 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
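/* Usage example (illustrative): -mfixed-range=f32-f127 reserves f32
   through f127, and several ranges may be given separated by commas,
   e.g. -mfixed-range=f32-f63,f96-f127. */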
4661 i = strlen (const_str);
4662 str = (char *) alloca (i + 1);
4663 memcpy (str, const_str, i + 1);
4667 dash = strchr (str, '-');
4670 warning ("value of -mfixed-range must have form REG1-REG2");
4675 comma = strchr (dash + 1, ',');
4679 first = decode_reg_name (str);
4682 warning ("unknown register name: %s", str);
4686 last = decode_reg_name (dash + 1);
4689 warning ("unknown register name: %s", dash + 1);
4697 warning ("%s-%s is an empty range", str, dash + 1);
4701 for (i = first; i <= last; ++i)
4702 fixed_regs[i] = call_used_regs[i] = 1;
4712 static struct machine_function *
4713 ia64_init_machine_status (void)
4715 return ggc_alloc_cleared (sizeof (struct machine_function));
4718 /* Handle TARGET_OPTIONS switches. */
4721 ia64_override_options (void)
4725 const char *const name; /* processor name or nickname. */
4726 const enum processor_type processor;
4728 const processor_alias_table[] =
4730 {"itanium", PROCESSOR_ITANIUM},
4731 {"itanium1", PROCESSOR_ITANIUM},
4732 {"merced", PROCESSOR_ITANIUM},
4733 {"itanium2", PROCESSOR_ITANIUM2},
4734 {"mckinley", PROCESSOR_ITANIUM2},
4737 int const pta_size = ARRAY_SIZE (processor_alias_table);
4740 if (TARGET_AUTO_PIC)
4741 target_flags |= MASK_CONST_GP;
4743 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4745 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4746 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4748 warning ("cannot optimize floating point division for both latency and throughput");
4749 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4753 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4754 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4756 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4760 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4762 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4763 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4765 warning ("cannot optimize integer division for both latency and throughput");
4766 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4770 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4771 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4773 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4777 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4779 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4780 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4782 warning ("cannot optimize square root for both latency and throughput");
4783 target_flags &= ~MASK_INLINE_SQRT_THR;
4787 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4788 target_flags &= ~MASK_INLINE_SQRT_LAT;
4790 target_flags &= ~MASK_INLINE_SQRT_THR;
4794 if (TARGET_INLINE_SQRT_LAT)
4796 warning ("not yet implemented: latency-optimized inline square root");
4797 target_flags &= ~MASK_INLINE_SQRT_LAT;
4800 if (ia64_fixed_range_string)
4801 fix_range (ia64_fixed_range_string);
4803 if (ia64_tls_size_string)
4806 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4807 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4808 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4810 ia64_tls_size = tmp;
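/* Usage example (illustrative): -mtls-size=22 selects 22-bit TLS offsets;
   the check above accepts only 14, 22 and 64. */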
4813 if (!ia64_tune_string)
4814 ia64_tune_string = "itanium2";
4816 for (i = 0; i < pta_size; i++)
4817 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4819 ia64_tune = processor_alias_table[i].processor;
4824 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4826 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4827 flag_schedule_insns_after_reload = 0;
4829 /* Variable tracking should be run after all optimizations that change the
4830 order of insns. It also needs a valid CFG. */
4831 ia64_flag_var_tracking = flag_var_tracking;
4832 flag_var_tracking = 0;
4834 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4836 init_machine_status = ia64_init_machine_status;
4839 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4840 static enum attr_type ia64_safe_type (rtx);
4842 static enum attr_itanium_class
4843 ia64_safe_itanium_class (rtx insn)
4845 if (recog_memoized (insn) >= 0)
4846 return get_attr_itanium_class (insn);
4848 return ITANIUM_CLASS_UNKNOWN;
4851 static enum attr_type
4852 ia64_safe_type (rtx insn)
4854 if (recog_memoized (insn) >= 0)
4855 return get_attr_type (insn);
4857 return TYPE_UNKNOWN;
4860 /* The following collection of routines emit instruction group stop bits as
4861 necessary to avoid dependencies. */
4863 /* Need to track some additional registers as far as serialization is
4864 concerned so we can properly handle br.call and br.ret. We could
4865 make these registers visible to gcc, but since these registers are
4866 never explicitly used in gcc generated code, it seems wasteful to
4867 do so (plus it would make the call and return patterns needlessly
4869 #define REG_RP (BR_REG (0))
4870 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4871 /* This is used for volatile asms that may require a stop bit immediately
4872 before and after them. */
4873 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4874 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4875 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4877 /* For each register, we keep track of how it has been written in the
4878 current instruction group.
4880 If a register is written unconditionally (no qualifying predicate),
4881 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4883 If a register is written if its qualifying predicate P is true, we
4884 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4885 may be written again by the complement of P (P^1) and when this happens,
4886 WRITE_COUNT gets set to 2.
4888 The result of this is that whenever an insn attempts to write a register
4889 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4891 If a predicate register is written by a floating-point insn, we set
4892 WRITTEN_BY_FP to true.
4894 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4895 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
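/* An illustrative sketch of this bookkeeping (not from the original
   source), with complementary predicates p6/p7 from one compare:

	cmp.eq p6, p7 = r8, r9
	(p6) mov r4 = r10	// WRITE_COUNT (r4) = 1, FIRST_PRED = p6
	(p7) mov r4 = r11	// complement of p6: OK, WRITE_COUNT (r4) = 2
	(p6) mov r4 = r12	// r4 written twice already: barrier needed  */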
4897 struct reg_write_state
4899 unsigned int write_count : 2;
4900 unsigned int first_pred : 16;
4901 unsigned int written_by_fp : 1;
4902 unsigned int written_by_and : 1;
4903 unsigned int written_by_or : 1;
4906 /* Cumulative info for the current instruction group. */
4907 struct reg_write_state rws_sum[NUM_REGS];
4908 /* Info for the current instruction. This gets copied to rws_sum after a
4909 stop bit is emitted. */
4910 struct reg_write_state rws_insn[NUM_REGS];
4912 /* Indicates whether this is the first instruction after a stop bit,
4913 in which case we don't need another stop bit. Without this, we hit
4914 the abort in ia64_variable_issue when scheduling an alloc. */
4915 static int first_instruction;
4917 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4918 RTL for one instruction. */
4921 unsigned int is_write : 1; /* Is register being written? */
4922 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4923 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4924 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4925 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4926 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4929 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4930 static int rws_access_regno (int, struct reg_flags, int);
4931 static int rws_access_reg (rtx, struct reg_flags, int);
4932 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4933 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4934 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4935 static void init_insn_group_barriers (void);
4936 static int group_barrier_needed_p (rtx);
4937 static int safe_group_barrier_needed_p (rtx);
4939 /* Update *RWS for REGNO, which is being written by the current instruction,
4940 with predicate PRED, and associated register flags in FLAGS. */
4943 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4946 rws[regno].write_count++;
4948 rws[regno].write_count = 2;
4949 rws[regno].written_by_fp |= flags.is_fp;
4950 /* ??? Not tracking and/or across differing predicates. */
4951 rws[regno].written_by_and = flags.is_and;
4952 rws[regno].written_by_or = flags.is_or;
4953 rws[regno].first_pred = pred;
4956 /* Handle an access to register REGNO of type FLAGS using predicate register
4957 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4958 a dependency with an earlier instruction in the same group. */
4961 rws_access_regno (int regno, struct reg_flags flags, int pred)
4963 int need_barrier = 0;
4965 if (regno >= NUM_REGS)
4968 if (! PR_REGNO_P (regno))
4969 flags.is_and = flags.is_or = 0;
4975 /* Does one insn write the same reg multiple times? */
4976 if (rws_insn[regno].write_count > 0)
4979 /* Update info for current instruction. */
4980 rws_update (rws_insn, regno, flags, pred);
4981 write_count = rws_sum[regno].write_count;
4983 switch (write_count)
4986 /* The register has not been written yet. */
4987 rws_update (rws_sum, regno, flags, pred);
4991 /* The register has been written via a predicate. If this is
4992 not a complementary predicate, then we need a barrier. */
4993 /* ??? This assumes that P and P+1 are always complementary
4994 predicates for P even. */
4995 if (flags.is_and && rws_sum[regno].written_by_and)
4997 else if (flags.is_or && rws_sum[regno].written_by_or)
4999 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5001 rws_update (rws_sum, regno, flags, pred);
5005 /* The register has been unconditionally written already. We
5007 if (flags.is_and && rws_sum[regno].written_by_and)
5009 else if (flags.is_or && rws_sum[regno].written_by_or)
5013 rws_sum[regno].written_by_and = flags.is_and;
5014 rws_sum[regno].written_by_or = flags.is_or;
5023 if (flags.is_branch)
5025 /* Branches have several RAW exceptions that allow us to avoid
5028 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5029 /* RAW dependencies on branch regs are permissible as long
5030 as the writer is a non-branch instruction. Since we
5031 never generate code that uses a branch register written
5032 by a branch instruction, handling this case is
5036 if (REGNO_REG_CLASS (regno) == PR_REGS
5037 && ! rws_sum[regno].written_by_fp)
5038 /* The predicates of a branch are available within the
5039 same insn group as long as the predicate was written by
5040 something other than a floating-point instruction. */
5044 if (flags.is_and && rws_sum[regno].written_by_and)
5046 if (flags.is_or && rws_sum[regno].written_by_or)
5049 switch (rws_sum[regno].write_count)
5052 /* The register has not been written yet. */
5056 /* The register has been written via a predicate. If this is
5057 not a complementary predicate, then we need a barrier. */
5058 /* ??? This assumes that P and P+1 are always complementary
5059 predicates for P even. */
5060 if ((rws_sum[regno].first_pred ^ 1) != pred)
5065 /* The register has been unconditionally written already. We
5075 return need_barrier;
5079 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5081 int regno = REGNO (reg);
5082 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5085 return rws_access_regno (regno, flags, pred);
5088 int need_barrier = 0;
5090 need_barrier |= rws_access_regno (regno + n, flags, pred);
5091 return need_barrier;
5095 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
5096 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
5099 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
5101 rtx src = SET_SRC (x);
5105 switch (GET_CODE (src))
5111 if (SET_DEST (x) == pc_rtx)
5112 /* X is a conditional branch. */
5116 int is_complemented = 0;
5118 /* X is a conditional move. */
5119 rtx cond = XEXP (src, 0);
5120 if (GET_CODE (cond) == EQ)
5121 is_complemented = 1;
5122 cond = XEXP (cond, 0);
5123 if (GET_CODE (cond) != REG
5124 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5127 if (XEXP (src, 1) == SET_DEST (x)
5128 || XEXP (src, 2) == SET_DEST (x))
5130 /* X is a conditional move that conditionally writes the
5133 /* We need another complement in this case. */
5134 if (XEXP (src, 1) == SET_DEST (x))
5135 is_complemented = ! is_complemented;
5137 *ppred = REGNO (cond);
5138 if (is_complemented)
5142 /* ??? If this is a conditional write to the dest, then this
5143 instruction does not actually read one source. This probably
5144 doesn't matter, because that source is also the dest. */
5145 /* ??? Multiple writes to predicate registers are allowed
5146 if they are all AND type compares, or if they are all OR
5147 type compares. We do not generate such instructions
5150 /* ... fall through ... */
5153 if (COMPARISON_P (src)
5154 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
5155 /* Set pflags->is_fp to 1 so that we know we're dealing
5156 with a floating point comparison when processing the
5157 destination of the SET. */
5160 /* Discover if this is a parallel comparison. We only handle
5161 and.orcm and or.andcm at present, since we must retain a
5162 strict inverse on the predicate pair. */
5163 else if (GET_CODE (src) == AND)
5165 else if (GET_CODE (src) == IOR)
5172 /* Subroutine of rtx_needs_barrier; this function determines whether the
5173 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5174 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5178 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
5180 int need_barrier = 0;
5182 rtx src = SET_SRC (x);
5184 if (GET_CODE (src) == CALL)
5185 /* We don't need to worry about the result registers that
5186 get written by a subroutine call. */
5187 return rtx_needs_barrier (src, flags, pred);
5188 else if (SET_DEST (x) == pc_rtx)
5190 /* X is a conditional branch. */
5191 /* ??? This seems redundant, as the caller sets this bit for
5193 flags.is_branch = 1;
5194 return rtx_needs_barrier (src, flags, pred);
5197 need_barrier = rtx_needs_barrier (src, flags, pred);
5199 /* This instruction unconditionally uses a predicate register. */
5201 need_barrier |= rws_access_reg (cond, flags, 0);
5204 if (GET_CODE (dst) == ZERO_EXTRACT)
5206 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5207 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5208 dst = XEXP (dst, 0);
5210 return need_barrier;
5213 /* Handle an access to rtx X of type FLAGS using predicate register
5214 PRED. Return 1 if this access creates a dependency with an earlier
5215 instruction in the same group. */
5218 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5221 int is_complemented = 0;
5222 int need_barrier = 0;
5223 const char *format_ptr;
5224 struct reg_flags new_flags;
5232 switch (GET_CODE (x))
5235 update_set_flags (x, &new_flags, &pred, &cond);
5236 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
5237 if (GET_CODE (SET_SRC (x)) != CALL)
5239 new_flags.is_write = 1;
5240 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5245 new_flags.is_write = 0;
5246 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5248 /* Avoid multiple register writes, in case this is a pattern with
5249 multiple CALL rtx. This avoids an abort in rws_access_reg. */
5250 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5252 new_flags.is_write = 1;
5253 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5254 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5255 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5260 /* X is a predicated instruction. */
5262 cond = COND_EXEC_TEST (x);
5265 need_barrier = rtx_needs_barrier (cond, flags, 0);
5267 if (GET_CODE (cond) == EQ)
5268 is_complemented = 1;
5269 cond = XEXP (cond, 0);
5270 if (GET_CODE (cond) != REG
5271 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5273 pred = REGNO (cond);
5274 if (is_complemented)
5277 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5278 return need_barrier;
5282 /* Clobber & use are for earlier compiler phases only. */
5287 /* We always emit stop bits for traditional asms. We emit stop bits
5288 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5289 if (GET_CODE (x) != ASM_OPERANDS
5290 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5292 /* Avoid writing the register multiple times if we have multiple
5293 asm outputs. This avoids an abort in rws_access_reg. */
5294 if (! rws_insn[REG_VOLATILE].write_count)
5296 new_flags.is_write = 1;
5297 rws_access_regno (REG_VOLATILE, new_flags, pred);
5302 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5303 We cannot just fall through here, since then we would be confused
5304 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5305 a traditional asm, unlike its normal usage. */
5307 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5308 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5313 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5315 rtx pat = XVECEXP (x, 0, i);
5316 if (GET_CODE (pat) == SET)
5318 update_set_flags (pat, &new_flags, &pred, &cond);
5319 need_barrier |= set_src_needs_barrier (pat, new_flags, pred, cond);
5321 else if (GET_CODE (pat) == USE
5322 || GET_CODE (pat) == CALL
5323 || GET_CODE (pat) == ASM_OPERANDS)
5324 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5325 else if (GET_CODE (pat) != CLOBBER && GET_CODE (pat) != RETURN)
5328 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5330 rtx pat = XVECEXP (x, 0, i);
5331 if (GET_CODE (pat) == SET)
5333 if (GET_CODE (SET_SRC (pat)) != CALL)
5335 new_flags.is_write = 1;
5336 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5340 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5341 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5349 if (REGNO (x) == AR_UNAT_REGNUM)
5351 for (i = 0; i < 64; ++i)
5352 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5355 need_barrier = rws_access_reg (x, flags, pred);
5359 /* Find the regs used in memory address computation. */
5360 new_flags.is_write = 0;
5361 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5364 case CONST_INT: case CONST_DOUBLE:
5365 case SYMBOL_REF: case LABEL_REF: case CONST:
5368 /* Operators with side-effects. */
5369 case POST_INC: case POST_DEC:
5370 if (GET_CODE (XEXP (x, 0)) != REG)
5373 new_flags.is_write = 0;
5374 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5375 new_flags.is_write = 1;
5376 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5380 if (GET_CODE (XEXP (x, 0)) != REG)
5383 new_flags.is_write = 0;
5384 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5385 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5386 new_flags.is_write = 1;
5387 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5390 /* Handle common unary and binary ops for efficiency. */
5391 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5392 case MOD: case UDIV: case UMOD: case AND: case IOR:
5393 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5394 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5395 case NE: case EQ: case GE: case GT: case LE:
5396 case LT: case GEU: case GTU: case LEU: case LTU:
5397 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5398 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5401 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5402 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5403 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5404 case SQRT: case FFS: case POPCOUNT:
5405 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5409 switch (XINT (x, 1))
5411 case UNSPEC_LTOFF_DTPMOD:
5412 case UNSPEC_LTOFF_DTPREL:
5414 case UNSPEC_LTOFF_TPREL:
5416 case UNSPEC_PRED_REL_MUTEX:
5417 case UNSPEC_PIC_CALL:
5419 case UNSPEC_FETCHADD_ACQ:
5420 case UNSPEC_BSP_VALUE:
5421 case UNSPEC_FLUSHRS:
5422 case UNSPEC_BUNDLE_SELECTOR:
5425 case UNSPEC_GR_SPILL:
5426 case UNSPEC_GR_RESTORE:
5428 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5429 HOST_WIDE_INT bit = (offset >> 3) & 63;
5431 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5432 new_flags.is_write = (XINT (x, 1) == 1);
5433 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5438 case UNSPEC_FR_SPILL:
5439 case UNSPEC_FR_RESTORE:
5440 case UNSPEC_GETF_EXP:
5441 case UNSPEC_SETF_EXP:
5443 case UNSPEC_FR_SQRT_RECIP_APPROX:
5444 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5447 case UNSPEC_FR_RECIP_APPROX:
5448 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5449 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5452 case UNSPEC_CMPXCHG_ACQ:
5453 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5454 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5462 case UNSPEC_VOLATILE:
5463 switch (XINT (x, 1))
5466 /* Alloc must always be the first instruction of a group.
5467 We force this by always returning true. */
5468 /* ??? We might get better scheduling if we explicitly check for
5469 input/local/output register dependencies, and modify the
5470 scheduler so that alloc is always reordered to the start of
5471 the current group. We could then eliminate all of the
5472 first_instruction code. */
5473 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5475 new_flags.is_write = 1;
5476 rws_access_regno (REG_AR_CFM, new_flags, pred);
5479 case UNSPECV_SET_BSP:
5483 case UNSPECV_BLOCKAGE:
5484 case UNSPECV_INSN_GROUP_BARRIER:
5486 case UNSPECV_PSAC_ALL:
5487 case UNSPECV_PSAC_NORMAL:
5496 new_flags.is_write = 0;
5497 need_barrier = rws_access_regno (REG_RP, flags, pred);
5498 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5500 new_flags.is_write = 1;
5501 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5502 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5506 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5507 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5508 switch (format_ptr[i])
5510 case '0': /* unused field */
5511 case 'i': /* integer */
5512 case 'n': /* note */
5513 case 'w': /* wide integer */
5514 case 's': /* pointer to string */
5515 case 'S': /* optional pointer to string */
5519 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5524 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5525 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5534 return need_barrier;
5537 /* Clear out the state for group_barrier_needed_p at the start of a
5538 sequence of insns. */
5541 init_insn_group_barriers (void)
5543 memset (rws_sum, 0, sizeof (rws_sum));
5544 first_instruction = 1;
5547 /* Given the current state, recorded by previous calls to this function,
5548 determine whether a group barrier (a stop bit) is necessary before INSN.
5549 Return nonzero if so. */
5552 group_barrier_needed_p (rtx insn)
5555 int need_barrier = 0;
5556 struct reg_flags flags;
5558 memset (&flags, 0, sizeof (flags));
5559 switch (GET_CODE (insn))
5565 /* A barrier doesn't imply an instruction group boundary. */
5569 memset (rws_insn, 0, sizeof (rws_insn));
5573 flags.is_branch = 1;
5574 flags.is_sibcall = SIBLING_CALL_P (insn);
5575 memset (rws_insn, 0, sizeof (rws_insn));
5577 /* Don't bundle a call following another call. */
5578 if ((pat = prev_active_insn (insn))
5579 && GET_CODE (pat) == CALL_INSN)
5585 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5589 flags.is_branch = 1;
5591 /* Don't bundle a jump following a call. */
5592 if ((pat = prev_active_insn (insn))
5593 && GET_CODE (pat) == CALL_INSN)
5601 if (GET_CODE (PATTERN (insn)) == USE
5602 || GET_CODE (PATTERN (insn)) == CLOBBER)
5603 /* Don't care about USE and CLOBBER "insns"---those are used to
5604 indicate to the optimizer that it shouldn't get rid of
5605 certain operations. */
5608 pat = PATTERN (insn);
5610 /* Ug. Hack hacks hacked elsewhere. */
5611 switch (recog_memoized (insn))
5613 /* We play dependency tricks with the epilogue in order
5614 to get proper schedules. Undo this for dv analysis. */
5615 case CODE_FOR_epilogue_deallocate_stack:
5616 case CODE_FOR_prologue_allocate_stack:
5617 pat = XVECEXP (pat, 0, 0);
5620 /* The pattern we use for br.cloop confuses the code above.
5621 The second element of the vector is representative. */
5622 case CODE_FOR_doloop_end_internal:
5623 pat = XVECEXP (pat, 0, 1);
5626 /* Doesn't generate code. */
5627 case CODE_FOR_pred_rel_mutex:
5628 case CODE_FOR_prologue_use:
5635 memset (rws_insn, 0, sizeof (rws_insn));
5636 need_barrier = rtx_needs_barrier (pat, flags, 0);
5638 /* Check to see if the previous instruction was a volatile
5641 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5648 if (first_instruction && INSN_P (insn)
5649 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5650 && GET_CODE (PATTERN (insn)) != USE
5651 && GET_CODE (PATTERN (insn)) != CLOBBER)
5654 first_instruction = 0;
5657 return need_barrier;
5660 /* Like group_barrier_needed_p, but do not clobber the current state. */
5663 safe_group_barrier_needed_p (rtx insn)
5665 struct reg_write_state rws_saved[NUM_REGS];
5666 int saved_first_instruction;
5669 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5670 saved_first_instruction = first_instruction;
5672 t = group_barrier_needed_p (insn);
5674 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5675 first_instruction = saved_first_instruction;
5680 /* Scan the current function and insert stop bits as necessary to
5681 eliminate dependencies. This function assumes that a final
5682 instruction scheduling pass has been run which has already
5683 inserted most of the necessary stop bits. This function only
5684 inserts new ones at basic block boundaries, since these are
5685 invisible to the scheduler. */
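/* In the emitted assembly a group barrier shows up as a stop bit, written
   ";;".  An illustrative sketch (not output captured from this file):

	add r14 = r32, r33 ;;	// stop bit ends the group
	ld8 r15 = [r14]		// RAW use of r14 is now legal  */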
5688 emit_insn_group_barriers (FILE *dump)
5692 int insns_since_last_label = 0;
5694 init_insn_group_barriers ();
5696 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5698 if (GET_CODE (insn) == CODE_LABEL)
5700 if (insns_since_last_label)
5702 insns_since_last_label = 0;
5704 else if (GET_CODE (insn) == NOTE
5705 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5707 if (insns_since_last_label)
5709 insns_since_last_label = 0;
5711 else if (GET_CODE (insn) == INSN
5712 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5713 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5715 init_insn_group_barriers ();
5718 else if (INSN_P (insn))
5720 insns_since_last_label = 1;
5722 if (group_barrier_needed_p (insn))
5727 fprintf (dump, "Emitting stop before label %d\n",
5728 INSN_UID (last_label));
5729 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5732 init_insn_group_barriers ();
5740 /* Like emit_insn_group_barriers, but used when no final scheduling pass has been run.
5741 This function has to emit all necessary group barriers. */
5744 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5748 init_insn_group_barriers ();
5750 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5752 if (GET_CODE (insn) == BARRIER)
5754 rtx last = prev_active_insn (insn);
5758 if (GET_CODE (last) == JUMP_INSN
5759 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5760 last = prev_active_insn (last);
5761 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5762 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5764 init_insn_group_barriers ();
5766 else if (INSN_P (insn))
5768 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5769 init_insn_group_barriers ();
5770 else if (group_barrier_needed_p (insn))
5772 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5773 init_insn_group_barriers ();
5774 group_barrier_needed_p (insn);
5781 static int errata_find_address_regs (rtx *, void *);
5782 static void errata_emit_nops (rtx);
5783 static void fixup_errata (void);
5785 /* This structure is used to track some details about the previous insn
5786 groups so we can determine if it may be necessary to insert NOPs to
5787 work around hardware errata. */
5790 HARD_REG_SET p_reg_set;
5791 HARD_REG_SET gr_reg_conditionally_set;
5794 /* Index into the last_group array. */
5795 static int group_idx;
5797 /* Called through for_each_rtx; determines if a hard register that was
5798 conditionally set in the previous group is used as an address register.
5799 It ensures that for_each_rtx returns 1 in that case. */
5801 errata_find_address_regs (rtx *xp, void *data ATTRIBUTE_UNUSED)
5804 if (GET_CODE (x) != MEM)
5807 if (GET_CODE (x) == POST_MODIFY)
5809 if (GET_CODE (x) == REG)
5811 struct group *prev_group = last_group + (group_idx ^ 1);
5812 if (TEST_HARD_REG_BIT (prev_group->gr_reg_conditionally_set,
5820 /* Called for each insn; this function keeps track of the state in
5821 last_group and emits additional NOPs if necessary to work around
5822 an Itanium A/B step erratum. */
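/* Schematically, the workaround emitted below is (illustrative only):

	;;			// insn group barrier
	nop 0
	;;			// insn group barrier
	<insn whose address register was conditionally set>  */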
5824 errata_emit_nops (rtx insn)
5826 struct group *this_group = last_group + group_idx;
5827 struct group *prev_group = last_group + (group_idx ^ 1);
5828 rtx pat = PATTERN (insn);
5829 rtx cond = GET_CODE (pat) == COND_EXEC ? COND_EXEC_TEST (pat) : 0;
5830 rtx real_pat = cond ? COND_EXEC_CODE (pat) : pat;
5831 enum attr_type type;
5834 if (GET_CODE (real_pat) == USE
5835 || GET_CODE (real_pat) == CLOBBER
5836 || GET_CODE (real_pat) == ASM_INPUT
5837 || GET_CODE (real_pat) == ADDR_VEC
5838 || GET_CODE (real_pat) == ADDR_DIFF_VEC
5839 || asm_noperands (PATTERN (insn)) >= 0)
5842 /* single_set doesn't work for COND_EXEC insns, so we have to duplicate
5845 if (GET_CODE (set) == PARALLEL)
5848 set = XVECEXP (real_pat, 0, 0);
5849 for (i = 1; i < XVECLEN (real_pat, 0); i++)
5850 if (GET_CODE (XVECEXP (real_pat, 0, i)) != USE
5851 && GET_CODE (XVECEXP (real_pat, 0, i)) != CLOBBER)
5858 if (set && GET_CODE (set) != SET)
5861 type = get_attr_type (insn);
5864 && set && REG_P (SET_DEST (set)) && PR_REGNO_P (REGNO (SET_DEST (set))))
5865 SET_HARD_REG_BIT (this_group->p_reg_set, REGNO (SET_DEST (set)));
5867 if ((type == TYPE_M || type == TYPE_A) && cond && set
5868 && REG_P (SET_DEST (set))
5869 && GET_CODE (SET_SRC (set)) != PLUS
5870 && GET_CODE (SET_SRC (set)) != MINUS
5871 && (GET_CODE (SET_SRC (set)) != ASHIFT
5872 || !shladd_operand (XEXP (SET_SRC (set), 1), VOIDmode))
5873 && (GET_CODE (SET_SRC (set)) != MEM
5874 || GET_CODE (XEXP (SET_SRC (set), 0)) != POST_MODIFY)
5875 && GENERAL_REGNO_P (REGNO (SET_DEST (set))))
5877 if (!COMPARISON_P (cond)
5878 || !REG_P (XEXP (cond, 0)))
5881 if (TEST_HARD_REG_BIT (prev_group->p_reg_set, REGNO (XEXP (cond, 0))))
5882 SET_HARD_REG_BIT (this_group->gr_reg_conditionally_set, REGNO (SET_DEST (set)));
5884 if (for_each_rtx (&real_pat, errata_find_address_regs, NULL))
5886 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5887 emit_insn_before (gen_nop (), insn);
5888 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5890 memset (last_group, 0, sizeof last_group);
5894 /* Emit extra nops if they are required to work around hardware errata. */
5901 if (! TARGET_B_STEP)
5905 memset (last_group, 0, sizeof last_group);
5907 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5912 if (ia64_safe_type (insn) == TYPE_S)
5915 memset (last_group + group_idx, 0, sizeof last_group[group_idx]);
5918 errata_emit_nops (insn);
5923 /* Instruction scheduling support. */
5925 #define NR_BUNDLES 10
5927 /* A list of names of all available bundles. */
5929 static const char *bundle_name [NR_BUNDLES] =
5935 #if NR_BUNDLES == 10
5945 /* Nonzero if we should insert stop bits into the schedule. */
5947 int ia64_final_schedule = 0;
5949 /* Codes of the corresponding queried units: */
5951 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5952 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5954 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5955 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5957 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5959 /* The following variable value is an insn group barrier. */
5961 static rtx dfa_stop_insn;
5963 /* The following variable value is the last issued insn. */
5965 static rtx last_scheduled_insn;
5967 /* The following variable value is the size of the DFA state. */
5969 static size_t dfa_state_size;
5971 /* The following variable value is a pointer to a DFA state used as
5972 a temporary variable. */
5974 static state_t temp_dfa_state = NULL;
5976 /* The following variable value is the DFA state after issuing the last
5979 static state_t prev_cycle_state = NULL;
5981 /* The following array element values are TRUE if the corresponding
5982 insn requires stop bits to be added before it. */
5984 static char *stops_p;
5986 /* The following variable is used to set up the array mentioned above. */
5988 static int stop_before_p = 0;
5990 /* The following variable value is the length of the arrays `clocks' and
5993 static int clocks_length;
5995 /* The following array element values are cycles on which the
5996 corresponding insn will be issued. The array is used only for
6001 /* The following array element values are the numbers of cycles that should
6002 be added to improve insn scheduling of MM-insns for Itanium1. */
6004 static int *add_cycles;
6006 static rtx ia64_single_set (rtx);
6007 static void ia64_emit_insn_before (rtx, rtx);
6009 /* Map a bundle number to its pseudo-op. */
6012 get_bundle_name (int b)
6014 return bundle_name[b];
6018 /* Return the maximum number of instructions a cpu can issue. */
6021 ia64_issue_rate (void)
6026 /* Helper function - like single_set, but look inside COND_EXEC. */
6029 ia64_single_set (rtx insn)
6031 rtx x = PATTERN (insn), ret;
6032 if (GET_CODE (x) == COND_EXEC)
6033 x = COND_EXEC_CODE (x);
6034 if (GET_CODE (x) == SET)
6037 /* Special-case prologue_allocate_stack and epilogue_deallocate_stack here.
6038 Although they are not classical single sets, the second set is there just
6039 to protect the first from being moved past FP-relative stack accesses. */
6040 switch (recog_memoized (insn))
6042 case CODE_FOR_prologue_allocate_stack:
6043 case CODE_FOR_epilogue_deallocate_stack:
6044 ret = XVECEXP (x, 0, 0);
6048 ret = single_set_2 (insn, x);
6055 /* Adjust the cost of a scheduling dependency. Return the new cost of
6056 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6059 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6061 enum attr_itanium_class dep_class;
6062 enum attr_itanium_class insn_class;
6064 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6067 insn_class = ia64_safe_itanium_class (insn);
6068 dep_class = ia64_safe_itanium_class (dep_insn);
6069 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6070 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6076 /* Like emit_insn_before, but skip cycle_display notes.
6077 ??? When cycle display notes are implemented, update this. */
6080 ia64_emit_insn_before (rtx insn, rtx before)
6082 emit_insn_before (insn, before);
6085 /* The following function marks insns that produce addresses for load
6086 and store insns. Such insns will be placed into M slots because this
6087 decreases the latency on Itanium1 (see function
6088 `ia64_produce_address_p' and the DFA descriptions). */
6091 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6093 rtx insn, link, next, next_tail;
6095 next_tail = NEXT_INSN (tail);
6096 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6099 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6101 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6103 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
6105 next = XEXP (link, 0);
6106 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
6107 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
6108 && ia64_st_address_bypass_p (insn, next))
6110 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
6111 || ia64_safe_itanium_class (next)
6112 == ITANIUM_CLASS_FLD)
6113 && ia64_ld_address_bypass_p (insn, next))
6116 insn->call = link != 0;
6120 /* We're beginning a new block. Initialize data structures as necessary. */
6123 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6124 int sched_verbose ATTRIBUTE_UNUSED,
6125 int max_ready ATTRIBUTE_UNUSED)
6127 #ifdef ENABLE_CHECKING
6130 if (reload_completed)
6131 for (insn = NEXT_INSN (current_sched_info->prev_head);
6132 insn != current_sched_info->next_tail;
6133 insn = NEXT_INSN (insn))
6134 if (SCHED_GROUP_P (insn))
6137 last_scheduled_insn = NULL_RTX;
6138 init_insn_group_barriers ();
6141 /* We are about to begin issuing insns for this clock cycle.
6142 Override the default sort algorithm to better slot instructions. */
6145 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6146 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6150 int n_ready = *pn_ready;
6151 rtx *e_ready = ready + n_ready;
6155 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6157 if (reorder_type == 0)
6159 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6161 for (insnp = ready; insnp < e_ready; insnp++)
6162 if (insnp < e_ready)
6165 enum attr_type t = ia64_safe_type (insn);
6166 if (t == TYPE_UNKNOWN)
6168 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6169 || asm_noperands (PATTERN (insn)) >= 0)
6171 rtx lowest = ready[n_asms];
6172 ready[n_asms] = insn;
6178 rtx highest = ready[n_ready - 1];
6179 ready[n_ready - 1] = insn;
6186 if (n_asms < n_ready)
6188 /* Some normal insns to process. Skip the asms. */
6192 else if (n_ready > 0)
6196 if (ia64_final_schedule)
6199 int nr_need_stop = 0;
6201 for (insnp = ready; insnp < e_ready; insnp++)
6202 if (safe_group_barrier_needed_p (*insnp))
6205 if (reorder_type == 1 && n_ready == nr_need_stop)
6207 if (reorder_type == 0)
6210 /* Move down everything that needs a stop bit, preserving
6212 while (insnp-- > ready + deleted)
6213 while (insnp >= ready + deleted)
6216 if (! safe_group_barrier_needed_p (insn))
6218 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6229 /* We are about to begin issuing insns for this clock cycle. Override
6230 the default sort algorithm to better slot instructions. */
6233 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6236 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6237 pn_ready, clock_var, 0);
6240 /* Like ia64_sched_reorder, but called after issuing each insn.
6241 Override the default sort algorithm to better slot instructions. */
6244 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6245 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6246 int *pn_ready, int clock_var)
6248 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6249 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6250 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6254 /* We are about to issue INSN. Return the number of insns left on the
6255 ready queue that can be issued this cycle. */
6258 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6259 int sched_verbose ATTRIBUTE_UNUSED,
6260 rtx insn ATTRIBUTE_UNUSED,
6261 int can_issue_more ATTRIBUTE_UNUSED)
6263 last_scheduled_insn = insn;
6264 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6265 if (reload_completed)
6267 if (group_barrier_needed_p (insn))
6269 if (GET_CODE (insn) == CALL_INSN)
6270 init_insn_group_barriers ();
6271 stops_p [INSN_UID (insn)] = stop_before_p;
6277 /* We are choosing an insn from the ready queue. Return nonzero if INSN
6281 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6283 if (insn == NULL_RTX || !INSN_P (insn))
6285 return (!reload_completed
6286 || !safe_group_barrier_needed_p (insn));
6289 /* The following variable value is a pseudo-insn used by the DFA insn
6290 scheduler to change the DFA state when the simulated clock is
6293 static rtx dfa_pre_cycle_insn;
6295 /* We are about to begin issuing INSN. Return nonzero if we cannot
6296 issue it on the given cycle CLOCK, and return zero if we should not sort
6297 the ready queue on the next clock start. */
6300 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6301 int clock, int *sort_p)
6303 int setup_clocks_p = FALSE;
6305 if (insn == NULL_RTX || !INSN_P (insn))
6307 if ((reload_completed && safe_group_barrier_needed_p (insn))
6308 || (last_scheduled_insn
6309 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6310 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6311 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6313 init_insn_group_barriers ();
6314 if (verbose && dump)
6315 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6316 last_clock == clock ? " + cycle advance" : "");
6318 if (last_clock == clock)
6320 state_transition (curr_state, dfa_stop_insn);
6321 if (TARGET_EARLY_STOP_BITS)
6322 *sort_p = (last_scheduled_insn == NULL_RTX
6323 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6328 else if (reload_completed)
6329 setup_clocks_p = TRUE;
6330 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6331 state_transition (curr_state, dfa_stop_insn);
6332 state_transition (curr_state, dfa_pre_cycle_insn);
6333 state_transition (curr_state, NULL);
6335 else if (reload_completed)
6336 setup_clocks_p = TRUE;
6337 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6338 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6339 && asm_noperands (PATTERN (insn)) == 0)
6341 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6343 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6348 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6349 if (REG_NOTE_KIND (link) == 0)
6351 enum attr_itanium_class dep_class;
6352 rtx dep_insn = XEXP (link, 0);
6354 dep_class = ia64_safe_itanium_class (dep_insn);
6355 if ((dep_class == ITANIUM_CLASS_MMMUL
6356 || dep_class == ITANIUM_CLASS_MMSHF)
6357 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6359 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6360 d = last_clock - clocks [INSN_UID (dep_insn)];
6363 add_cycles [INSN_UID (insn)] = 3 - d;
6371 /* The following page contains the abstract data `bundle states', which are
6372 used for bundling insns (inserting nops and generating templates). */
6374 /* The following describes the state of insn bundling. */
6378 /* Unique bundle state number to identify them in the debugging
6381 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6382 /* number of nops before and after the insn */
6383 short before_nops_num, after_nops_num;
6384 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
6386 int cost; /* cost of the state in cycles */
6387 int accumulated_insns_num; /* number of all previous insns including
6388 nops; an L insn is counted as 2 insns */
6389 int branch_deviation; /* deviation of previous branches from 3rd slots */
6390 struct bundle_state *next; /* next state with the same insn_num */
6391 struct bundle_state *originator; /* originator (previous insn state) */
6392 /* All bundle states are in the following chain. */
6393 struct bundle_state *allocated_states_chain;
6394 /* The DFA state after issuing the insn and the nops. */
6398 /* The following maps an insn number to the corresponding bundle state. */
6400 static struct bundle_state **index_to_bundle_states;
6402 /* The unique number of the next bundle state. */
6404 static int bundle_states_num;
6406 /* All allocated bundle states are in the following chain. */
6408 static struct bundle_state *allocated_bundle_states_chain;
6410 /* All allocated but not used bundle states are in the following
6413 static struct bundle_state *free_bundle_state_chain;
6416 /* The following function returns a free bundle state. */
6418 static struct bundle_state *
6419 get_free_bundle_state (void)
6421 struct bundle_state *result;
6423 if (free_bundle_state_chain != NULL)
6425 result = free_bundle_state_chain;
6426 free_bundle_state_chain = result->next;
6430 result = xmalloc (sizeof (struct bundle_state));
6431 result->dfa_state = xmalloc (dfa_state_size);
6432 result->allocated_states_chain = allocated_bundle_states_chain;
6433 allocated_bundle_states_chain = result;
6435 result->unique_num = bundle_states_num++;
/* The following function frees the given bundle state.  */
6443 free_bundle_state (struct bundle_state *state)
6445 state->next = free_bundle_state_chain;
6446 free_bundle_state_chain = state;
6449 /* Start work with abstract data `bundle states'. */
6452 initiate_bundle_states (void)
6454 bundle_states_num = 0;
6455 free_bundle_state_chain = NULL;
6456 allocated_bundle_states_chain = NULL;
6459 /* Finish work with abstract data `bundle states'. */
6462 finish_bundle_states (void)
6464 struct bundle_state *curr_state, *next_state;
6466 for (curr_state = allocated_bundle_states_chain;
6468 curr_state = next_state)
6470 next_state = curr_state->allocated_states_chain;
6471 free (curr_state->dfa_state);
6476 /* Hash table of the bundle states. The key is dfa_state and insn_num
6477 of the bundle states. */
6479 static htab_t bundle_state_table;
6481 /* The function returns hash of BUNDLE_STATE. */
6484 bundle_state_hash (const void *bundle_state)
6486 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6489 for (result = i = 0; i < dfa_state_size; i++)
6490 result += (((unsigned char *) state->dfa_state) [i]
6491 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6492 return result + state->insn_num;
6495 /* The function returns nonzero if the bundle state keys are equal. */
6498 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6500 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6501 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6503 return (state1->insn_num == state2->insn_num
6504 && memcmp (state1->dfa_state, state2->dfa_state,
6505 dfa_state_size) == 0);
/* The function inserts the BUNDLE_STATE into the hash table.  The
   function returns nonzero if the bundle has been inserted into the
   table.  The table contains the best bundle state with the given
   key.  */
6513 insert_bundle_state (struct bundle_state *bundle_state)
6517 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6518 if (*entry_ptr == NULL)
6520 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6521 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6522 *entry_ptr = (void *) bundle_state;
6525 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6526 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6527 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6528 > bundle_state->accumulated_insns_num
6529 || (((struct bundle_state *)
6530 *entry_ptr)->accumulated_insns_num
6531 == bundle_state->accumulated_insns_num
6532 && ((struct bundle_state *)
6533 *entry_ptr)->branch_deviation
6534 > bundle_state->branch_deviation))))
6537 struct bundle_state temp;
6539 temp = *(struct bundle_state *) *entry_ptr;
6540 *(struct bundle_state *) *entry_ptr = *bundle_state;
6541 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6542 *bundle_state = temp;
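  /* For illustration only: the preference order applied above can be
     read as the following standalone predicate (a hypothetical
     helper, not used anywhere in this file), which treats (cost,
     accumulated_insns_num, branch_deviation) as a lexicographic key:

       static int
       bundle_state_better_p (const struct bundle_state *a,
                              const struct bundle_state *b)
       {
         if (a->cost != b->cost)
           return a->cost < b->cost;
         if (a->accumulated_insns_num != b->accumulated_insns_num)
           return a->accumulated_insns_num < b->accumulated_insns_num;
         return a->branch_deviation < b->branch_deviation;
       }

     The swap above keeps the table entry as the canonical best state
     while preserving the hash chain through the `next' field.  */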
6547 /* Start work with the hash table. */
6550 initiate_bundle_state_table (void)
6552 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6556 /* Finish work with the hash table. */
6559 finish_bundle_state_table (void)
6561 htab_delete (bundle_state_table);
/* The following variable is an insn `nop' used to check bundle
   states with different numbers of inserted nops.  */
6569 static rtx ia64_nop;
/* The following function tries to issue NOPS_NUM nops for the
   current state without advancing the processor cycle.  If it fails,
   the function returns FALSE and frees the current state.  */
6576 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6580 for (i = 0; i < nops_num; i++)
6581 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6583 free_bundle_state (curr_state);
/* The following function tries to issue INSN for the current state
   without advancing the processor cycle.  If it fails, the function
   returns FALSE and frees the current state.  */
6594 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6596 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6598 free_bundle_state (curr_state);
/* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
   starting with ORIGINATOR without advancing the processor cycle.  If
   TRY_BUNDLE_END_P is TRUE, the function also (or only, if
   ONLY_BUNDLE_END_P is TRUE) tries to issue enough nops to fill the
   whole bundle.  If it is successful, the function creates a new
   bundle state and inserts it into the hash table and into
   `index_to_bundle_states'.  */
6612 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6613 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6615 struct bundle_state *curr_state;
6617 curr_state = get_free_bundle_state ();
6618 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6619 curr_state->insn = insn;
6620 curr_state->insn_num = originator->insn_num + 1;
6621 curr_state->cost = originator->cost;
6622 curr_state->originator = originator;
6623 curr_state->before_nops_num = before_nops_num;
6624 curr_state->after_nops_num = 0;
6625 curr_state->accumulated_insns_num
6626 = originator->accumulated_insns_num + before_nops_num;
6627 curr_state->branch_deviation = originator->branch_deviation;
6628 if (insn == NULL_RTX)
6630 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6632 if (GET_MODE (insn) == TImode)
6634 if (!try_issue_nops (curr_state, before_nops_num))
6636 if (!try_issue_insn (curr_state, insn))
6638 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6639 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6640 && curr_state->accumulated_insns_num % 3 != 0)
6642 free_bundle_state (curr_state);
6646 else if (GET_MODE (insn) != TImode)
6648 if (!try_issue_nops (curr_state, before_nops_num))
6650 if (!try_issue_insn (curr_state, insn))
6652 curr_state->accumulated_insns_num++;
6653 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6654 || asm_noperands (PATTERN (insn)) >= 0)
6656 if (ia64_safe_type (insn) == TYPE_L)
6657 curr_state->accumulated_insns_num++;
6661 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6662 state_transition (curr_state->dfa_state, NULL);
6664 if (!try_issue_nops (curr_state, before_nops_num))
6666 if (!try_issue_insn (curr_state, insn))
6668 curr_state->accumulated_insns_num++;
6669 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6670 || asm_noperands (PATTERN (insn)) >= 0)
6672 /* Finish bundle containing asm insn. */
6673 curr_state->after_nops_num
6674 = 3 - curr_state->accumulated_insns_num % 3;
6675 curr_state->accumulated_insns_num
6676 += 3 - curr_state->accumulated_insns_num % 3;
6678 else if (ia64_safe_type (insn) == TYPE_L)
6679 curr_state->accumulated_insns_num++;
6681 if (ia64_safe_type (insn) == TYPE_B)
6682 curr_state->branch_deviation
6683 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6684 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6686 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6689 struct bundle_state *curr_state1;
6690 struct bundle_state *allocated_states_chain;
6692 curr_state1 = get_free_bundle_state ();
6693 dfa_state = curr_state1->dfa_state;
6694 allocated_states_chain = curr_state1->allocated_states_chain;
6695 *curr_state1 = *curr_state;
6696 curr_state1->dfa_state = dfa_state;
6697 curr_state1->allocated_states_chain = allocated_states_chain;
6698 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6700 curr_state = curr_state1;
6702 if (!try_issue_nops (curr_state,
6703 3 - curr_state->accumulated_insns_num % 3))
6705 curr_state->after_nops_num
6706 = 3 - curr_state->accumulated_insns_num % 3;
6707 curr_state->accumulated_insns_num
6708 += 3 - curr_state->accumulated_insns_num % 3;
6710 if (!insert_bundle_state (curr_state))
6711 free_bundle_state (curr_state);
/* The following function returns position in the two window bundle
   for the given STATE.  */
6719 get_max_pos (state_t state)
6721 if (cpu_unit_reservation_p (state, pos_6))
6723 else if (cpu_unit_reservation_p (state, pos_5))
6725 else if (cpu_unit_reservation_p (state, pos_4))
6727 else if (cpu_unit_reservation_p (state, pos_3))
6729 else if (cpu_unit_reservation_p (state, pos_2))
6731 else if (cpu_unit_reservation_p (state, pos_1))
/* The function returns the code of a possible template for the given
   position and state.  The function should be called only with
   position values equal to 3 or 6.  */
6742 get_template (state_t state, int pos)
6747 if (cpu_unit_reservation_p (state, _0mii_))
6749 else if (cpu_unit_reservation_p (state, _0mmi_))
6751 else if (cpu_unit_reservation_p (state, _0mfi_))
6753 else if (cpu_unit_reservation_p (state, _0mmf_))
6755 else if (cpu_unit_reservation_p (state, _0bbb_))
6757 else if (cpu_unit_reservation_p (state, _0mbb_))
6759 else if (cpu_unit_reservation_p (state, _0mib_))
6761 else if (cpu_unit_reservation_p (state, _0mmb_))
6763 else if (cpu_unit_reservation_p (state, _0mfb_))
6765 else if (cpu_unit_reservation_p (state, _0mlx_))
6770 if (cpu_unit_reservation_p (state, _1mii_))
6772 else if (cpu_unit_reservation_p (state, _1mmi_))
6774 else if (cpu_unit_reservation_p (state, _1mfi_))
6776 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6778 else if (cpu_unit_reservation_p (state, _1bbb_))
6780 else if (cpu_unit_reservation_p (state, _1mbb_))
6782 else if (cpu_unit_reservation_p (state, _1mib_))
6784 else if (cpu_unit_reservation_p (state, _1mmb_))
6786 else if (cpu_unit_reservation_p (state, _1mfb_))
6788 else if (cpu_unit_reservation_p (state, _1mlx_))
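/* A note on the returned codes (an inference from the test chain
   above and from the bundle_selector uses later in this file, e.g.
   const2_rtx -> MFI and 9 -> MLX): the values 0..9 appear to encode
   the templates MII, MMI, MFI, MMF, BBB, MBB, MIB, MMB, MFB and MLX,
   in the same order as the cpu_unit_reservation_p tests.  */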
/* The following function returns the first insn important for insn
   bundling at or after INSN and before TAIL.  */
6801 get_next_important_insn (rtx insn, rtx tail)
6803 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6805 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6806 && GET_CODE (PATTERN (insn)) != USE
6807 && GET_CODE (PATTERN (insn)) != CLOBBER)
/* The following function does insn bundling.  Bundling means
   inserting templates and nop insns to fit insn groups into permitted
   templates.  Instruction scheduling uses an NDFA (non-deterministic
   finite automaton) encoding information about the templates and the
   inserted nops.  Nondeterminism of the automaton permits following
   all possible insn sequences very quickly.

   Unfortunately it is not possible to get information about inserted
   nop insns and used templates from the automaton states.  The
   automaton only says that we can issue an insn, possibly inserting
   some nops before it and using some template.  Therefore insn
   bundling in this function is implemented by using a DFA
   (deterministic finite automaton).  We follow all possible insn
   sequences by inserting 0-2 nops (that is what the NDFA describes
   for insn scheduling) before/after each insn being bundled.  We know
   the start of a simulated processor cycle from insn scheduling (an
   insn starting a new cycle has TImode).

   A simple implementation of insn bundling would create an enormous
   number of possible insn sequences satisfying the information about
   new cycle ticks taken from the insn scheduling.  To make the
   algorithm practical we use dynamic programming.  Each decision
   (about inserting nops and implicitly about previous decisions) is
   described by the structure bundle_state (see above).  If we
   generate the same bundle state (the key is the automaton state
   after issuing the insns and nops for it), we reuse the already
   generated one.  As a consequence we reject some decisions which
   cannot improve the solution and reduce the memory used by the
   algorithm.

   When we reach the end of an EBB (extended basic block), we choose
   the best sequence and then, moving back through the EBB, insert
   templates for the best alternative.  The templates are taken by
   querying the automaton state for each insn in the chosen bundle
   states.

   So the algorithm makes two (forward and backward) passes through
   the EBB.  There is an additional forward pass through the EBB for
   the Itanium1 processor.  This pass inserts more nops to make the
   dependency between a producer insn and MMMUL/MMSHF at least 4
   cycles long.  */
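/* An illustrative example (assumed code, not from the original
   sources): suppose insn scheduling produced the cycle

       ld8  r14 = [r15]     // TImode: starts a new simulated cycle
       add  r16 = r17, r18
       ;;                   // stop bit

   A bundle has three slots, so among the bundle states generated for
   this cycle the forward pass considers, e.g., "template MII, no nops
   before, one nop after the add":

       { .mii
         ld8  r14 = [r15]
         add  r16 = r17, r18
         nop.i 0 ;;
       }

   This alternative costs no extra cycles, leaves
   accumulated_insns_num divisible by 3 and adds no branch deviation,
   so it is preferred over alternatives that pad with more nops.  */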
6852 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6854 struct bundle_state *curr_state, *next_state, *best_state;
6855 rtx insn, next_insn;
6857 int i, bundle_end_p, only_bundle_end_p, asm_p;
6858 int pos = 0, max_pos, template0, template1;
6861 enum attr_type type;
6864 /* Count insns in the EBB. */
6865 for (insn = NEXT_INSN (prev_head_insn);
6866 insn && insn != tail;
6867 insn = NEXT_INSN (insn))
6873 dfa_clean_insn_cache ();
6874 initiate_bundle_state_table ();
6875 index_to_bundle_states = xmalloc ((insn_num + 2)
6876 * sizeof (struct bundle_state *));
6877 /* First (forward) pass -- generation of bundle states. */
6878 curr_state = get_free_bundle_state ();
6879 curr_state->insn = NULL;
6880 curr_state->before_nops_num = 0;
6881 curr_state->after_nops_num = 0;
6882 curr_state->insn_num = 0;
6883 curr_state->cost = 0;
6884 curr_state->accumulated_insns_num = 0;
6885 curr_state->branch_deviation = 0;
6886 curr_state->next = NULL;
6887 curr_state->originator = NULL;
6888 state_reset (curr_state->dfa_state);
6889 index_to_bundle_states [0] = curr_state;
  /* Shift the cycle mark if it is put on an insn which could be ignored.  */
6892 for (insn = NEXT_INSN (prev_head_insn);
6894 insn = NEXT_INSN (insn))
6896 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6897 || GET_CODE (PATTERN (insn)) == USE
6898 || GET_CODE (PATTERN (insn)) == CLOBBER)
6899 && GET_MODE (insn) == TImode)
6901 PUT_MODE (insn, VOIDmode);
6902 for (next_insn = NEXT_INSN (insn);
6904 next_insn = NEXT_INSN (next_insn))
6905 if (INSN_P (next_insn)
6906 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6907 && GET_CODE (PATTERN (next_insn)) != USE
6908 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6910 PUT_MODE (next_insn, TImode);
  /* Forward pass: generation of bundle states.  */
6915 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6920 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6921 || GET_CODE (PATTERN (insn)) == USE
6922 || GET_CODE (PATTERN (insn)) == CLOBBER)
6924 type = ia64_safe_type (insn);
6925 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6927 index_to_bundle_states [insn_num] = NULL;
6928 for (curr_state = index_to_bundle_states [insn_num - 1];
6930 curr_state = next_state)
6932 pos = curr_state->accumulated_insns_num % 3;
6933 next_state = curr_state->next;
          /* We must fill up the current bundle in order to start a
             subsequent asm insn in a new bundle.  An asm insn is
             always placed in a separate bundle.  */
6938 = (next_insn != NULL_RTX
6939 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6940 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6941 /* We may fill up the current bundle if it is the cycle end
6942 without a group barrier. */
6944 = (only_bundle_end_p || next_insn == NULL_RTX
6945 || (GET_MODE (next_insn) == TImode
6946 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6947 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6949 /* We need to insert 2 nops for cases like M_MII. To
6950 guarantee issuing all insns on the same cycle for
6951 Itanium 1, we need to issue 2 nops after the first M
6952 insn (MnnMII where n is a nop insn). */
6953 || ((type == TYPE_M || type == TYPE_A)
6954 && ia64_tune == PROCESSOR_ITANIUM
6955 && !bundle_end_p && pos == 1))
6956 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6958 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6960 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6963 if (index_to_bundle_states [insn_num] == NULL)
6965 for (curr_state = index_to_bundle_states [insn_num];
6967 curr_state = curr_state->next)
6968 if (verbose >= 2 && dump)
            /* This structure is taken from the generated code of the
               pipeline hazard recognizer (see file insn-attrtab.c).
               Please don't forget to change the structure if a new
               automaton is added to the .md file.  */
6976 unsigned short one_automaton_state;
6977 unsigned short oneb_automaton_state;
6978 unsigned short two_automaton_state;
6979 unsigned short twob_automaton_state;
6984 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6985 curr_state->unique_num,
6986 (curr_state->originator == NULL
6987 ? -1 : curr_state->originator->unique_num),
6989 curr_state->before_nops_num, curr_state->after_nops_num,
6990 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6991 (ia64_tune == PROCESSOR_ITANIUM
6992 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6993 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6997 if (index_to_bundle_states [insn_num] == NULL)
    /* We should find a solution because the 2nd insn scheduling
       has found one.  */
    abort ();
7001 /* Find a state corresponding to the best insn sequence. */
7003 for (curr_state = index_to_bundle_states [insn_num];
7005 curr_state = curr_state->next)
    /* We are only looking at the states with a fully filled-up last
       bundle.  First we prefer insn sequences with minimal cost, then
       those with the minimal number of inserted nops, and finally
       those with branch insns placed in the 3rd slots.  */
7010 if (curr_state->accumulated_insns_num % 3 == 0
7011 && (best_state == NULL || best_state->cost > curr_state->cost
7012 || (best_state->cost == curr_state->cost
7013 && (curr_state->accumulated_insns_num
7014 < best_state->accumulated_insns_num
7015 || (curr_state->accumulated_insns_num
7016 == best_state->accumulated_insns_num
7017 && curr_state->branch_deviation
7018 < best_state->branch_deviation)))))
7019 best_state = curr_state;
7020 /* Second (backward) pass: adding nops and templates. */
7021 insn_num = best_state->before_nops_num;
7022 template0 = template1 = -1;
7023 for (curr_state = best_state;
7024 curr_state->originator != NULL;
7025 curr_state = curr_state->originator)
7027 insn = curr_state->insn;
7028 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
7029 || asm_noperands (PATTERN (insn)) >= 0);
7031 if (verbose >= 2 && dump)
7035 unsigned short one_automaton_state;
7036 unsigned short oneb_automaton_state;
7037 unsigned short two_automaton_state;
7038 unsigned short twob_automaton_state;
7043 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
7044 curr_state->unique_num,
7045 (curr_state->originator == NULL
7046 ? -1 : curr_state->originator->unique_num),
7048 curr_state->before_nops_num, curr_state->after_nops_num,
7049 curr_state->accumulated_insns_num, curr_state->branch_deviation,
7050 (ia64_tune == PROCESSOR_ITANIUM
7051 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
7052 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
      /* Find the position in the current bundle window.  The window
         can contain at most two bundles.  A two-bundle window means
         that the processor will make two bundle rotations.  */
7058 max_pos = get_max_pos (curr_state->dfa_state);
7060 /* The following (negative template number) means that the
7061 processor did one bundle rotation. */
7062 || (max_pos == 3 && template0 < 0))
          /* We are at the end of the window -- find template(s) for
             its bundle(s).  */
7068 template0 = get_template (curr_state->dfa_state, 3);
7071 template1 = get_template (curr_state->dfa_state, 3);
7072 template0 = get_template (curr_state->dfa_state, 6);
7075 if (max_pos > 3 && template1 < 0)
              /* This may happen when we have a stop inside a bundle.  */
7080 template1 = get_template (curr_state->dfa_state, 3);
7084 /* Emit nops after the current insn. */
7085 for (i = 0; i < curr_state->after_nops_num; i++)
7088 emit_insn_after (nop, insn);
7094 /* We are at the start of a bundle: emit the template
7095 (it should be defined). */
7098 b = gen_bundle_selector (GEN_INT (template0));
7099 ia64_emit_insn_before (b, nop);
              /* If we have a two-bundle window, we make one bundle
                 rotation.  Otherwise template0 will be undefined
                 (a negative value).  */
7103 template0 = template1;
      /* Move the position backward in the window.  A group barrier
         has no slot.  An asm insn takes a whole bundle.  */
7109 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7110 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7111 && asm_noperands (PATTERN (insn)) < 0)
7113 /* Long insn takes 2 slots. */
7114 if (ia64_safe_type (insn) == TYPE_L)
7119 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
7120 && GET_CODE (PATTERN (insn)) != ASM_INPUT
7121 && asm_noperands (PATTERN (insn)) < 0)
          /* The current insn is at the bundle start: emit the
             template.  */
7127 b = gen_bundle_selector (GEN_INT (template0));
7128 ia64_emit_insn_before (b, insn);
7129 b = PREV_INSN (insn);
              /* See comment above in the analogous place for emitting
                 nops after the insn.  */
7133 template0 = template1;
      /* Emit nops before the current insn.  */
7137 for (i = 0; i < curr_state->before_nops_num; i++)
7140 ia64_emit_insn_before (nop, insn);
7141 nop = PREV_INSN (insn);
          /* See comment above in the analogous place for emitting
             nops after the insn.  */
7152 b = gen_bundle_selector (GEN_INT (template0));
7153 ia64_emit_insn_before (b, insn);
7154 b = PREV_INSN (insn);
7156 template0 = template1;
7161 if (ia64_tune == PROCESSOR_ITANIUM)
    /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
       Itanium1 has a strange design: if the distance between an insn
       and a dependent MM-insn is less than 4, then we incur an
       additional 6-cycle stall.  So we make the distance equal to 4
       cycles if it is less.  */
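    /* A worked example (with assumed numbers): if a dependent MM-insn
       was scheduled 1 cycle after its producer, the first pass
       recorded add_cycles = 3 - 1 = 2 for it.  The loop below then
       closes the current bundle and pads the schedule with all-nop
       bundles, roughly

           { .mii
             nop.m 0
             nop.i 0
             nop.i 0 ;;
           }

       (or the denser "MI;I;" form), until the producer/consumer
       distance reaches 4 cycles, and finally re-emits the MM-insn in
       its original template and slot.  */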
7167 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7172 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7173 || GET_CODE (PATTERN (insn)) == USE
7174 || GET_CODE (PATTERN (insn)) == CLOBBER)
7176 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7177 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
        /* We found an MM-insn which needs additional cycles.  */
          /* Now we are searching for the template of the bundle in
             which the MM-insn is placed and for the position of the
             insn in the bundle (0, 1, 2).  We also check whether
             there is a stop before the insn.  */
7188 last = prev_active_insn (insn);
7189 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
7191 last = prev_active_insn (last);
7193 for (;; last = prev_active_insn (last))
7194 if (recog_memoized (last) == CODE_FOR_bundle_selector)
7196 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
                /* The insn is in an MLX bundle.  Change the template
                   to MFI because we will add nops before the insn.
                   This simplifies subsequent code a lot.  */
7202 = gen_bundle_selector (const2_rtx); /* -> MFI */
7205 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
          /* Some correctness checks: the stop is not at the bundle
             start, there are no more than 3 insns in the bundle, and
             the MM-insn is not at the start of a bundle with
             template MLX.  */
7211 if ((pred_stop_p && n == 0) || n > 2
7212 || (template0 == 9 && n != 0))
7214 /* Put nops after the insn in the bundle. */
7215 for (j = 3 - n; j > 0; j --)
7216 ia64_emit_insn_before (gen_nop (), insn);
          /* This takes into account that we will add N more nops
             before the insn later on -- please see the code below.  */
7219 add_cycles [INSN_UID (insn)]--;
7220 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
7221 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7224 add_cycles [INSN_UID (insn)]--;
7225 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7227 /* Insert "MII;" template. */
7228 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7230 ia64_emit_insn_before (gen_nop (), insn);
7231 ia64_emit_insn_before (gen_nop (), insn);
                  /* To decrease code size, we use the "MI;I;" template.  */
7236 ia64_emit_insn_before
7237 (gen_insn_group_barrier (GEN_INT (3)), insn);
7240 ia64_emit_insn_before (gen_nop (), insn);
7241 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7244 /* Put the MM-insn in the same slot of a bundle with the
7245 same template as the original one. */
7246 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
          /* To put the insn in the same slot, add the necessary number
             of nops.  */
7250 for (j = n; j > 0; j --)
7251 ia64_emit_insn_before (gen_nop (), insn);
7252 /* Put the stop if the original bundle had it. */
7254 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7258 free (index_to_bundle_states);
7259 finish_bundle_state_table ();
7261 dfa_clean_insn_cache ();
7264 /* The following function is called at the end of scheduling BB or
7265 EBB. After reload, it inserts stop bits and does insn bundling. */
7268 ia64_sched_finish (FILE *dump, int sched_verbose)
7271 fprintf (dump, "// Finishing schedule.\n");
7272 if (!reload_completed)
7274 if (reload_completed)
7276 final_emit_insn_group_barriers (dump);
7277 bundling (dump, sched_verbose, current_sched_info->prev_head,
7278 current_sched_info->next_tail);
7279 if (sched_verbose && dump)
7280 fprintf (dump, "// finishing %d-%d\n",
7281 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7282 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7288 /* The following function inserts stop bits in scheduled BB or EBB. */
7291 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7294 int need_barrier_p = 0;
7295 rtx prev_insn = NULL_RTX;
7297 init_insn_group_barriers ();
7299 for (insn = NEXT_INSN (current_sched_info->prev_head);
7300 insn != current_sched_info->next_tail;
7301 insn = NEXT_INSN (insn))
7303 if (GET_CODE (insn) == BARRIER)
7305 rtx last = prev_active_insn (insn);
7309 if (GET_CODE (last) == JUMP_INSN
7310 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7311 last = prev_active_insn (last);
7312 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7313 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7315 init_insn_group_barriers ();
7317 prev_insn = NULL_RTX;
7319 else if (INSN_P (insn))
7321 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7323 init_insn_group_barriers ();
7325 prev_insn = NULL_RTX;
7327 else if (need_barrier_p || group_barrier_needed_p (insn))
7329 if (TARGET_EARLY_STOP_BITS)
7334 last != current_sched_info->prev_head;
7335 last = PREV_INSN (last))
7336 if (INSN_P (last) && GET_MODE (last) == TImode
7337 && stops_p [INSN_UID (last)])
7339 if (last == current_sched_info->prev_head)
7341 last = prev_active_insn (last);
7343 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7344 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7346 init_insn_group_barriers ();
7347 for (last = NEXT_INSN (last);
7349 last = NEXT_INSN (last))
7351 group_barrier_needed_p (last);
7355 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7357 init_insn_group_barriers ();
7359 group_barrier_needed_p (insn);
7360 prev_insn = NULL_RTX;
7362 else if (recog_memoized (insn) >= 0)
7364 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7365 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7366 || asm_noperands (PATTERN (insn)) >= 0);
/* If the following function returns TRUE, we will use the DFA
   insn scheduler.  */
7377 ia64_use_dfa_pipeline_interface (void)
/* The following function returns the number of insns the DFA insn
   scheduler may look ahead at during first-cycle multipass
   scheduling.  */
7386 ia64_first_cycle_multipass_dfa_lookahead (void)
7388 return (reload_completed ? 6 : 4);
/* The following function initializes the variable `dfa_pre_cycle_insn'
   (and also `dfa_stop_insn' and the temporary DFA state buffers).  */
7394 ia64_init_dfa_pre_cycle_insn (void)
7396 if (temp_dfa_state == NULL)
7398 dfa_state_size = state_size ();
7399 temp_dfa_state = xmalloc (dfa_state_size);
7400 prev_cycle_state = xmalloc (dfa_state_size);
7402 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7403 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7404 recog_memoized (dfa_pre_cycle_insn);
7405 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7406 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7407 recog_memoized (dfa_stop_insn);
7410 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7411 used by the DFA insn scheduler. */
7414 ia64_dfa_pre_cycle_insn (void)
7416 return dfa_pre_cycle_insn;
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type st or stf).  */
7423 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7427 if (producer == NULL_RTX || consumer == NULL_RTX)
7429 dest = ia64_single_set (producer);
7430 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7431 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7433 if (GET_CODE (reg) == SUBREG)
7434 reg = SUBREG_REG (reg);
7435 dest = ia64_single_set (consumer);
7436 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
7437 || GET_CODE (mem) != MEM)
7439 return reg_mentioned_p (reg, mem);
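/* For example (illustrative, not from the original sources): with
   PRODUCER `adds r3 = 8, r12' and CONSUMER `st8 [r3] = r14', the
   producer's destination r3 is mentioned in the consumer's store
   address, so the function returns TRUE and the corresponding bypass
   in the .md file applies.  */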
/* The following function returns TRUE if PRODUCER (of type ilog or
   ld) produces an address for CONSUMER (of type ld or fld).  */
7446 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7448 rtx dest, src, reg, mem;
7450 if (producer == NULL_RTX || consumer == NULL_RTX)
7452 dest = ia64_single_set (producer);
7453 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7454 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7456 if (GET_CODE (reg) == SUBREG)
7457 reg = SUBREG_REG (reg);
7458 src = ia64_single_set (consumer);
7459 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
7461 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7462 mem = XVECEXP (mem, 0, 0);
7463 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7464 mem = XEXP (mem, 0);
7466 /* Note that LO_SUM is used for GOT loads. */
7467 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
7470 return reg_mentioned_p (reg, mem);
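/* For example (illustrative, not from the original sources): with
   PRODUCER `adds r3 = 8, r12' and CONSUMER `ld8 r4 = [r3]', the
   producer's destination is mentioned in the consumer's load address,
   so the function returns TRUE.  The UNSPEC/LO_SUM unwrapping above
   lets the same test work for GOT loads.  */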
/* The following function returns TRUE if INSN produces an address
   for a load/store insn.  We will place such insns into the M slot
   because it decreases their latency.  */
7478 ia64_produce_address_p (rtx insn)
7484 /* Emit pseudo-ops for the assembler to describe predicate relations.
7485 At present this assumes that we only consider predicate pairs to
7486 be mutex, and that the assembler can deduce proper values from
7487 straight-line code. */
7490 emit_predicate_relation_info (void)
7494 FOR_EACH_BB_REVERSE (bb)
7497 rtx head = BB_HEAD (bb);
7499 /* We only need such notes at code labels. */
7500 if (GET_CODE (head) != CODE_LABEL)
7502 if (GET_CODE (NEXT_INSN (head)) == NOTE
7503 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7504 head = NEXT_INSN (head);
7506 for (r = PR_REG (0); r < PR_REG (64); r += 2)
7507 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
7509 rtx p = gen_rtx_REG (BImode, r);
7510 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7511 if (head == BB_END (bb))
7517 /* Look for conditional calls that do not return, and protect predicate
7518 relations around them. Otherwise the assembler will assume the call
     returns, and complain about uses of call-clobbered predicates after
     the call.  */
7521 FOR_EACH_BB_REVERSE (bb)
7523 rtx insn = BB_HEAD (bb);
7527 if (GET_CODE (insn) == CALL_INSN
7528 && GET_CODE (PATTERN (insn)) == COND_EXEC
7529 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7531 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7532 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7533 if (BB_HEAD (bb) == insn)
7535 if (BB_END (bb) == insn)
7539 if (insn == BB_END (bb))
7541 insn = NEXT_INSN (insn);
7546 /* Perform machine dependent operations on the rtl chain INSNS. */
7551 /* We are freeing block_for_insn in the toplev to keep compatibility
7552 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7553 compute_bb_for_insn ();
7555 /* If optimizing, we'll have split before scheduling. */
7557 split_all_insns (0);
7559 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7560 non-optimizing bootstrap. */
7561 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7563 if (ia64_flag_schedule_insns2)
7565 timevar_push (TV_SCHED2);
7566 ia64_final_schedule = 1;
7568 initiate_bundle_states ();
7569 ia64_nop = make_insn_raw (gen_nop ());
7570 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7571 recog_memoized (ia64_nop);
7572 clocks_length = get_max_uid () + 1;
7573 stops_p = xcalloc (1, clocks_length);
7574 if (ia64_tune == PROCESSOR_ITANIUM)
7576 clocks = xcalloc (clocks_length, sizeof (int));
7577 add_cycles = xcalloc (clocks_length, sizeof (int));
7579 if (ia64_tune == PROCESSOR_ITANIUM2)
7581 pos_1 = get_cpu_unit_code ("2_1");
7582 pos_2 = get_cpu_unit_code ("2_2");
7583 pos_3 = get_cpu_unit_code ("2_3");
7584 pos_4 = get_cpu_unit_code ("2_4");
7585 pos_5 = get_cpu_unit_code ("2_5");
7586 pos_6 = get_cpu_unit_code ("2_6");
7587 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7588 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7589 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7590 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7591 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7592 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7593 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7594 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7595 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7596 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7597 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7598 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7599 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7600 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7601 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7602 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7603 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7604 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7605 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7606 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7610 pos_1 = get_cpu_unit_code ("1_1");
7611 pos_2 = get_cpu_unit_code ("1_2");
7612 pos_3 = get_cpu_unit_code ("1_3");
7613 pos_4 = get_cpu_unit_code ("1_4");
7614 pos_5 = get_cpu_unit_code ("1_5");
7615 pos_6 = get_cpu_unit_code ("1_6");
7616 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7617 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7618 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7619 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7620 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7621 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7622 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7623 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7624 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7625 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7626 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7627 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7628 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7629 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7630 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7631 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7632 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7633 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7634 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7635 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7637 schedule_ebbs (dump_file);
7638 finish_bundle_states ();
7639 if (ia64_tune == PROCESSOR_ITANIUM)
7645 emit_insn_group_barriers (dump_file);
7647 ia64_final_schedule = 0;
7648 timevar_pop (TV_SCHED2);
7651 emit_all_insn_group_barriers (dump_file);
7653 /* A call must not be the last instruction in a function, so that the
7654 return address is still within the function, so that unwinding works
7655 properly. Note that IA-64 differs from dwarf2 on this point. */
7656 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7661 insn = get_last_insn ();
7662 if (! INSN_P (insn))
7663 insn = prev_active_insn (insn);
7664 /* Skip over insns that expand to nothing. */
7665 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7667 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7668 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7670 insn = prev_active_insn (insn);
7672 if (GET_CODE (insn) == CALL_INSN)
7675 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7676 emit_insn (gen_break_f ());
7677 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7682 emit_predicate_relation_info ();
7684 if (ia64_flag_var_tracking)
7686 timevar_push (TV_VAR_TRACKING);
7687 variable_tracking_main ();
7688 timevar_pop (TV_VAR_TRACKING);
7692 /* Return true if REGNO is used by the epilogue. */
7695 ia64_epilogue_uses (int regno)
7700 /* With a call to a function in another module, we will write a new
7701 value to "gp". After returning from such a call, we need to make
7702 sure the function restores the original gp-value, even if the
7703 function itself does not use the gp anymore. */
7704 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7706 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7707 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7708 /* For functions defined with the syscall_linkage attribute, all
7709 input registers are marked as live at all function exits. This
7710 prevents the register allocator from using the input registers,
7711 which in turn makes it possible to restart a system call after
7712 an interrupt without having to save/restore the input registers.
7713 This also prevents kernel data from leaking to application code. */
7714 return lookup_attribute ("syscall_linkage",
7715 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7718 /* Conditional return patterns can't represent the use of `b0' as
7719 the return address, so we force the value live this way. */
7723 /* Likewise for ar.pfs, which is used by br.ret. */
7731 /* Return true if REGNO is used by the frame unwinder. */
7734 ia64_eh_uses (int regno)
7736 if (! reload_completed)
7739 if (current_frame_info.reg_save_b0
7740 && regno == current_frame_info.reg_save_b0)
7742 if (current_frame_info.reg_save_pr
7743 && regno == current_frame_info.reg_save_pr)
7745 if (current_frame_info.reg_save_ar_pfs
7746 && regno == current_frame_info.reg_save_ar_pfs)
7748 if (current_frame_info.reg_save_ar_unat
7749 && regno == current_frame_info.reg_save_ar_unat)
7751 if (current_frame_info.reg_save_ar_lc
7752 && regno == current_frame_info.reg_save_ar_lc)
7758 /* Return true if this goes in small data/bss. */
/* ??? We could also support our own long data here.  Generating
   movl/add/ld8 instead of addl,ld8/ld8.  This makes the code bigger,
   but should make the code faster because there is one less load.
   This also includes incomplete types which can't go in sdata/sbss.  */
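/* As an illustrative sketch (assumed code, not from the original
   sources): a small `static int counter;' placed in sbss is
   addressable with one gp-relative add plus a load,

       addl r14 = @gprel(counter), gp
       ;;
       ld4  r15 = [r14]

   whereas data beyond the addl immediate range needs its address
   loaded from the GOT first (addl/ld8 and then the ld4), which is
   the extra load the ??? comment above refers to.  */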
7766 ia64_in_small_data_p (tree exp)
7768 if (TARGET_NO_SDATA)
7771 /* We want to merge strings, so we never consider them small data. */
7772 if (TREE_CODE (exp) == STRING_CST)
7775 /* Functions are never small data. */
7776 if (TREE_CODE (exp) == FUNCTION_DECL)
7779 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7781 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7782 if (strcmp (section, ".sdata") == 0
7783 || strcmp (section, ".sbss") == 0)
7788 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7790 /* If this is an incomplete type with size 0, then we can't put it
7791 in sdata because it might be too big when completed. */
7792 if (size > 0 && size <= ia64_section_threshold)
7799 /* Output assembly directives for prologue regions. */
/* True if the current basic block is the last one in the function.  */
7803 static bool last_block;
7805 /* True if we need a copy_state command at the start of the next block. */
7807 static bool need_copy_state;
7809 /* The function emits unwind directives for the start of an epilogue. */
7812 process_epilogue (void)
7814 /* If this isn't the last block of the function, then we need to label the
7815 current state, and copy it back in at the start of the next block. */
7819 fprintf (asm_out_file, "\t.label_state 1\n");
7820 need_copy_state = true;
7823 fprintf (asm_out_file, "\t.restore sp\n");
/* This function processes a SET pattern, looking for the specific
   forms which result in emitting an assembly directive required for
   unwinding.  */
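/* For example (illustrative): a frame-related prologue insn
   `sp = sp - 16' results in the directive `.fframe 16', and a
   frame-related copy of b0 into the GR selected as
   current_frame_info.reg_save_b0 results in `.save rp, r<n>',
   matching the cases handled below.  */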
7830 process_set (FILE *asm_out_file, rtx pat)
7832 rtx src = SET_SRC (pat);
7833 rtx dest = SET_DEST (pat);
7834 int src_regno, dest_regno;
7836 /* Look for the ALLOC insn. */
7837 if (GET_CODE (src) == UNSPEC_VOLATILE
7838 && XINT (src, 1) == UNSPECV_ALLOC
7839 && GET_CODE (dest) == REG)
7841 dest_regno = REGNO (dest);
7843 /* If this isn't the final destination for ar.pfs, the alloc
7844 shouldn't have been marked frame related. */
7845 if (dest_regno != current_frame_info.reg_save_ar_pfs)
7848 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7849 ia64_dbx_register_number (dest_regno));
7853 /* Look for SP = .... */
7854 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7856 if (GET_CODE (src) == PLUS)
7858 rtx op0 = XEXP (src, 0);
7859 rtx op1 = XEXP (src, 1);
7860 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7862 if (INTVAL (op1) < 0)
7863 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7866 process_epilogue ();
7871 else if (GET_CODE (src) == REG
7872 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7873 process_epilogue ();
7880 /* Register move we need to look at. */
7881 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7883 src_regno = REGNO (src);
7884 dest_regno = REGNO (dest);
7889 /* Saving return address pointer. */
7890 if (dest_regno != current_frame_info.reg_save_b0)
7892 fprintf (asm_out_file, "\t.save rp, r%d\n",
7893 ia64_dbx_register_number (dest_regno));
7897 if (dest_regno != current_frame_info.reg_save_pr)
7899 fprintf (asm_out_file, "\t.save pr, r%d\n",
7900 ia64_dbx_register_number (dest_regno));
7903 case AR_UNAT_REGNUM:
7904 if (dest_regno != current_frame_info.reg_save_ar_unat)
7906 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7907 ia64_dbx_register_number (dest_regno));
7911 if (dest_regno != current_frame_info.reg_save_ar_lc)
7913 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7914 ia64_dbx_register_number (dest_regno));
7917 case STACK_POINTER_REGNUM:
7918 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7919 || ! frame_pointer_needed)
7921 fprintf (asm_out_file, "\t.vframe r%d\n",
7922 ia64_dbx_register_number (dest_regno));
7926 /* Everything else should indicate being stored to memory. */
7931 /* Memory store we need to look at. */
7932 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7938 if (GET_CODE (XEXP (dest, 0)) == REG)
7940 base = XEXP (dest, 0);
7943 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7944 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7946 base = XEXP (XEXP (dest, 0), 0);
7947 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7952 if (base == hard_frame_pointer_rtx)
7954 saveop = ".savepsp";
7957 else if (base == stack_pointer_rtx)
7962 src_regno = REGNO (src);
7966 if (current_frame_info.reg_save_b0 != 0)
7968 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7972 if (current_frame_info.reg_save_pr != 0)
7974 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7978 if (current_frame_info.reg_save_ar_lc != 0)
7980 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7984 if (current_frame_info.reg_save_ar_pfs != 0)
7986 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7989 case AR_UNAT_REGNUM:
7990 if (current_frame_info.reg_save_ar_unat != 0)
7992 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7999 fprintf (asm_out_file, "\t.save.g 0x%x\n",
8000 1 << (src_regno - GR_REG (4)));
8008 fprintf (asm_out_file, "\t.save.b 0x%x\n",
8009 1 << (src_regno - BR_REG (1)));
8016 fprintf (asm_out_file, "\t.save.f 0x%x\n",
8017 1 << (src_regno - FR_REG (2)));
8020 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
8021 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
8022 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
8023 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
8024 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
8025 1 << (src_regno - FR_REG (12)));
8037 /* This function looks at a single insn and emits any directives
8038 required to unwind this insn. */
8040 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
8042 if (flag_unwind_tables
8043 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8047 if (GET_CODE (insn) == NOTE
8048 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
8050 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
8052 /* Restore unwind state from immediately before the epilogue. */
8053 if (need_copy_state)
8055 fprintf (asm_out_file, "\t.body\n");
8056 fprintf (asm_out_file, "\t.copy_state 1\n");
8057 need_copy_state = false;
8061 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
8064 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
8066 pat = XEXP (pat, 0);
8068 pat = PATTERN (insn);
8070 switch (GET_CODE (pat))
8073 process_set (asm_out_file, pat);
8079 int limit = XVECLEN (pat, 0);
8080 for (par_index = 0; par_index < limit; par_index++)
8082 rtx x = XVECEXP (pat, 0, par_index);
8083 if (GET_CODE (x) == SET)
8084 process_set (asm_out_file, x);
8097 ia64_init_builtins (void)
8099 tree psi_type_node = build_pointer_type (integer_type_node);
8100 tree pdi_type_node = build_pointer_type (long_integer_type_node);
8102 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
8103 tree si_ftype_psi_si_si
8104 = build_function_type_list (integer_type_node,
8105 psi_type_node, integer_type_node,
8106 integer_type_node, NULL_TREE);
8108 /* __sync_val_compare_and_swap_di */
8109 tree di_ftype_pdi_di_di
8110 = build_function_type_list (long_integer_type_node,
8111 pdi_type_node, long_integer_type_node,
8112 long_integer_type_node, NULL_TREE);
8113 /* __sync_bool_compare_and_swap_di */
8114 tree si_ftype_pdi_di_di
8115 = build_function_type_list (integer_type_node,
8116 pdi_type_node, long_integer_type_node,
8117 long_integer_type_node, NULL_TREE);
8118 /* __sync_synchronize */
8119 tree void_ftype_void
8120 = build_function_type (void_type_node, void_list_node);
8122 /* __sync_lock_test_and_set_si */
8123 tree si_ftype_psi_si
8124 = build_function_type_list (integer_type_node,
8125 psi_type_node, integer_type_node, NULL_TREE);
8127 /* __sync_lock_test_and_set_di */
8128 tree di_ftype_pdi_di
8129 = build_function_type_list (long_integer_type_node,
8130 pdi_type_node, long_integer_type_node,
8133 /* __sync_lock_release_si */
8135 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
8137 /* __sync_lock_release_di */
8139 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
8144 /* The __fpreg type. */
8145 fpreg_type = make_node (REAL_TYPE);
8146 /* ??? The back end should know to load/save __fpreg variables using
8147 the ldf.fill and stf.spill instructions. */
8148 TYPE_PRECISION (fpreg_type) = 96;
8149 layout_type (fpreg_type);
8150 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
8152 /* The __float80 type. */
8153 float80_type = make_node (REAL_TYPE);
8154 TYPE_PRECISION (float80_type) = 96;
8155 layout_type (float80_type);
8156 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
8158 /* The __float128 type. */
8161 tree float128_type = make_node (REAL_TYPE);
8162 TYPE_PRECISION (float128_type) = 128;
8163 layout_type (float128_type);
8164 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
8167 /* Under HPUX, this is a synonym for "long double". */
8168 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
8171 #define def_builtin(name, type, code) \
8172 builtin_function ((name), (type), (code), BUILT_IN_MD, NULL, NULL_TREE)
8174 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
8175 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
8176 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
8177 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
8178 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
8179 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
8180 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
8181 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
8183 def_builtin ("__sync_synchronize", void_ftype_void,
8184 IA64_BUILTIN_SYNCHRONIZE);
8186 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
8187 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
8188 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
8189 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
8190 def_builtin ("__sync_lock_release_si", void_ftype_psi,
8191 IA64_BUILTIN_LOCK_RELEASE_SI);
8192 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
8193 IA64_BUILTIN_LOCK_RELEASE_DI);
8195 def_builtin ("__builtin_ia64_bsp",
8196 build_function_type (ptr_type_node, void_list_node),
8199 def_builtin ("__builtin_ia64_flushrs",
8200 build_function_type (void_type_node, void_list_node),
8201 IA64_BUILTIN_FLUSHRS);
8203 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
8204 IA64_BUILTIN_FETCH_AND_ADD_SI);
8205 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
8206 IA64_BUILTIN_FETCH_AND_SUB_SI);
8207 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
8208 IA64_BUILTIN_FETCH_AND_OR_SI);
8209 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
8210 IA64_BUILTIN_FETCH_AND_AND_SI);
8211 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
8212 IA64_BUILTIN_FETCH_AND_XOR_SI);
8213 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
8214 IA64_BUILTIN_FETCH_AND_NAND_SI);
8216 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
8217 IA64_BUILTIN_ADD_AND_FETCH_SI);
8218 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
8219 IA64_BUILTIN_SUB_AND_FETCH_SI);
8220 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
8221 IA64_BUILTIN_OR_AND_FETCH_SI);
8222 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
8223 IA64_BUILTIN_AND_AND_FETCH_SI);
8224 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
8225 IA64_BUILTIN_XOR_AND_FETCH_SI);
8226 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
8227 IA64_BUILTIN_NAND_AND_FETCH_SI);
8229 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
8230 IA64_BUILTIN_FETCH_AND_ADD_DI);
8231 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
8232 IA64_BUILTIN_FETCH_AND_SUB_DI);
8233 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
8234 IA64_BUILTIN_FETCH_AND_OR_DI);
8235 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
8236 IA64_BUILTIN_FETCH_AND_AND_DI);
8237 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
8238 IA64_BUILTIN_FETCH_AND_XOR_DI);
8239 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
8240 IA64_BUILTIN_FETCH_AND_NAND_DI);
8242 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
8243 IA64_BUILTIN_ADD_AND_FETCH_DI);
8244 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
8245 IA64_BUILTIN_SUB_AND_FETCH_DI);
8246 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
8247 IA64_BUILTIN_OR_AND_FETCH_DI);
8248 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
8249 IA64_BUILTIN_AND_AND_FETCH_DI);
8250 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
8251 IA64_BUILTIN_XOR_AND_FETCH_DI);
8252 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
8253 IA64_BUILTIN_NAND_AND_FETCH_DI);
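/* Usage sketch (illustrative, not from the original sources): the
   lock builtins defined above pair up as a simple spinlock,

     static int lock;
     ...
     while (__sync_lock_test_and_set_si (&lock, 1))
       ;
     ... critical section ...
     __sync_lock_release_si (&lock);

   where the release expands to a plain release store of zero (see
   ia64_expand_lock_release below).  */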
/* Expand fetch_and_op intrinsics.  The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       ret = tmp;
       ar.ccv = tmp;
       tmp <op>= value;
       cmpxchgsz.acq tmp = [ptr], tmp
     } while (tmp != ret)
*/
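/* Usage sketch (illustrative, not from the original sources):

     int counter;
     int old = __sync_fetch_and_add_si (&counter, 1);

   When the addend is one of the immediates accepted by
   fetchadd_operand, the expansion below collapses into a single
   `fetchadd4.acq' instead of the generic ar.ccv/cmpxchg loop shown
   above.  */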
8271 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
8272 tree arglist, rtx target)
8274 rtx ret, label, tmp, ccv, insn, mem, value;
8277 arg0 = TREE_VALUE (arglist);
8278 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8279 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8280 #ifdef POINTERS_EXTEND_UNSIGNED
8281 if (GET_MODE(mem) != Pmode)
8282 mem = convert_memory_address (Pmode, mem);
8284 value = expand_expr (arg1, NULL_RTX, mode, 0);
8286 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8287 MEM_VOLATILE_P (mem) = 1;
8289 if (target && register_operand (target, mode))
8292 ret = gen_reg_rtx (mode);
8294 emit_insn (gen_mf ());
8296 /* Special case for fetchadd instructions. */
8297 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
8300 insn = gen_fetchadd_acq_si (ret, mem, value);
8302 insn = gen_fetchadd_acq_di (ret, mem, value);
8307 tmp = gen_reg_rtx (mode);
8308 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8309 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8310 emit_move_insn (tmp, mem);
8312 label = gen_label_rtx ();
8314 emit_move_insn (ret, tmp);
8315 convert_move (ccv, tmp, /*unsignedp=*/1);
  /* Perform the specific operation.  NAND is special-cased: the
     caller passes one_cmpl_optab, and we complement TMP first and
     then AND it with VALUE.  */
8319 if (binoptab == one_cmpl_optab)
8321 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8322 binoptab = and_optab;
8324 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
8327 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
8329 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
8332 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
/* Expand op_and_fetch intrinsics.  The basic code sequence is:

     mf
     tmp = [ptr];
     do {
       old = tmp;
       ar.ccv = tmp;
       ret = tmp <op> value;
       cmpxchgsz.acq tmp = [ptr], ret
     } while (tmp != old)
*/
8350 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
8351 tree arglist, rtx target)
8353 rtx old, label, tmp, ret, ccv, insn, mem, value;
8356 arg0 = TREE_VALUE (arglist);
8357 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8358 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8359 #ifdef POINTERS_EXTEND_UNSIGNED
8360 if (GET_MODE(mem) != Pmode)
8361 mem = convert_memory_address (Pmode, mem);
8364 value = expand_expr (arg1, NULL_RTX, mode, 0);
8366 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8367 MEM_VOLATILE_P (mem) = 1;
8369 if (target && ! register_operand (target, mode))
8372 emit_insn (gen_mf ());
8373 tmp = gen_reg_rtx (mode);
8374 old = gen_reg_rtx (mode);
8375 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8376 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8378 emit_move_insn (tmp, mem);
8380 label = gen_label_rtx ();
8382 emit_move_insn (old, tmp);
8383 convert_move (ccv, tmp, /*unsignedp=*/1);
  /* Perform the specific operation.  NAND is special-cased: the
     caller passes one_cmpl_optab, and we complement TMP first and
     then AND it with VALUE.  */
8387 if (binoptab == one_cmpl_optab)
8389 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8390 binoptab = and_optab;
8392 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
8395 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
8397 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
8400 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
/* Expand val_ and bool_compare_and_swap.  For val_ we want:

     ar.ccv = oldval
     mf
     cmpxchgsz.acq ret = [ptr], newval, ar.ccv
     return ret

   For bool_ it's the same except return ret == oldval.
*/
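/* Usage sketch (illustrative, not from the original sources):

     int lock;
     int acquired = __sync_bool_compare_and_swap_si (&lock, 0, 1);

   The bool_ form materializes `ret == oldval' with
   emit_store_flag_force, so ACQUIRED is nonzero exactly when the
   exchange happened.  */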
8416 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
8417 int boolp, tree arglist, rtx target)
8419 tree arg0, arg1, arg2;
8420 rtx mem, old, new, ccv, tmp, insn;
8422 arg0 = TREE_VALUE (arglist);
8423 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8424 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8425 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8426 old = expand_expr (arg1, NULL_RTX, mode, 0);
8427 new = expand_expr (arg2, NULL_RTX, mode, 0);
8429 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8430 MEM_VOLATILE_P (mem) = 1;
8432 if (GET_MODE (old) != mode)
8433 old = convert_to_mode (mode, old, /*unsignedp=*/1);
8434 if (GET_MODE (new) != mode)
8435 new = convert_to_mode (mode, new, /*unsignedp=*/1);
8437 if (! register_operand (old, mode))
8438 old = copy_to_mode_reg (mode, old);
8439 if (! register_operand (new, mode))
8440 new = copy_to_mode_reg (mode, new);
8442 if (! boolp && target && register_operand (target, mode))
8445 tmp = gen_reg_rtx (mode);
8447 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8448 convert_move (ccv, old, /*unsignedp=*/1);
8449 emit_insn (gen_mf ());
8451 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
8453 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
8459 target = gen_reg_rtx (rmode);
8460 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
8466 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
8469 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
8473 rtx mem, new, ret, insn;
8475 arg0 = TREE_VALUE (arglist);
8476 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8477 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8478 new = expand_expr (arg1, NULL_RTX, mode, 0);
8480 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8481 MEM_VOLATILE_P (mem) = 1;
8482 if (! register_operand (new, mode))
8483 new = copy_to_mode_reg (mode, new);
8485 if (target && register_operand (target, mode))
8488 ret = gen_reg_rtx (mode);
8491 insn = gen_xchgsi (ret, mem, new);
8493 insn = gen_xchgdi (ret, mem, new);

/* Expand lock_release.  I.e. `stsz.rel [ptr] = r0'.  */

static rtx
ia64_expand_lock_release (enum machine_mode mode, tree arglist,
                          rtx target ATTRIBUTE_UNUSED)
{
  tree arg0;
  rtx mem;

  arg0 = TREE_VALUE (arglist);
  mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);

  mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
  MEM_VOLATILE_P (mem) = 1;

  emit_move_insn (mem, const0_rtx);

  return const0_rtx;
}
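
/* Usage sketch (illustrative): `__sync_lock_release_si (&lock)' expands
   to a single store of zero through the volatile MEM; the header comment
   above records the intended `st4.rel'/`st8.rel' form of that store.
   No value is returned, so const0_rtx stands in for the ignored
   target.  */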

rtx
ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                     enum machine_mode mode ATTRIBUTE_UNUSED,
                     int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arglist = TREE_OPERAND (exp, 1);
  enum machine_mode rmode = VOIDmode;

  /* First, select the operand mode (and, for compare-and-swap, the
     result mode) implied by the function code.  */
  switch (fcode)
    {
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
      mode = SImode;
      rmode = SImode;
      break;

    case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
    case IA64_BUILTIN_LOCK_RELEASE_SI:
    case IA64_BUILTIN_FETCH_AND_ADD_SI:
    case IA64_BUILTIN_FETCH_AND_SUB_SI:
    case IA64_BUILTIN_FETCH_AND_OR_SI:
    case IA64_BUILTIN_FETCH_AND_AND_SI:
    case IA64_BUILTIN_FETCH_AND_XOR_SI:
    case IA64_BUILTIN_FETCH_AND_NAND_SI:
    case IA64_BUILTIN_ADD_AND_FETCH_SI:
    case IA64_BUILTIN_SUB_AND_FETCH_SI:
    case IA64_BUILTIN_OR_AND_FETCH_SI:
    case IA64_BUILTIN_AND_AND_FETCH_SI:
    case IA64_BUILTIN_XOR_AND_FETCH_SI:
    case IA64_BUILTIN_NAND_AND_FETCH_SI:
      mode = SImode;
      break;

    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
      mode = DImode;
      rmode = SImode;
      break;

    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
      mode = DImode;
      rmode = DImode;
      break;

    case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
    case IA64_BUILTIN_LOCK_RELEASE_DI:
    case IA64_BUILTIN_FETCH_AND_ADD_DI:
    case IA64_BUILTIN_FETCH_AND_SUB_DI:
    case IA64_BUILTIN_FETCH_AND_OR_DI:
    case IA64_BUILTIN_FETCH_AND_AND_DI:
    case IA64_BUILTIN_FETCH_AND_XOR_DI:
    case IA64_BUILTIN_FETCH_AND_NAND_DI:
    case IA64_BUILTIN_ADD_AND_FETCH_DI:
    case IA64_BUILTIN_SUB_AND_FETCH_DI:
    case IA64_BUILTIN_OR_AND_FETCH_DI:
    case IA64_BUILTIN_AND_AND_FETCH_DI:
    case IA64_BUILTIN_XOR_AND_FETCH_DI:
    case IA64_BUILTIN_NAND_AND_FETCH_DI:
      mode = DImode;
      break;

    default:
      break;
    }

  /* Second, dispatch to the expander for the operation itself.  */
  switch (fcode)
    {
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
      return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
                                           target);

    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
    case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
      return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
                                           target);

    case IA64_BUILTIN_SYNCHRONIZE:
      emit_insn (gen_mf ());
      return const0_rtx;

    case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
    case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
      return ia64_expand_lock_test_and_set (mode, arglist, target);

    case IA64_BUILTIN_LOCK_RELEASE_SI:
    case IA64_BUILTIN_LOCK_RELEASE_DI:
      return ia64_expand_lock_release (mode, arglist, target);

    case IA64_BUILTIN_BSP:
      if (! target || ! register_operand (target, DImode))
        target = gen_reg_rtx (DImode);
      emit_insn (gen_bsp_value (target));
#ifdef POINTERS_EXTEND_UNSIGNED
      target = convert_memory_address (ptr_mode, target);
#endif
      return target;

    case IA64_BUILTIN_FLUSHRS:
      emit_insn (gen_flushrs ());
      return const0_rtx;
    case IA64_BUILTIN_FETCH_AND_ADD_SI:
    case IA64_BUILTIN_FETCH_AND_ADD_DI:
      return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_SUB_SI:
    case IA64_BUILTIN_FETCH_AND_SUB_DI:
      return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_OR_SI:
    case IA64_BUILTIN_FETCH_AND_OR_DI:
      return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_AND_SI:
    case IA64_BUILTIN_FETCH_AND_AND_DI:
      return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_XOR_SI:
    case IA64_BUILTIN_FETCH_AND_XOR_DI:
      return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);

    case IA64_BUILTIN_FETCH_AND_NAND_SI:
    case IA64_BUILTIN_FETCH_AND_NAND_DI:
      return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);

    case IA64_BUILTIN_ADD_AND_FETCH_SI:
    case IA64_BUILTIN_ADD_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);

    case IA64_BUILTIN_SUB_AND_FETCH_SI:
    case IA64_BUILTIN_SUB_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);

    case IA64_BUILTIN_OR_AND_FETCH_SI:
    case IA64_BUILTIN_OR_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);

    case IA64_BUILTIN_AND_AND_FETCH_SI:
    case IA64_BUILTIN_AND_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);

    case IA64_BUILTIN_XOR_AND_FETCH_SI:
    case IA64_BUILTIN_XOR_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);

    case IA64_BUILTIN_NAND_AND_FETCH_SI:
    case IA64_BUILTIN_NAND_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);

    default:
      break;
    }

  return NULL_RTX;
}
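
/* A worked example of the dispatch above: a call to the builtin
   registered as __sync_fetch_and_add_di (name per ia64_init_builtins;
   illustrative) arrives here with fcode == IA64_BUILTIN_FETCH_AND_ADD_DI,
   the first switch selects mode = DImode, and the second switch hands
   add_optab to ia64_expand_fetch_and_op.  */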

/* On HP-UX IA64, aggregate parameters smaller than a stack slot are
   passed in the most significant bits of the slot.  */

enum direction
ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
{
  /* Exception to the normal case for structures/unions/etc.  */

  if (type && AGGREGATE_TYPE_P (type)
      && int_size_in_bytes (type) < UNITS_PER_WORD)
    return upward;

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
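
/* Example (a sketch): a 2-byte struct argument therefore lands in the
   high-order bytes of its 8-byte slot (padded upward), instead of
   whatever DEFAULT_FUNCTION_ARG_PADDING would pick for a scalar of the
   same size.  */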

/* Linked list of all external functions that are to be emitted by GCC.
   We output the name if and only if TREE_SYMBOL_REFERENCED is set in
   order to avoid putting out names that are never really used.  */

struct extern_func_list GTY(())
{
  struct extern_func_list *next;
  tree decl;
};

static GTY(()) struct extern_func_list *extern_func_head;

static void
ia64_hpux_add_extern_decl (tree decl)
{
  struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));

  p->decl = decl;
  p->next = extern_func_head;
  extern_func_head = p;
}

/* Print out the list of used global functions.  */

static void
ia64_hpux_file_end (void)
{
  struct extern_func_list *p;

  for (p = extern_func_head; p; p = p->next)
    {
      tree decl = p->decl;
      tree id = DECL_ASSEMBLER_NAME (decl);

      if (!id)
        abort ();

      if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
        {
          const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

          TREE_ASM_WRITTEN (decl) = 1;
          (*targetm.asm_out.globalize_label) (asm_out_file, name);
          fputs (TYPE_ASM_OP, asm_out_file);
          assemble_name (asm_out_file, name);
          fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
        }
    }

  extern_func_head = 0;
}
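
/* For a referenced but undefined function `foo', the loop above emits
   roughly (the exact directives depend on TYPE_ASM_OP and
   TYPE_OPERAND_FMT for the target):

	.global foo
	.type foo,@function
*/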

/* Set SImode div/mod functions; init_integral_libfuncs only initializes
   modes of word_mode and larger.  Rename the TFmode libfuncs using the
   HP-UX conventions.  __divtf3 is used for XFmode, so we need to keep it
   for backward compatibility.  */

static void
ia64_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
  set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
  set_optab_libfunc (smod_optab, SImode, "__modsi3");
  set_optab_libfunc (umod_optab, SImode, "__umodsi3");

  set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
  set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
  set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
  set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
  set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

  set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
  set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
  set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
  set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");

  set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
  set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
  set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
  set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");

  set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
}

/* Rename all the TFmode libfuncs using the HP-UX conventions.  */

static void
ia64_hpux_init_libfuncs (void)
{
  ia64_init_libfuncs ();
  set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
  set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
  set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");

  /* ia64_expand_compare uses this.  */
  cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");

  /* These should never be used.  */
  set_optab_libfunc (eq_optab, TFmode, 0);
  set_optab_libfunc (ne_optab, TFmode, 0);
  set_optab_libfunc (gt_optab, TFmode, 0);
  set_optab_libfunc (ge_optab, TFmode, 0);
  set_optab_libfunc (lt_optab, TFmode, 0);
  set_optab_libfunc (le_optab, TFmode, 0);
}
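
/* Clearing the comparison optabs above steers TFmode comparisons
   through cmptf_libfunc (_U_Qfcmp) in ia64_expand_compare rather than
   through per-operator libcalls.  */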

/* Rename the division and modulus functions on VMS.  */

static void
ia64_vms_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
  set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
  set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
  set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
  set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
  set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
  set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
  set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
}

/* Rename the TFmode libfuncs available from soft-fp in glibc using
   the HP-UX conventions.  */

static void
ia64_sysv4_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  /* These functions are not part of the HP-UX TFmode interface.  We
     use them instead of _U_Qfcmp, which doesn't work the way we
     expect.  */
  set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
  set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
  set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
  set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
  set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
  set_optab_libfunc (le_optab, TFmode, "_U_Qfle");

  /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
     glibc doesn't have them.  */
}

/* Switch to the section to which we should output X.  The only thing
   special we do here is to honor small data.  */

static void
ia64_select_rtx_section (enum machine_mode mode, rtx x,
                         unsigned HOST_WIDE_INT align)
{
  if (GET_MODE_SIZE (mode) > 0
      && GET_MODE_SIZE (mode) <= ia64_section_threshold)
    sdata_section ();
  else
    default_elf_select_rtx_section (mode, x, align);
}
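
/* "Small data" here means objects small enough to live in .sdata, where
   they can be addressed gp-relative with a short addressing sequence;
   ia64_section_threshold is the size cutoff established during option
   processing.  */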

/* It is illegal to have relocations in shared segments on AIX and HPUX.
   Pretend flag_pic is always set.  */

static void
ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
{
  default_elf_select_section_1 (exp, reloc, align, true);
}

static void
ia64_rwreloc_unique_section (tree decl, int reloc)
{
  default_unique_section_1 (decl, reloc, true);
}

static void
ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align)
{
  int save_pic = flag_pic;
  flag_pic = 1;
  ia64_select_rtx_section (mode, x, align);
  flag_pic = save_pic;
}

static unsigned int
ia64_rwreloc_section_type_flags (tree decl, const char *name, int reloc)
{
  return default_section_type_flags_1 (decl, name, reloc, true);
}

/* Return true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
   structure type, and the address of that structure should be passed
   in out0 rather than in r8.  */

static bool
ia64_struct_retval_addr_is_first_parm_p (tree fntype)
{
  tree ret_type = TREE_TYPE (fntype);

  /* The Itanium C++ ABI requires that out0, rather than r8, be used
     as the structure return address parameter, if the return value
     type has a non-trivial copy constructor or destructor.  It is not
     clear if this same convention should be used for other
     programming languages.  Until G++ 3.4, we incorrectly used r8 for
     these return values.  */
  return (abi_version_at_least (2)
          && ret_type
          && TYPE_MODE (ret_type) == BLKmode
          && TREE_ADDRESSABLE (ret_type)
          && strcmp (lang_hooks.name, "GNU C++") == 0);
}
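
/* Illustrative C++ case (hypothetical types):

	struct S { S (const S &); ~S (); };
	S f ();

   S has a non-trivial copy constructor, so under the Itanium C++ ABI
   the return-value address for f travels in out0 and this predicate
   is true.  */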

/* Output the assembler code for a thunk function.  THUNK is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this, insn, funexp;
  unsigned int this_parmno;
  unsigned int this_regno;
  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  reset_block_changes ();

  /* Set things up as ia64_expand_prologue might.  */
  last_scratch_gr_reg = 15;

  memset (&current_frame_info, 0, sizeof (current_frame_info));
  current_frame_info.spill_cfa_off = -16;
  current_frame_info.n_input_regs = 1;
  current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Figure out whether "this" will be the first parameter (the
     typical case) or the second parameter (as happens when the
     virtual function returns certain class objects).  */
  this_parmno
    = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
       ? 1 : 0);
  this_regno = IN_REG (this_parmno);
  if (!TARGET_REG_NAMES)
    reg_names[this_regno] = ia64_reg_numbers[this_parmno];

  this = gen_rtx_REG (Pmode, this_regno);
  if (TARGET_ILP32)
    {
      rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
      REG_POINTER (tmp) = 1;
      if (delta && CONST_OK_FOR_I (delta))
        {
          emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
          delta = 0;
        }
      else
        emit_insn (gen_ptr_extend (this, tmp));
    }

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (!CONST_OK_FOR_I (delta))
        {
          rtx tmp = gen_rtx_REG (Pmode, 2);
          emit_move_insn (tmp, delta_rtx);
          delta_rtx = tmp;
        }
      emit_insn (gen_adddi3 (this, this, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 2);

      if (TARGET_ILP32)
        {
          rtx t = gen_rtx_REG (ptr_mode, 2);
          REG_POINTER (t) = 1;
          emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
          if (CONST_OK_FOR_I (vcall_offset))
            {
              emit_insn (gen_ptr_extend_plus_imm (tmp, t,
                                                  vcall_offset_rtx));
              vcall_offset = 0;
            }
          else
            emit_insn (gen_ptr_extend (tmp, t));
        }
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      if (vcall_offset)
        {
          if (!CONST_OK_FOR_J (vcall_offset))
            {
              rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
              emit_move_insn (tmp2, vcall_offset_rtx);
              vcall_offset_rtx = tmp2;
            }
          emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
        }

      if (TARGET_ILP32)
        emit_move_insn (gen_rtx_REG (ptr_mode, 2),
                        gen_rtx_MEM (ptr_mode, tmp));
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1, 0);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}
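
/* For a simple thunk (small DELTA, no VCALL_OFFSET) the code emitted
   above comes out roughly as (a sketch, not the literal output):

	adds in0 = delta, in0
	br.cond.sptk.many f#

   i.e. adjust `this' in place and tail-branch to the real function.  */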

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
ia64_struct_value_rtx (tree fntype,
                       int incoming ATTRIBUTE_UNUSED)
{
  if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}

#include "gt-ia64.h"