1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2, or (at your option)
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING. If not, write to
21 the Free Software Foundation, 59 Temple Place - Suite 330,
22 Boston, MA 02111-1307, USA. */
26 #include "coretypes.h"
31 #include "hard-reg-set.h"
33 #include "insn-config.h"
34 #include "conditions.h"
36 #include "insn-attr.h"
44 #include "basic-block.h"
46 #include "sched-int.h"
49 #include "target-def.h"
52 #include "langhooks.h"
53 #include "cfglayout.h"
54 #include "tree-gimple.h"
56 /* This is used for communication between ASM_OUTPUT_LABEL and
57 ASM_OUTPUT_LABELREF. */
58 int ia64_asm_output_label = 0;
60 /* Define the information needed to generate branch and scc insns. This is
61 stored from the compare operation. */
62 struct rtx_def * ia64_compare_op0;
63 struct rtx_def * ia64_compare_op1;
65 /* Register names for ia64_expand_prologue. */
66 static const char * const ia64_reg_numbers[96] =
67 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
68 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
69 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
70 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
71 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
72 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
73 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
74 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
75 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
76 "r104","r105","r106","r107","r108","r109","r110","r111",
77 "r112","r113","r114","r115","r116","r117","r118","r119",
78 "r120","r121","r122","r123","r124","r125","r126","r127"};
80 /* ??? These strings could be shared with REGISTER_NAMES. */
81 static const char * const ia64_input_reg_names[8] =
82 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
84 /* ??? These strings could be shared with REGISTER_NAMES. */
85 static const char * const ia64_local_reg_names[80] =
86 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
87 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
88 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
89 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
90 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
91 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
92 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
93 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
94 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
95 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
97 /* ??? These strings could be shared with REGISTER_NAMES. */
98 static const char * const ia64_output_reg_names[8] =
99 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
101 /* String used with the -mfixed-range= option. */
102 const char *ia64_fixed_range_string;
104 /* Determines whether we use adds, addl, or movl to generate our
105 TLS immediate offsets. */
106 int ia64_tls_size = 22;
108 /* String used with the -mtls-size= option. */
109 const char *ia64_tls_size_string;
111 /* Which cpu are we scheduling for. */
112 enum processor_type ia64_tune;
114 /* String used with the -tune= option. */
115 const char *ia64_tune_string;
117 /* Determines whether we run our final scheduling pass or not. We always
118 avoid the normal second scheduling pass. */
119 static int ia64_flag_schedule_insns2;
121 /* Determines whether we run variable tracking in machine dependent
123 static int ia64_flag_var_tracking;
125 /* Variables which are this size or smaller are put in the sdata/sbss
128 unsigned int ia64_section_threshold;
130 /* The following variable is used by the DFA insn scheduler. The value is
131 TRUE if we do insn bundling instead of insn scheduling. */
134 /* Structure to be filled in by ia64_compute_frame_size with register
135 save masks and offsets for the current function. */
137 struct ia64_frame_info
139 HOST_WIDE_INT total_size; /* size of the stack frame, not including
140 the caller's scratch area. */
141 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
142 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
143 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
144 HARD_REG_SET mask; /* mask of saved registers. */
145 unsigned int gr_used_mask; /* mask of registers in use as gr spill
146 registers or long-term scratches. */
147 int n_spilled; /* number of spilled registers. */
148 int reg_fp; /* register for fp. */
149 int reg_save_b0; /* save register for b0. */
150 int reg_save_pr; /* save register for prs. */
151 int reg_save_ar_pfs; /* save register for ar.pfs. */
152 int reg_save_ar_unat; /* save register for ar.unat. */
153 int reg_save_ar_lc; /* save register for ar.lc. */
154 int reg_save_gp; /* save register for gp. */
155 int n_input_regs; /* number of input registers used. */
156 int n_local_regs; /* number of local registers used. */
157 int n_output_regs; /* number of output registers used. */
158 int n_rotate_regs; /* number of rotating registers used. */
160 char need_regstk; /* true if a .regstk directive needed. */
161 char initialized; /* true if the data is finalized. */
164 /* Current frame information calculated by ia64_compute_frame_size. */
165 static struct ia64_frame_info current_frame_info;
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
173 static rtx gen_tls_get_addr (void);
174 static rtx gen_thread_pointer (void);
175 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
176 static int find_gr_spill (int);
177 static int next_scratch_gr_reg (void);
178 static void mark_reg_gr_used_mask (rtx, void *);
179 static void ia64_compute_frame_size (HOST_WIDE_INT);
180 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
181 static void finish_spill_pointers (void);
182 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
183 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
184 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
185 static rtx gen_movdi_x (rtx, rtx, rtx);
186 static rtx gen_fr_spill_x (rtx, rtx, rtx);
187 static rtx gen_fr_restore_x (rtx, rtx, rtx);
189 static enum machine_mode hfa_element_mode (tree, bool);
190 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
192 static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
194 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
196 static bool ia64_function_ok_for_sibcall (tree, tree);
197 static bool ia64_return_in_memory (tree, tree);
198 static bool ia64_rtx_costs (rtx, int, int, int *);
199 static void fix_range (const char *);
200 static struct machine_function * ia64_init_machine_status (void);
201 static void emit_insn_group_barriers (FILE *);
202 static void emit_all_insn_group_barriers (FILE *);
203 static void final_emit_insn_group_barriers (FILE *);
204 static void emit_predicate_relation_info (void);
205 static void ia64_reorg (void);
206 static bool ia64_in_small_data_p (tree);
207 static void process_epilogue (void);
208 static int process_set (FILE *, rtx);
210 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
211 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
212 static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
214 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
215 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
216 static bool ia64_assemble_integer (rtx, unsigned int, int);
217 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
218 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
219 static void ia64_output_function_end_prologue (FILE *);
221 static int ia64_issue_rate (void);
222 static int ia64_adjust_cost (rtx, rtx, rtx, int);
223 static void ia64_sched_init (FILE *, int, int);
224 static void ia64_sched_finish (FILE *, int);
225 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
226 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
227 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
228 static int ia64_variable_issue (FILE *, int, rtx, int);
230 static struct bundle_state *get_free_bundle_state (void);
231 static void free_bundle_state (struct bundle_state *);
232 static void initiate_bundle_states (void);
233 static void finish_bundle_states (void);
234 static unsigned bundle_state_hash (const void *);
235 static int bundle_state_eq_p (const void *, const void *);
236 static int insert_bundle_state (struct bundle_state *);
237 static void initiate_bundle_state_table (void);
238 static void finish_bundle_state_table (void);
239 static int try_issue_nops (struct bundle_state *, int);
240 static int try_issue_insn (struct bundle_state *, rtx);
241 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
242 static int get_max_pos (state_t);
243 static int get_template (state_t, int);
245 static rtx get_next_important_insn (rtx, rtx);
246 static void bundling (FILE *, int, rtx, rtx);
248 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
249 HOST_WIDE_INT, tree);
250 static void ia64_file_start (void);
252 static void ia64_select_rtx_section (enum machine_mode, rtx,
253 unsigned HOST_WIDE_INT);
254 static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
256 static void ia64_rwreloc_unique_section (tree, int)
258 static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
259 unsigned HOST_WIDE_INT)
261 static unsigned int ia64_section_type_flags (tree, const char *, int);
262 static void ia64_hpux_add_extern_decl (tree decl)
264 static void ia64_hpux_file_end (void)
266 static void ia64_init_libfuncs (void)
268 static void ia64_hpux_init_libfuncs (void)
270 static void ia64_sysv4_init_libfuncs (void)
272 static void ia64_vms_init_libfuncs (void)
275 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
276 static void ia64_encode_section_info (tree, rtx, int);
277 static rtx ia64_struct_value_rtx (tree, int);
278 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
279 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
280 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
283 /* Table of valid machine attributes. */
284 static const struct attribute_spec ia64_attribute_table[] =
286 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
287 { "syscall_linkage", 0, 0, false, true, true, NULL },
288 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
289 { NULL, 0, 0, false, false, false, NULL }
292 /* Initialize the GCC target structure. */
293 #undef TARGET_ATTRIBUTE_TABLE
294 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
296 #undef TARGET_INIT_BUILTINS
297 #define TARGET_INIT_BUILTINS ia64_init_builtins
299 #undef TARGET_EXPAND_BUILTIN
300 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
302 #undef TARGET_ASM_BYTE_OP
303 #define TARGET_ASM_BYTE_OP "\tdata1\t"
304 #undef TARGET_ASM_ALIGNED_HI_OP
305 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
306 #undef TARGET_ASM_ALIGNED_SI_OP
307 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
308 #undef TARGET_ASM_ALIGNED_DI_OP
309 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
310 #undef TARGET_ASM_UNALIGNED_HI_OP
311 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
312 #undef TARGET_ASM_UNALIGNED_SI_OP
313 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
314 #undef TARGET_ASM_UNALIGNED_DI_OP
315 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
316 #undef TARGET_ASM_INTEGER
317 #define TARGET_ASM_INTEGER ia64_assemble_integer
319 #undef TARGET_ASM_FUNCTION_PROLOGUE
320 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
321 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
322 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
323 #undef TARGET_ASM_FUNCTION_EPILOGUE
324 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
326 #undef TARGET_IN_SMALL_DATA_P
327 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
329 #undef TARGET_SCHED_ADJUST_COST
330 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
331 #undef TARGET_SCHED_ISSUE_RATE
332 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
333 #undef TARGET_SCHED_VARIABLE_ISSUE
334 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
335 #undef TARGET_SCHED_INIT
336 #define TARGET_SCHED_INIT ia64_sched_init
337 #undef TARGET_SCHED_FINISH
338 #define TARGET_SCHED_FINISH ia64_sched_finish
339 #undef TARGET_SCHED_REORDER
340 #define TARGET_SCHED_REORDER ia64_sched_reorder
341 #undef TARGET_SCHED_REORDER2
342 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
344 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
345 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
347 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
348 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
350 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
351 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
352 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
353 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
355 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
356 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
357 ia64_first_cycle_multipass_dfa_lookahead_guard
359 #undef TARGET_SCHED_DFA_NEW_CYCLE
360 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
362 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
363 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
364 #undef TARGET_PASS_BY_REFERENCE
365 #define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference
366 #undef TARGET_ARG_PARTIAL_BYTES
367 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
369 #undef TARGET_ASM_OUTPUT_MI_THUNK
370 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
371 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
372 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
374 #undef TARGET_ASM_FILE_START
375 #define TARGET_ASM_FILE_START ia64_file_start
377 #undef TARGET_RTX_COSTS
378 #define TARGET_RTX_COSTS ia64_rtx_costs
379 #undef TARGET_ADDRESS_COST
380 #define TARGET_ADDRESS_COST hook_int_rtx_0
382 #undef TARGET_MACHINE_DEPENDENT_REORG
383 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
385 #undef TARGET_ENCODE_SECTION_INFO
386 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
388 #undef TARGET_SECTION_TYPE_FLAGS
389 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
391 /* ??? ABI doesn't allow us to define this. */
393 #undef TARGET_PROMOTE_FUNCTION_ARGS
394 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
397 /* ??? ABI doesn't allow us to define this. */
399 #undef TARGET_PROMOTE_FUNCTION_RETURN
400 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
403 /* ??? Investigate. */
405 #undef TARGET_PROMOTE_PROTOTYPES
406 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
409 #undef TARGET_STRUCT_VALUE_RTX
410 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
411 #undef TARGET_RETURN_IN_MEMORY
412 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
413 #undef TARGET_SETUP_INCOMING_VARARGS
414 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
415 #undef TARGET_STRICT_ARGUMENT_NAMING
416 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
417 #undef TARGET_MUST_PASS_IN_STACK
418 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
420 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
421 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
423 #undef TARGET_UNWIND_EMIT
424 #define TARGET_UNWIND_EMIT process_for_unwind_directive
426 #undef TARGET_SCALAR_MODE_SUPPORTED_P
427 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
428 #undef TARGET_VECTOR_MODE_SUPPORTED_P
429 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
431 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
432 in an order different from the specified program order. */
433 #undef TARGET_RELAXED_ORDERING
434 #define TARGET_RELAXED_ORDERING true
436 struct gcc_target targetm = TARGET_INITIALIZER;
440 ADDR_AREA_NORMAL, /* normal address area */
441 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
445 static GTY(()) tree small_ident1;
446 static GTY(()) tree small_ident2;
451 if (small_ident1 == 0)
453 small_ident1 = get_identifier ("small");
454 small_ident2 = get_identifier ("__small__");
458 /* Retrieve the address area that has been chosen for the given decl. */
460 static ia64_addr_area
461 ia64_get_addr_area (tree decl)
465 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
471 id = TREE_VALUE (TREE_VALUE (model_attr));
472 if (id == small_ident1 || id == small_ident2)
473 return ADDR_AREA_SMALL;
475 return ADDR_AREA_NORMAL;
479 ia64_handle_model_attribute (tree *node, tree name, tree args,
480 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
482 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
484 tree arg, decl = *node;
487 arg = TREE_VALUE (args);
488 if (arg == small_ident1 || arg == small_ident2)
490 addr_area = ADDR_AREA_SMALL;
494 warning ("invalid argument of %qs attribute",
495 IDENTIFIER_POINTER (name));
496 *no_add_attrs = true;
499 switch (TREE_CODE (decl))
502 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
504 && !TREE_STATIC (decl))
506 error ("%Jan address area attribute cannot be specified for "
507 "local variables", decl, decl);
508 *no_add_attrs = true;
510 area = ia64_get_addr_area (decl);
511 if (area != ADDR_AREA_NORMAL && addr_area != area)
513 error ("%Jaddress area of '%s' conflicts with previous "
514 "declaration", decl, decl);
515 *no_add_attrs = true;
520 error ("%Jaddress area attribute cannot be specified for functions",
522 *no_add_attrs = true;
526 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
527 *no_add_attrs = true;
535 ia64_encode_addr_area (tree decl, rtx symbol)
539 flags = SYMBOL_REF_FLAGS (symbol);
540 switch (ia64_get_addr_area (decl))
542 case ADDR_AREA_NORMAL: break;
543 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
546 SYMBOL_REF_FLAGS (symbol) = flags;
550 ia64_encode_section_info (tree decl, rtx rtl, int first)
552 default_encode_section_info (decl, rtl, first);
554 /* Careful not to prod global register variables. */
555 if (TREE_CODE (decl) == VAR_DECL
556 && GET_CODE (DECL_RTL (decl)) == MEM
557 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
558 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
559 ia64_encode_addr_area (decl, XEXP (rtl, 0));
562 /* Implement CONST_OK_FOR_LETTER_P. */
565 ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
570 return CONST_OK_FOR_I (value);
572 return CONST_OK_FOR_J (value);
574 return CONST_OK_FOR_K (value);
576 return CONST_OK_FOR_L (value);
578 return CONST_OK_FOR_M (value);
580 return CONST_OK_FOR_N (value);
582 return CONST_OK_FOR_O (value);
584 return CONST_OK_FOR_P (value);
590 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
593 ia64_const_double_ok_for_letter_p (rtx value, char c)
598 return CONST_DOUBLE_OK_FOR_G (value);
604 /* Implement EXTRA_CONSTRAINT. */
607 ia64_extra_constraint (rtx value, char c)
612 /* Non-volatile memory for FP_REG loads/stores. */
613 return memory_operand(value, VOIDmode) && !MEM_VOLATILE_P (value);
616 /* 1..4 for shladd arguments. */
617 return (GET_CODE (value) == CONST_INT
618 && INTVAL (value) >= 1 && INTVAL (value) <= 4);
621 /* Non-post-inc memory for asms and other unsavory creatures. */
622 return (GET_CODE (value) == MEM
623 && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
624 && (reload_in_progress || memory_operand (value, VOIDmode)));
627 /* Symbol ref to small-address-area. */
628 return (GET_CODE (value) == SYMBOL_REF
629 && SYMBOL_REF_SMALL_ADDR_P (value));
633 return value == CONST0_RTX (GET_MODE (value));
636 /* An integer vector, such that conversion to an integer yields a
637 value appropriate for an integer 'J' constraint. */
638 if (GET_CODE (value) == CONST_VECTOR
639 && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
641 value = simplify_subreg (DImode, value, GET_MODE (value), 0);
642 return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
647 /* A V2SF vector containing elements that satisfy 'G'. */
649 (GET_CODE (value) == CONST_VECTOR
650 && GET_MODE (value) == V2SFmode
651 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
652 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
659 /* Return 1 if the operands of a move are ok. */
662 ia64_move_ok (rtx dst, rtx src)
664 /* If we're under init_recog_no_volatile, we'll not be able to use
665 memory_operand. So check the code directly and don't worry about
666 the validity of the underlying address, which should have been
667 checked elsewhere anyway. */
668 if (GET_CODE (dst) != MEM)
670 if (GET_CODE (src) == MEM)
672 if (register_operand (src, VOIDmode))
675 /* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */
676 if (INTEGRAL_MODE_P (GET_MODE (dst)))
677 return src == const0_rtx;
679 return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
683 addp4_optimize_ok (rtx op1, rtx op2)
685 return (basereg_operand (op1, GET_MODE(op1)) !=
686 basereg_operand (op2, GET_MODE(op2)));
689 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
690 Return the length of the field, or <= 0 on failure. */
693 ia64_depz_field_mask (rtx rop, rtx rshift)
695 unsigned HOST_WIDE_INT op = INTVAL (rop);
696 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
698 /* Get rid of the zero bits we're shifting in. */
701 /* We must now have a solid block of 1's at bit 0. */
702 return exact_log2 (op + 1);
705 /* Expand a symbolic constant load. */
708 ia64_expand_load_address (rtx dest, rtx src)
710 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (src))
712 if (GET_CODE (dest) != REG)
715 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
716 having to pointer-extend the value afterward. Other forms of address
717 computation below are also more natural to compute as 64-bit quantities.
718 If we've been given an SImode destination register, change it. */
719 if (GET_MODE (dest) != Pmode)
720 dest = gen_rtx_REG (Pmode, REGNO (dest));
722 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
724 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
727 else if (TARGET_AUTO_PIC)
729 emit_insn (gen_load_gprel64 (dest, src));
732 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
734 emit_insn (gen_load_fptr (dest, src));
737 else if (sdata_symbolic_operand (src, VOIDmode))
739 emit_insn (gen_load_gprel (dest, src));
743 if (GET_CODE (src) == CONST
744 && GET_CODE (XEXP (src, 0)) == PLUS
745 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
746 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x3fff) != 0)
748 rtx sym = XEXP (XEXP (src, 0), 0);
749 HOST_WIDE_INT ofs, hi, lo;
751 /* Split the offset into a sign extended 14-bit low part
752 and a complementary high part. */
753 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
754 lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
757 ia64_expand_load_address (dest, plus_constant (sym, hi));
758 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
764 tmp = gen_rtx_HIGH (Pmode, src);
765 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
766 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
768 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
769 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
773 static GTY(()) rtx gen_tls_tga;
775 gen_tls_get_addr (void)
778 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
782 static GTY(()) rtx thread_pointer_rtx;
784 gen_thread_pointer (void)
786 if (!thread_pointer_rtx)
787 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
788 return thread_pointer_rtx;
792 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
794 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
799 case TLS_MODEL_GLOBAL_DYNAMIC:
802 tga_op1 = gen_reg_rtx (Pmode);
803 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
804 tga_op1 = gen_const_mem (Pmode, tga_op1);
806 tga_op2 = gen_reg_rtx (Pmode);
807 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
808 tga_op2 = gen_const_mem (Pmode, tga_op2);
810 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
811 LCT_CONST, Pmode, 2, tga_op1,
812 Pmode, tga_op2, Pmode);
814 insns = get_insns ();
817 if (GET_MODE (op0) != Pmode)
819 emit_libcall_block (insns, op0, tga_ret, op1);
822 case TLS_MODEL_LOCAL_DYNAMIC:
823 /* ??? This isn't the completely proper way to do local-dynamic
824 If the call to __tls_get_addr is used only by a single symbol,
825 then we should (somehow) move the dtprel to the second arg
826 to avoid the extra add. */
829 tga_op1 = gen_reg_rtx (Pmode);
830 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
831 tga_op1 = gen_const_mem (Pmode, tga_op1);
833 tga_op2 = const0_rtx;
835 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
836 LCT_CONST, Pmode, 2, tga_op1,
837 Pmode, tga_op2, Pmode);
839 insns = get_insns ();
842 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
844 tmp = gen_reg_rtx (Pmode);
845 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
847 if (!register_operand (op0, Pmode))
848 op0 = gen_reg_rtx (Pmode);
851 emit_insn (gen_load_dtprel (op0, op1));
852 emit_insn (gen_adddi3 (op0, tmp, op0));
855 emit_insn (gen_add_dtprel (op0, tmp, op1));
858 case TLS_MODEL_INITIAL_EXEC:
859 tmp = gen_reg_rtx (Pmode);
860 emit_insn (gen_load_ltoff_tprel (tmp, op1));
861 tmp = gen_const_mem (Pmode, tmp);
862 tmp = force_reg (Pmode, tmp);
864 if (!register_operand (op0, Pmode))
865 op0 = gen_reg_rtx (Pmode);
866 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
869 case TLS_MODEL_LOCAL_EXEC:
870 if (!register_operand (op0, Pmode))
871 op0 = gen_reg_rtx (Pmode);
874 emit_insn (gen_load_tprel (op0, op1));
875 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
878 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
887 if (GET_MODE (orig_op0) == Pmode)
889 return gen_lowpart (GET_MODE (orig_op0), op0);
893 ia64_expand_move (rtx op0, rtx op1)
895 enum machine_mode mode = GET_MODE (op0);
897 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
898 op1 = force_reg (mode, op1);
900 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
902 enum tls_model tls_kind;
903 if (GET_CODE (op1) == SYMBOL_REF
904 && (tls_kind = SYMBOL_REF_TLS_MODEL (op1)))
905 return ia64_expand_tls_address (tls_kind, op0, op1);
907 if (!TARGET_NO_PIC && reload_completed)
909 ia64_expand_load_address (op0, op1);
917 /* Split a move from OP1 to OP0 conditional on COND. */
920 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
922 rtx insn, first = get_last_insn ();
924 emit_move_insn (op0, op1);
926 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
928 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
932 /* Split a post-reload TImode or TFmode reference into two DImode
933 components. This is made extra difficult by the fact that we do
934 not get any scratch registers to work with, because reload cannot
935 be prevented from giving us a scratch that overlaps the register
936 pair involved. So instead, when addressing memory, we tweak the
937 pointer register up and back down with POST_INCs. Or up and not
938 back down when we can get away with it.
940 REVERSED is true when the loads must be done in reversed order
941 (high word first) for correctness. DEAD is true when the pointer
942 dies with the second insn we generate and therefore the second
943 address must not carry a postmodify.
945 May return an insn which is to be emitted after the moves. */
948 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
952 switch (GET_CODE (in))
955 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
956 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
961 /* Cannot occur reversed. */
962 if (reversed) abort ();
964 if (GET_MODE (in) != TFmode)
965 split_double (in, &out[0], &out[1]);
967 /* split_double does not understand how to split a TFmode
968 quantity into a pair of DImode constants. */
971 unsigned HOST_WIDE_INT p[2];
972 long l[4]; /* TFmode is 128 bits */
974 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
975 real_to_target (l, &r, TFmode);
977 if (FLOAT_WORDS_BIG_ENDIAN)
979 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
980 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
984 p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
985 p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
987 out[0] = GEN_INT (p[0]);
988 out[1] = GEN_INT (p[1]);
994 rtx base = XEXP (in, 0);
997 switch (GET_CODE (base))
1002 out[0] = adjust_automodify_address
1003 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1004 out[1] = adjust_automodify_address
1005 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1009 /* Reversal requires a pre-increment, which can only
1010 be done as a separate insn. */
1011 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1012 out[0] = adjust_automodify_address
1013 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1014 out[1] = adjust_address (in, DImode, 0);
1019 if (reversed || dead) abort ();
1020 /* Just do the increment in two steps. */
1021 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1022 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1026 if (reversed || dead) abort ();
1027 /* Add 8, subtract 24. */
1028 base = XEXP (base, 0);
1029 out[0] = adjust_automodify_address
1030 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1031 out[1] = adjust_automodify_address
1033 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1038 if (reversed || dead) abort ();
1039 /* Extract and adjust the modification. This case is
1040 trickier than the others, because we might have an
1041 index register, or we might have a combined offset that
1042 doesn't fit a signed 9-bit displacement field. We can
1043 assume the incoming expression is already legitimate. */
1044 offset = XEXP (base, 1);
1045 base = XEXP (base, 0);
1047 out[0] = adjust_automodify_address
1048 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1050 if (GET_CODE (XEXP (offset, 1)) == REG)
1052 /* Can't adjust the postmodify to match. Emit the
1053 original, then a separate addition insn. */
1054 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1055 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1057 else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
1059 else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1061 /* Again the postmodify cannot be made to match, but
1062 in this case it's more efficient to get rid of the
1063 postmodify entirely and fix up with an add insn. */
1064 out[1] = adjust_automodify_address (in, DImode, base, 8);
1065 fixup = gen_adddi3 (base, base,
1066 GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1070 /* Combined offset still fits in the displacement field.
1071 (We cannot overflow it at the high end.) */
1072 out[1] = adjust_automodify_address
1074 gen_rtx_POST_MODIFY (Pmode, base,
1075 gen_rtx_PLUS (Pmode, base,
1076 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1094 /* Split a TImode or TFmode move instruction after reload.
1095 This is used by *movtf_internal and *movti_internal. */
1097 ia64_split_tmode_move (rtx operands[])
1099 rtx in[2], out[2], insn;
1102 bool reversed = false;
1104 /* It is possible for reload to decide to overwrite a pointer with
1105 the value it points to. In that case we have to do the loads in
1106 the appropriate order so that the pointer is not destroyed too
1107 early. Also we must not generate a postmodify for that second
1108 load, or rws_access_regno will abort. */
1109 if (GET_CODE (operands[1]) == MEM
1110 && reg_overlap_mentioned_p (operands[0], operands[1]))
1112 rtx base = XEXP (operands[1], 0);
1113 while (GET_CODE (base) != REG)
1114 base = XEXP (base, 0);
1116 if (REGNO (base) == REGNO (operands[0]))
1120 /* Another reason to do the moves in reversed order is if the first
1121 element of the target register pair is also the second element of
1122 the source register pair. */
1123 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1124 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1127 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1128 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1130 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1131 if (GET_CODE (EXP) == MEM \
1132 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1133 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1134 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1135 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1136 XEXP (XEXP (EXP, 0), 0), \
1139 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1140 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1141 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1143 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1144 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1145 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1148 emit_insn (fixup[0]);
1150 emit_insn (fixup[1]);
1152 #undef MAYBE_ADD_REG_INC_NOTE
1155 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1156 through memory plus an extra GR scratch register. Except that you can
1157 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1158 SECONDARY_RELOAD_CLASS, but not both.
1160 We got into problems in the first place by allowing a construct like
1161 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1162 This solution attempts to prevent this situation from occurring. When
1163 we see something like the above, we spill the inner register to memory. */
1166 spill_xfmode_operand (rtx in, int force)
1168 if (GET_CODE (in) == SUBREG
1169 && GET_MODE (SUBREG_REG (in)) == TImode
1170 && GET_CODE (SUBREG_REG (in)) == REG)
1172 rtx memt = assign_stack_temp (TImode, 16, 0);
1173 emit_move_insn (memt, SUBREG_REG (in));
1174 return adjust_address (memt, XFmode, 0);
1176 else if (force && GET_CODE (in) == REG)
1178 rtx memx = assign_stack_temp (XFmode, 16, 0);
1179 emit_move_insn (memx, in);
1186 /* Emit comparison instruction if necessary, returning the expression
1187 that holds the compare result in the proper mode. */
1189 static GTY(()) rtx cmptf_libfunc;
1192 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1194 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1197 /* If we have a BImode input, then we already have a compare result, and
1198 do not need to emit another comparison. */
1199 if (GET_MODE (op0) == BImode)
1201 if ((code == NE || code == EQ) && op1 == const0_rtx)
1206 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1207 magic number as its third argument, that indicates what to do.
1208 The return value is an integer to be compared against zero. */
1209 else if (GET_MODE (op0) == TFmode)
1212 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1218 enum rtx_code ncode;
1220 if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
1224 /* 1 = equal, 0 = not equal. Equality operators do
1225 not raise FP_INVALID when given an SNaN operand. */
1226 case EQ: magic = QCMP_EQ; ncode = NE; break;
1227 case NE: magic = QCMP_EQ; ncode = EQ; break;
1228 /* isunordered() from C99. */
1229 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1230 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1231 /* Relational operators raise FP_INVALID when given
1233 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1234 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1235 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1236 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1237 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1238 Expanders for buneq etc. weuld have to be added to ia64.md
1239 for this to be useful. */
1245 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1246 op0, TFmode, op1, TFmode,
1247 GEN_INT (magic), DImode);
1248 cmp = gen_reg_rtx (BImode);
1249 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1250 gen_rtx_fmt_ee (ncode, BImode,
1253 insns = get_insns ();
1256 emit_libcall_block (insns, cmp, cmp,
1257 gen_rtx_fmt_ee (code, BImode, op0, op1));
1262 cmp = gen_reg_rtx (BImode);
1263 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1264 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1268 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1271 /* Generate an integral vector comparison. */
1274 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1275 rtx dest, rtx op0, rtx op1)
1277 bool negate = false;
1312 rtx w0h, w0l, w1h, w1l, ch, cl;
1313 enum machine_mode wmode;
1314 rtx (*unpack_l) (rtx, rtx, rtx);
1315 rtx (*unpack_h) (rtx, rtx, rtx);
1316 rtx (*pack) (rtx, rtx, rtx);
1318 /* We don't have native unsigned comparisons, but we can generate
1319 them better than generic code can. */
1321 if (mode == V2SImode)
1323 else if (mode == V8QImode)
1326 pack = gen_pack2_sss;
1327 unpack_l = gen_unpack1_l;
1328 unpack_h = gen_unpack1_h;
1330 else if (mode == V4HImode)
1333 pack = gen_pack4_sss;
1334 unpack_l = gen_unpack2_l;
1335 unpack_h = gen_unpack2_h;
1340 /* Unpack into wider vectors, zero extending the elements. */
1342 w0l = gen_reg_rtx (wmode);
1343 w0h = gen_reg_rtx (wmode);
1344 w1l = gen_reg_rtx (wmode);
1345 w1h = gen_reg_rtx (wmode);
1346 emit_insn (unpack_l (gen_lowpart (mode, w0l), op0, CONST0_RTX (mode)));
1347 emit_insn (unpack_h (gen_lowpart (mode, w0h), op0, CONST0_RTX (mode)));
1348 emit_insn (unpack_l (gen_lowpart (mode, w1l), op1, CONST0_RTX (mode)));
1349 emit_insn (unpack_h (gen_lowpart (mode, w1h), op1, CONST0_RTX (mode)));
1351 /* Compare in the wider mode. */
1353 cl = gen_reg_rtx (wmode);
1354 ch = gen_reg_rtx (wmode);
1355 code = signed_condition (code);
1356 ia64_expand_vecint_compare (code, wmode, cl, w0l, w1l);
1357 negate = ia64_expand_vecint_compare (code, wmode, ch, w0h, w1h);
1359 /* Repack into a single narrower vector. */
1361 emit_insn (pack (dest, cl, ch));
1369 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1370 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1376 ia64_expand_vcondu_v2si (enum rtx_code code, rtx operands[])
1378 rtx dl, dh, bl, bh, op1l, op1h, op2l, op2h, op4l, op4h, op5l, op5h, x;
1380 /* In this case, we extract the two SImode quantities and generate
1381 normal comparisons for each of them. */
1383 op1l = gen_lowpart (SImode, operands[1]);
1384 op2l = gen_lowpart (SImode, operands[2]);
1385 op4l = gen_lowpart (SImode, operands[4]);
1386 op5l = gen_lowpart (SImode, operands[5]);
1388 op1h = gen_reg_rtx (SImode);
1389 op2h = gen_reg_rtx (SImode);
1390 op4h = gen_reg_rtx (SImode);
1391 op5h = gen_reg_rtx (SImode);
1393 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op1h),
1394 gen_lowpart (DImode, operands[1]), GEN_INT (32)));
1395 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op2h),
1396 gen_lowpart (DImode, operands[2]), GEN_INT (32)));
1397 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op4h),
1398 gen_lowpart (DImode, operands[4]), GEN_INT (32)));
1399 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op5h),
1400 gen_lowpart (DImode, operands[5]), GEN_INT (32)));
1402 bl = gen_reg_rtx (BImode);
1403 x = gen_rtx_fmt_ee (code, BImode, op4l, op5l);
1404 emit_insn (gen_rtx_SET (VOIDmode, bl, x));
1406 bh = gen_reg_rtx (BImode);
1407 x = gen_rtx_fmt_ee (code, BImode, op4h, op5h);
1408 emit_insn (gen_rtx_SET (VOIDmode, bh, x));
1410 /* With the results of the comparisons, emit conditional moves. */
1412 dl = gen_reg_rtx (SImode);
1413 x = gen_rtx_IF_THEN_ELSE (SImode, bl, op1l, op2l);
1414 emit_insn (gen_rtx_SET (VOIDmode, dl, x));
1416 dh = gen_reg_rtx (SImode);
1417 x = gen_rtx_IF_THEN_ELSE (SImode, bh, op1h, op2h);
1418 emit_insn (gen_rtx_SET (VOIDmode, dh, x));
1420 /* Merge the two partial results back into a vector. */
1422 x = gen_rtx_VEC_CONCAT (V2SImode, dl, dh);
1423 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1426 /* Emit an integral vector conditional move. */
1429 ia64_expand_vecint_cmov (rtx operands[])
1431 enum machine_mode mode = GET_MODE (operands[0]);
1432 enum rtx_code code = GET_CODE (operands[3]);
1436 /* Since we don't have unsigned V2SImode comparisons, it's more efficient
1437 to special-case them entirely. */
1438 if (mode == V2SImode
1439 && (code == GTU || code == GEU || code == LEU || code == LTU))
1441 ia64_expand_vcondu_v2si (code, operands);
1445 cmp = gen_reg_rtx (mode);
1446 negate = ia64_expand_vecint_compare (code, mode, cmp,
1447 operands[4], operands[5]);
1449 ot = operands[1+negate];
1450 of = operands[2-negate];
1452 if (ot == CONST0_RTX (mode))
1454 if (of == CONST0_RTX (mode))
1456 emit_move_insn (operands[0], ot);
1460 x = gen_rtx_NOT (mode, cmp);
1461 x = gen_rtx_AND (mode, x, of);
1462 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1464 else if (of == CONST0_RTX (mode))
1466 x = gen_rtx_AND (mode, cmp, ot);
1467 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1473 t = gen_reg_rtx (mode);
1474 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1475 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1477 f = gen_reg_rtx (mode);
1478 x = gen_rtx_NOT (mode, cmp);
1479 x = gen_rtx_AND (mode, x, operands[2-negate]);
1480 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1482 x = gen_rtx_IOR (mode, t, f);
1483 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1487 /* Emit an integral vector min or max operation. Return true if all done. */
1490 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1495 /* These four combinations are supported directly. */
1496 if (mode == V8QImode && (code == UMIN || code == UMAX))
1498 if (mode == V4HImode && (code == SMIN || code == SMAX))
1501 /* Everything else implemented via vector comparisons. */
1502 xops[0] = operands[0];
1503 xops[4] = xops[1] = operands[1];
1504 xops[5] = xops[2] = operands[2];
1523 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1525 ia64_expand_vecint_cmov (xops);
1529 /* Emit the appropriate sequence for a call. */
1532 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1537 addr = XEXP (addr, 0);
1538 addr = convert_memory_address (DImode, addr);
1539 b0 = gen_rtx_REG (DImode, R_BR (0));
1541 /* ??? Should do this for functions known to bind local too. */
1542 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1545 insn = gen_sibcall_nogp (addr);
1547 insn = gen_call_nogp (addr, b0);
1549 insn = gen_call_value_nogp (retval, addr, b0);
1550 insn = emit_call_insn (insn);
1555 insn = gen_sibcall_gp (addr);
1557 insn = gen_call_gp (addr, b0);
1559 insn = gen_call_value_gp (retval, addr, b0);
1560 insn = emit_call_insn (insn);
1562 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1566 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1570 ia64_reload_gp (void)
1574 if (current_frame_info.reg_save_gp)
1575 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1578 HOST_WIDE_INT offset;
1580 offset = (current_frame_info.spill_cfa_off
1581 + current_frame_info.spill_size);
1582 if (frame_pointer_needed)
1584 tmp = hard_frame_pointer_rtx;
1589 tmp = stack_pointer_rtx;
1590 offset = current_frame_info.total_size - offset;
1593 if (CONST_OK_FOR_I (offset))
1594 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1595 tmp, GEN_INT (offset)));
1598 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1599 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1600 pic_offset_table_rtx, tmp));
1603 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1606 emit_move_insn (pic_offset_table_rtx, tmp);
1610 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1611 rtx scratch_b, int noreturn_p, int sibcall_p)
1614 bool is_desc = false;
1616 /* If we find we're calling through a register, then we're actually
1617 calling through a descriptor, so load up the values. */
1618 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1623 /* ??? We are currently constrained to *not* use peep2, because
1624 we can legitimately change the global lifetime of the GP
1625 (in the form of killing where previously live). This is
1626 because a call through a descriptor doesn't use the previous
1627 value of the GP, while a direct call does, and we do not
1628 commit to either form until the split here.
1630 That said, this means that we lack precise life info for
1631 whether ADDR is dead after this call. This is not terribly
1632 important, since we can fix things up essentially for free
1633 with the POST_DEC below, but it's nice to not use it when we
1634 can immediately tell it's not necessary. */
1635 addr_dead_p = ((noreturn_p || sibcall_p
1636 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1638 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1640 /* Load the code address into scratch_b. */
1641 tmp = gen_rtx_POST_INC (Pmode, addr);
1642 tmp = gen_rtx_MEM (Pmode, tmp);
1643 emit_move_insn (scratch_r, tmp);
1644 emit_move_insn (scratch_b, scratch_r);
1646 /* Load the GP address. If ADDR is not dead here, then we must
1647 revert the change made above via the POST_INCREMENT. */
1649 tmp = gen_rtx_POST_DEC (Pmode, addr);
1652 tmp = gen_rtx_MEM (Pmode, tmp);
1653 emit_move_insn (pic_offset_table_rtx, tmp);
1660 insn = gen_sibcall_nogp (addr);
1662 insn = gen_call_value_nogp (retval, addr, retaddr);
1664 insn = gen_call_nogp (addr, retaddr);
1665 emit_call_insn (insn);
1667 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1671 /* Begin the assembly file. */
1674 ia64_file_start (void)
1676 default_file_start ();
1677 emit_safe_across_calls ();
1681 emit_safe_across_calls (void)
1683 unsigned int rs, re;
1690 while (rs < 64 && call_used_regs[PR_REG (rs)])
1694 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1698 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1702 fputc (',', asm_out_file);
1704 fprintf (asm_out_file, "p%u", rs);
1706 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1710 fputc ('\n', asm_out_file);
1713 /* Helper function for ia64_compute_frame_size: find an appropriate general
1714 register to spill some special register to. SPECIAL_SPILL_MASK contains
1715 bits in GR0 to GR31 that have already been allocated by this routine.
1716 TRY_LOCALS is true if we should attempt to locate a local regnum. */
1719 find_gr_spill (int try_locals)
1723 /* If this is a leaf function, first try an otherwise unused
1724 call-clobbered register. */
1725 if (current_function_is_leaf)
1727 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1728 if (! regs_ever_live[regno]
1729 && call_used_regs[regno]
1730 && ! fixed_regs[regno]
1731 && ! global_regs[regno]
1732 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1734 current_frame_info.gr_used_mask |= 1 << regno;
1741 regno = current_frame_info.n_local_regs;
1742 /* If there is a frame pointer, then we can't use loc79, because
1743 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1744 reg_name switching code in ia64_expand_prologue. */
1745 if (regno < (80 - frame_pointer_needed))
1747 current_frame_info.n_local_regs = regno + 1;
1748 return LOC_REG (0) + regno;
1752 /* Failed to find a general register to spill to. Must use stack. */
1756 /* In order to make for nice schedules, we try to allocate every temporary
1757 to a different register. We must of course stay away from call-saved,
1758 fixed, and global registers. We must also stay away from registers
1759 allocated in current_frame_info.gr_used_mask, since those include regs
1760 used all through the prologue.
1762 Any register allocated here must be used immediately. The idea is to
1763 aid scheduling, not to solve data flow problems. */
1765 static int last_scratch_gr_reg;
1768 next_scratch_gr_reg (void)
1772 for (i = 0; i < 32; ++i)
1774 regno = (last_scratch_gr_reg + i + 1) & 31;
1775 if (call_used_regs[regno]
1776 && ! fixed_regs[regno]
1777 && ! global_regs[regno]
1778 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1780 last_scratch_gr_reg = regno;
1785 /* There must be _something_ available. */
1789 /* Helper function for ia64_compute_frame_size, called through
1790 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
1793 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
1795 unsigned int regno = REGNO (reg);
1798 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1799 for (i = 0; i < n; ++i)
1800 current_frame_info.gr_used_mask |= 1 << (regno + i);
1804 /* Returns the number of bytes offset between the frame pointer and the stack
1805 pointer for the current function. SIZE is the number of bytes of space
1806 needed for local variables. */
1809 ia64_compute_frame_size (HOST_WIDE_INT size)
1811 HOST_WIDE_INT total_size;
1812 HOST_WIDE_INT spill_size = 0;
1813 HOST_WIDE_INT extra_spill_size = 0;
1814 HOST_WIDE_INT pretend_args_size;
1817 int spilled_gr_p = 0;
1818 int spilled_fr_p = 0;
1822 if (current_frame_info.initialized)
1825 memset (¤t_frame_info, 0, sizeof current_frame_info);
1826 CLEAR_HARD_REG_SET (mask);
1828 /* Don't allocate scratches to the return register. */
1829 diddle_return_value (mark_reg_gr_used_mask, NULL);
1831 /* Don't allocate scratches to the EH scratch registers. */
1832 if (cfun->machine->ia64_eh_epilogue_sp)
1833 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
1834 if (cfun->machine->ia64_eh_epilogue_bsp)
1835 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
1837 /* Find the size of the register stack frame. We have only 80 local
1838 registers, because we reserve 8 for the inputs and 8 for the
1841 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
1842 since we'll be adjusting that down later. */
1843 regno = LOC_REG (78) + ! frame_pointer_needed;
1844 for (; regno >= LOC_REG (0); regno--)
1845 if (regs_ever_live[regno])
1847 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
1849 /* For functions marked with the syscall_linkage attribute, we must mark
1850 all eight input registers as in use, so that locals aren't visible to
1853 if (cfun->machine->n_varargs > 0
1854 || lookup_attribute ("syscall_linkage",
1855 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
1856 current_frame_info.n_input_regs = 8;
1859 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
1860 if (regs_ever_live[regno])
1862 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
1865 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
1866 if (regs_ever_live[regno])
1868 i = regno - OUT_REG (0) + 1;
1870 /* When -p profiling, we need one output register for the mcount argument.
1871 Likewise for -a profiling for the bb_init_func argument. For -ax
1872 profiling, we need two output registers for the two bb_init_trace_func
1874 if (current_function_profile)
1876 current_frame_info.n_output_regs = i;
1878 /* ??? No rotating register support yet. */
1879 current_frame_info.n_rotate_regs = 0;
1881 /* Discover which registers need spilling, and how much room that
1882 will take. Begin with floating point and general registers,
1883 which will always wind up on the stack. */
1885 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
1886 if (regs_ever_live[regno] && ! call_used_regs[regno])
1888 SET_HARD_REG_BIT (mask, regno);
1894 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1895 if (regs_ever_live[regno] && ! call_used_regs[regno])
1897 SET_HARD_REG_BIT (mask, regno);
1903 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
1904 if (regs_ever_live[regno] && ! call_used_regs[regno])
1906 SET_HARD_REG_BIT (mask, regno);
1911 /* Now come all special registers that might get saved in other
1912 general registers. */
1914 if (frame_pointer_needed)
1916 current_frame_info.reg_fp = find_gr_spill (1);
1917 /* If we did not get a register, then we take LOC79. This is guaranteed
1918 to be free, even if regs_ever_live is already set, because this is
1919 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
1920 as we don't count loc79 above. */
1921 if (current_frame_info.reg_fp == 0)
1923 current_frame_info.reg_fp = LOC_REG (79);
1924 current_frame_info.n_local_regs++;
1928 if (! current_function_is_leaf)
1930 /* Emit a save of BR0 if we call other functions. Do this even
1931 if this function doesn't return, as EH depends on this to be
1932 able to unwind the stack. */
1933 SET_HARD_REG_BIT (mask, BR_REG (0));
1935 current_frame_info.reg_save_b0 = find_gr_spill (1);
1936 if (current_frame_info.reg_save_b0 == 0)
1942 /* Similarly for ar.pfs. */
1943 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1944 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1945 if (current_frame_info.reg_save_ar_pfs == 0)
1947 extra_spill_size += 8;
1951 /* Similarly for gp. Note that if we're calling setjmp, the stacked
1952 registers are clobbered, so we fall back to the stack. */
1953 current_frame_info.reg_save_gp
1954 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
1955 if (current_frame_info.reg_save_gp == 0)
1957 SET_HARD_REG_BIT (mask, GR_REG (1));
1964 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
1966 SET_HARD_REG_BIT (mask, BR_REG (0));
1971 if (regs_ever_live[AR_PFS_REGNUM])
1973 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1974 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1975 if (current_frame_info.reg_save_ar_pfs == 0)
1977 extra_spill_size += 8;
1983 /* Unwind descriptor hackery: things are most efficient if we allocate
1984 consecutive GR save registers for RP, PFS, FP in that order. However,
1985 it is absolutely critical that FP get the only hard register that's
1986 guaranteed to be free, so we allocated it first. If all three did
1987 happen to be allocated hard regs, and are consecutive, rearrange them
1988 into the preferred order now. */
1989 if (current_frame_info.reg_fp != 0
1990 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
1991 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
1993 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
1994 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
1995 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
1998 /* See if we need to store the predicate register block. */
1999 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2000 if (regs_ever_live[regno] && ! call_used_regs[regno])
2002 if (regno <= PR_REG (63))
2004 SET_HARD_REG_BIT (mask, PR_REG (0));
2005 current_frame_info.reg_save_pr = find_gr_spill (1);
2006 if (current_frame_info.reg_save_pr == 0)
2008 extra_spill_size += 8;
2012 /* ??? Mark them all as used so that register renaming and such
2013 are free to use them. */
2014 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2015 regs_ever_live[regno] = 1;
2018 /* If we're forced to use st8.spill, we're forced to save and restore
2019 ar.unat as well. The check for existing liveness allows inline asm
2020 to touch ar.unat. */
2021 if (spilled_gr_p || cfun->machine->n_varargs
2022 || regs_ever_live[AR_UNAT_REGNUM])
2024 regs_ever_live[AR_UNAT_REGNUM] = 1;
2025 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2026 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2027 if (current_frame_info.reg_save_ar_unat == 0)
2029 extra_spill_size += 8;
2034 if (regs_ever_live[AR_LC_REGNUM])
2036 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2037 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2038 if (current_frame_info.reg_save_ar_lc == 0)
2040 extra_spill_size += 8;
2045 /* If we have an odd number of words of pretend arguments written to
2046 the stack, then the FR save area will be unaligned. We round the
2047 size of this area up to keep things 16 byte aligned. */
2049 pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
2051 pretend_args_size = current_function_pretend_args_size;
2053 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2054 + current_function_outgoing_args_size);
2055 total_size = IA64_STACK_ALIGN (total_size);
2057 /* We always use the 16-byte scratch area provided by the caller, but
2058 if we are a leaf function, there's no one to whom we need to provide a scratch area.  */
2060 if (current_function_is_leaf)
2061 total_size = MAX (0, total_size - 16);
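/* Worked example with made-up numbers: 24 bytes of spills plus 8
   bytes of locals yield 32 after IA64_STACK_ALIGN rounds up to a
   16-byte multiple; a leaf function then drops the caller-provided
   16-byte scratch area, leaving a 16-byte frame.  */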
2063 current_frame_info.total_size = total_size;
2064 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2065 current_frame_info.spill_size = spill_size;
2066 current_frame_info.extra_spill_size = extra_spill_size;
2067 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2068 current_frame_info.n_spilled = n_spilled;
2069 current_frame_info.initialized = reload_completed;
2072 /* Compute the initial difference between the specified pair of registers. */
2075 ia64_initial_elimination_offset (int from, int to)
2077 HOST_WIDE_INT offset;
2079 ia64_compute_frame_size (get_frame_size ());
2082 case FRAME_POINTER_REGNUM:
2083 if (to == HARD_FRAME_POINTER_REGNUM)
2085 if (current_function_is_leaf)
2086 offset = -current_frame_info.total_size;
2088 offset = -(current_frame_info.total_size
2089 - current_function_outgoing_args_size - 16);
2091 else if (to == STACK_POINTER_REGNUM)
2093 if (current_function_is_leaf)
2096 offset = 16 + current_function_outgoing_args_size;
2102 case ARG_POINTER_REGNUM:
2103 /* Arguments start above the 16 byte save area, unless stdarg,
2104 in which case we store through the 16 byte save area. */
2105 if (to == HARD_FRAME_POINTER_REGNUM)
2106 offset = 16 - current_function_pretend_args_size;
2107 else if (to == STACK_POINTER_REGNUM)
2108 offset = (current_frame_info.total_size
2109 + 16 - current_function_pretend_args_size);
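/* For instance, with a hypothetical total_size of 48 and no pretend
   args, eliminating ARG_POINTER to STACK_POINTER gives 48 + 16 = 64,
   i.e. the incoming arguments sit 64 bytes above the adjusted SP.  */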
2121 /* If there are more than a trivial number of register spills, we use
2122 two interleaved iterators so that we can get two memory references per insn group.
2125 In order to simplify things in the prologue and epilogue expanders,
2126 we use helper functions to fix up the memory references after the
2127 fact with the appropriate offsets to a POST_MODIFY memory mode.
2128 The following data structure tracks the state of the two iterators
2129 while insns are being emitted. */
2131 struct spill_fill_data
2133 rtx init_after; /* point at which to emit initializations */
2134 rtx init_reg[2]; /* initial base register */
2135 rtx iter_reg[2]; /* the iterator registers */
2136 rtx *prev_addr[2]; /* address of last memory use */
2137 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2138 HOST_WIDE_INT prev_off[2]; /* last offset */
2139 int n_iter; /* number of iterators in use */
2140 int next_iter; /* next iterator to use */
2141 unsigned int save_gr_used_mask;
2144 static struct spill_fill_data spill_fill_data;
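/* Roughly, with both iterators live the emitted spills alternate
   between them, e.g. (illustrative assembly; registers and offsets
   are hypothetical):

	st8.spill [r2] = r35, 16
	st8.spill [r3] = r36, 16

   so that back-to-back stores can issue in the same insn group.  */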
2147 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2151 spill_fill_data.init_after = get_last_insn ();
2152 spill_fill_data.init_reg[0] = init_reg;
2153 spill_fill_data.init_reg[1] = init_reg;
2154 spill_fill_data.prev_addr[0] = NULL;
2155 spill_fill_data.prev_addr[1] = NULL;
2156 spill_fill_data.prev_insn[0] = NULL;
2157 spill_fill_data.prev_insn[1] = NULL;
2158 spill_fill_data.prev_off[0] = cfa_off;
2159 spill_fill_data.prev_off[1] = cfa_off;
2160 spill_fill_data.next_iter = 0;
2161 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2163 spill_fill_data.n_iter = 1 + (n_spills > 2);
2164 for (i = 0; i < spill_fill_data.n_iter; ++i)
2166 int regno = next_scratch_gr_reg ();
2167 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2168 current_frame_info.gr_used_mask |= 1 << regno;
2173 finish_spill_pointers (void)
2175 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2179 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2181 int iter = spill_fill_data.next_iter;
2182 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2183 rtx disp_rtx = GEN_INT (disp);
2186 if (spill_fill_data.prev_addr[iter])
2188 if (CONST_OK_FOR_N (disp))
2190 *spill_fill_data.prev_addr[iter]
2191 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2192 gen_rtx_PLUS (DImode,
2193 spill_fill_data.iter_reg[iter],
2195 REG_NOTES (spill_fill_data.prev_insn[iter])
2196 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2197 REG_NOTES (spill_fill_data.prev_insn[iter]));
2201 /* ??? Could use register post_modify for loads. */
2202 if (! CONST_OK_FOR_I (disp))
2204 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2205 emit_move_insn (tmp, disp_rtx);
2208 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2209 spill_fill_data.iter_reg[iter], disp_rtx));
2212 /* Micro-optimization: if we've created a frame pointer, it's at
2213 CFA 0, which may allow the real iterator to be initialized lower,
2214 slightly increasing parallelism. Also, if there are few saves
2215 it may eliminate the iterator entirely. */
2217 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2218 && frame_pointer_needed)
2220 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2221 set_mem_alias_set (mem, get_varargs_alias_set ());
2229 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2230 spill_fill_data.init_reg[iter]);
2235 if (! CONST_OK_FOR_I (disp))
2237 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2238 emit_move_insn (tmp, disp_rtx);
2242 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2243 spill_fill_data.init_reg[iter],
2250 /* Careful for being the first insn in a sequence. */
2251 if (spill_fill_data.init_after)
2252 insn = emit_insn_after (seq, spill_fill_data.init_after);
2255 rtx first = get_insns ();
2257 insn = emit_insn_before (seq, first);
2259 insn = emit_insn (seq);
2261 spill_fill_data.init_after = insn;
2263 /* If DISP is 0, we may or may not have a further adjustment
2264 afterward. If we do, then the load/store insn may be modified
2265 to be a post-modify. If we don't, then this copy may be
2266 eliminated by copyprop_hardreg_forward, which makes this
2267 insn garbage, which runs afoul of the sanity check in
2268 propagate_one_insn. So mark this insn as legal to delete. */
2270 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2274 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2276 /* ??? Not all of the spills are for varargs, but some of them are.
2277 The rest of the spills belong in an alias set of their own. But
2278 it doesn't actually hurt to include them here. */
2279 set_mem_alias_set (mem, get_varargs_alias_set ());
2281 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2282 spill_fill_data.prev_off[iter] = cfa_off;
2284 if (++iter >= spill_fill_data.n_iter)
2286 spill_fill_data.next_iter = iter;
2292 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2295 int iter = spill_fill_data.next_iter;
2298 mem = spill_restore_mem (reg, cfa_off);
2299 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2300 spill_fill_data.prev_insn[iter] = insn;
2307 RTX_FRAME_RELATED_P (insn) = 1;
2309 /* Don't even pretend that the unwind code can intuit its way
2310 through a pair of interleaved post_modify iterators. Just
2311 provide the correct answer. */
2313 if (frame_pointer_needed)
2315 base = hard_frame_pointer_rtx;
2320 base = stack_pointer_rtx;
2321 off = current_frame_info.total_size - cfa_off;
2325 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2326 gen_rtx_SET (VOIDmode,
2327 gen_rtx_MEM (GET_MODE (reg),
2328 plus_constant (base, off)),
2335 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2337 int iter = spill_fill_data.next_iter;
2340 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2341 GEN_INT (cfa_off)));
2342 spill_fill_data.prev_insn[iter] = insn;
2345 /* Wrapper functions that discard the CONST_INT spill offset.  These
2346 exist so that we can give gr_spill/gr_fill the offset they need and
2347 use a consistent function interface. */
2350 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2352 return gen_movdi (dest, src);
2356 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2358 return gen_fr_spill (dest, src);
2362 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2364 return gen_fr_restore (dest, src);
2367 /* Called after register allocation to add any instructions needed for the
2368 prologue. Using a prologue insn is favored compared to putting all of the
2369 instructions in output_function_prologue(), since it allows the scheduler
2370 to intermix instructions with the saves of the call-saved registers.  In
2371 some cases, it might be necessary to emit a barrier instruction as the last
2372 insn to prevent such scheduling.
2374 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2375 so that the debug info generation code can handle them properly.
2377 The register save area is laid out like so:
2379 [ varargs spill area ]
2380 [ fr register spill area ]
2381 [ br register spill area ]
2382 [ ar register spill area ]
2383 [ pr register spill area ]
2384 [ gr register spill area ] */
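/* The prologue below walks these areas with a single cfa_off cursor,
   spilling the varargs registers first and checking at each area
   boundary (e.g. that cfa_off has reached spill_cfa_off + spill_size
   before the gr/br/fr saves start), so a mismatch between this layout
   and the emitted code aborts at compile time.  */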
2386 /* ??? We get inefficient code when the frame size is larger than can fit in an
2387 adds instruction. */
2390 ia64_expand_prologue (void)
2392 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2393 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2396 ia64_compute_frame_size (get_frame_size ());
2397 last_scratch_gr_reg = 15;
2399 /* If there is no epilogue, then we don't need some prologue insns.
2400 We need to avoid emitting the dead prologue insns, because flow
2401 will complain about them. */
2407 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2408 if ((e->flags & EDGE_FAKE) == 0
2409 && (e->flags & EDGE_FALLTHRU) != 0)
2411 epilogue_p = (e != NULL);
2416 /* Set the local, input, and output register names. We need to do this
2417 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2418 half. If we use in/loc/out register names, then we get assembler errors
2419 in crtn.S because there is no alloc insn or regstk directive in there. */
2420 if (! TARGET_REG_NAMES)
2422 int inputs = current_frame_info.n_input_regs;
2423 int locals = current_frame_info.n_local_regs;
2424 int outputs = current_frame_info.n_output_regs;
2426 for (i = 0; i < inputs; i++)
2427 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2428 for (i = 0; i < locals; i++)
2429 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2430 for (i = 0; i < outputs; i++)
2431 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2434 /* Set the frame pointer register name. The regnum is logically loc79,
2435 but of course we'll not have allocated that many locals. Rather than
2436 worrying about renumbering the existing rtxs, we adjust the name. */
2437 /* ??? This code means that we can never use one local register when
2438 there is a frame pointer. loc79 gets wasted in this case, as it is
2439 renamed to a register that will never be used. See also the try_locals
2440 code in find_gr_spill. */
2441 if (current_frame_info.reg_fp)
2443 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2444 reg_names[HARD_FRAME_POINTER_REGNUM]
2445 = reg_names[current_frame_info.reg_fp];
2446 reg_names[current_frame_info.reg_fp] = tmp;
2449 /* We don't need an alloc instruction if we've used no outputs or locals. */
2450 if (current_frame_info.n_local_regs == 0
2451 && current_frame_info.n_output_regs == 0
2452 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2453 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2455 /* If there is no alloc, but there are input registers used, then we
2456 need a .regstk directive. */
2457 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2458 ar_pfs_save_reg = NULL_RTX;
2462 current_frame_info.need_regstk = 0;
2464 if (current_frame_info.reg_save_ar_pfs)
2465 regno = current_frame_info.reg_save_ar_pfs;
2467 regno = next_scratch_gr_reg ();
2468 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2470 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2471 GEN_INT (current_frame_info.n_input_regs),
2472 GEN_INT (current_frame_info.n_local_regs),
2473 GEN_INT (current_frame_info.n_output_regs),
2474 GEN_INT (current_frame_info.n_rotate_regs)));
2475 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
2478 /* Set up frame pointer, stack pointer, and spill iterators. */
2480 n_varargs = cfun->machine->n_varargs;
2481 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2482 stack_pointer_rtx, 0);
2484 if (frame_pointer_needed)
2486 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2487 RTX_FRAME_RELATED_P (insn) = 1;
2490 if (current_frame_info.total_size != 0)
2492 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2495 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2496 offset = frame_size_rtx;
2499 regno = next_scratch_gr_reg ();
2500 offset = gen_rtx_REG (DImode, regno);
2501 emit_move_insn (offset, frame_size_rtx);
2504 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2505 stack_pointer_rtx, offset));
2507 if (! frame_pointer_needed)
2509 RTX_FRAME_RELATED_P (insn) = 1;
2510 if (GET_CODE (offset) != CONST_INT)
2513 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2514 gen_rtx_SET (VOIDmode,
2516 gen_rtx_PLUS (DImode,
2523 /* ??? At this point we must generate a magic insn that appears to
2524 modify the stack pointer, the frame pointer, and all spill
2525 iterators. This would allow the most scheduling freedom. For
2526 now, just hard stop. */
2527 emit_insn (gen_blockage ());
2530 /* Must copy out ar.unat before doing any integer spills. */
2531 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2533 if (current_frame_info.reg_save_ar_unat)
2535 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2538 alt_regno = next_scratch_gr_reg ();
2539 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2540 current_frame_info.gr_used_mask |= 1 << alt_regno;
2543 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2544 insn = emit_move_insn (ar_unat_save_reg, reg);
2545 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2547 /* Even if we're not going to generate an epilogue, we still
2548 need to save the register so that EH works. */
2549 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2550 emit_insn (gen_prologue_use (ar_unat_save_reg));
2553 ar_unat_save_reg = NULL_RTX;
2555 /* Spill all varargs registers. Do this before spilling any GR registers,
2556 since we want the UNAT bits for the GR registers to override the UNAT
2557 bits from varargs, which we don't care about. */
2560 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2562 reg = gen_rtx_REG (DImode, regno);
2563 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2566 /* Locate the bottom of the register save area. */
2567 cfa_off = (current_frame_info.spill_cfa_off
2568 + current_frame_info.spill_size
2569 + current_frame_info.extra_spill_size);
2571 /* Save the predicate register block either in a register or in memory. */
2572 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2574 reg = gen_rtx_REG (DImode, PR_REG (0));
2575 if (current_frame_info.reg_save_pr != 0)
2577 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2578 insn = emit_move_insn (alt_reg, reg);
2580 /* ??? Denote pr spill/fill by a DImode move that modifies all
2581 64 hard registers. */
2582 RTX_FRAME_RELATED_P (insn) = 1;
2584 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2585 gen_rtx_SET (VOIDmode, alt_reg, reg),
2588 /* Even if we're not going to generate an epilogue, we still
2589 need to save the register so that EH works. */
2591 emit_insn (gen_prologue_use (alt_reg));
2595 alt_regno = next_scratch_gr_reg ();
2596 alt_reg = gen_rtx_REG (DImode, alt_regno);
2597 insn = emit_move_insn (alt_reg, reg);
2598 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2603 /* Handle AR regs in numerical order. All of them get special handling. */
2604 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2605 && current_frame_info.reg_save_ar_unat == 0)
2607 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2608 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2612 /* The alloc insn already copied ar.pfs into a general register. The
2613 only thing we have to do now is copy that register to a stack slot
2614 if we'd not allocated a local register for the job. */
2615 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2616 && current_frame_info.reg_save_ar_pfs == 0)
2618 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2619 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2623 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2625 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2626 if (current_frame_info.reg_save_ar_lc != 0)
2628 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2629 insn = emit_move_insn (alt_reg, reg);
2630 RTX_FRAME_RELATED_P (insn) = 1;
2632 /* Even if we're not going to generate an epilogue, we still
2633 need to save the register so that EH works. */
2635 emit_insn (gen_prologue_use (alt_reg));
2639 alt_regno = next_scratch_gr_reg ();
2640 alt_reg = gen_rtx_REG (DImode, alt_regno);
2641 emit_move_insn (alt_reg, reg);
2642 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2647 if (current_frame_info.reg_save_gp)
2649 insn = emit_move_insn (gen_rtx_REG (DImode,
2650 current_frame_info.reg_save_gp),
2651 pic_offset_table_rtx);
2652 /* We don't know for sure yet if this is actually needed, since
2653 we've not split the PIC call patterns. If all of the calls
2654 are indirect, and not followed by any uses of the gp, then
2655 this save is dead. Allow it to go away. */
2657 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2660 /* We should now be at the base of the gr/br/fr spill area. */
2661 if (cfa_off != (current_frame_info.spill_cfa_off
2662 + current_frame_info.spill_size))
2665 /* Spill all general registers. */
2666 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2667 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2669 reg = gen_rtx_REG (DImode, regno);
2670 do_spill (gen_gr_spill, reg, cfa_off, reg);
2674 /* Handle BR0 specially -- it may be getting stored permanently in
2675 some GR register. */
2676 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2678 reg = gen_rtx_REG (DImode, BR_REG (0));
2679 if (current_frame_info.reg_save_b0 != 0)
2681 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2682 insn = emit_move_insn (alt_reg, reg);
2683 RTX_FRAME_RELATED_P (insn) = 1;
2685 /* Even if we're not going to generate an epilogue, we still
2686 need to save the register so that EH works. */
2688 emit_insn (gen_prologue_use (alt_reg));
2692 alt_regno = next_scratch_gr_reg ();
2693 alt_reg = gen_rtx_REG (DImode, alt_regno);
2694 emit_move_insn (alt_reg, reg);
2695 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2700 /* Spill the rest of the BR registers. */
2701 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2702 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2704 alt_regno = next_scratch_gr_reg ();
2705 alt_reg = gen_rtx_REG (DImode, alt_regno);
2706 reg = gen_rtx_REG (DImode, regno);
2707 emit_move_insn (alt_reg, reg);
2708 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2712 /* Align the frame and spill all FR registers. */
2713 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2714 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2718 reg = gen_rtx_REG (XFmode, regno);
2719 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2723 if (cfa_off != current_frame_info.spill_cfa_off)
2726 finish_spill_pointers ();
2729 /* Called after register allocation to add any instructions needed for the
2730 epilogue. Using an epilogue insn is favored compared to putting all of the
2731 instructions in output_function_epilogue(), since it allows the scheduler
2732 to intermix instructions with the restores of the call-saved registers.  In
2733 some cases, it might be necessary to emit a barrier instruction as the last
2734 insn to prevent such scheduling. */
2737 ia64_expand_epilogue (int sibcall_p)
2739 rtx insn, reg, alt_reg, ar_unat_save_reg;
2740 int regno, alt_regno, cfa_off;
2742 ia64_compute_frame_size (get_frame_size ());
2744 /* If there is a frame pointer, then we use it instead of the stack
2745 pointer, so that the stack pointer does not need to be valid when
2746 the epilogue starts. See EXIT_IGNORE_STACK. */
2747 if (frame_pointer_needed)
2748 setup_spill_pointers (current_frame_info.n_spilled,
2749 hard_frame_pointer_rtx, 0);
2751 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2752 current_frame_info.total_size);
2754 if (current_frame_info.total_size != 0)
2756 /* ??? At this point we must generate a magic insn that appears to
2757 modify the spill iterators and the frame pointer. This would
2758 allow the most scheduling freedom. For now, just hard stop. */
2759 emit_insn (gen_blockage ());
2762 /* Locate the bottom of the register save area. */
2763 cfa_off = (current_frame_info.spill_cfa_off
2764 + current_frame_info.spill_size
2765 + current_frame_info.extra_spill_size);
2767 /* Restore the predicate registers. */
2768 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2770 if (current_frame_info.reg_save_pr != 0)
2771 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2774 alt_regno = next_scratch_gr_reg ();
2775 alt_reg = gen_rtx_REG (DImode, alt_regno);
2776 do_restore (gen_movdi_x, alt_reg, cfa_off);
2779 reg = gen_rtx_REG (DImode, PR_REG (0));
2780 emit_move_insn (reg, alt_reg);
2783 /* Restore the application registers. */
2785 /* Load the saved unat from the stack, but do not restore it until
2786 after the GRs have been restored. */
2787 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2789 if (current_frame_info.reg_save_ar_unat != 0)
2791 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2794 alt_regno = next_scratch_gr_reg ();
2795 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2796 current_frame_info.gr_used_mask |= 1 << alt_regno;
2797 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2802 ar_unat_save_reg = NULL_RTX;
2804 if (current_frame_info.reg_save_ar_pfs != 0)
2806 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2807 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2808 emit_move_insn (reg, alt_reg);
2810 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2812 alt_regno = next_scratch_gr_reg ();
2813 alt_reg = gen_rtx_REG (DImode, alt_regno);
2814 do_restore (gen_movdi_x, alt_reg, cfa_off);
2816 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2817 emit_move_insn (reg, alt_reg);
2820 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2822 if (current_frame_info.reg_save_ar_lc != 0)
2823 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2826 alt_regno = next_scratch_gr_reg ();
2827 alt_reg = gen_rtx_REG (DImode, alt_regno);
2828 do_restore (gen_movdi_x, alt_reg, cfa_off);
2831 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2832 emit_move_insn (reg, alt_reg);
2835 /* We should now be at the base of the gr/br/fr spill area. */
2836 if (cfa_off != (current_frame_info.spill_cfa_off
2837 + current_frame_info.spill_size))
2840 /* The GP may be stored on the stack in the prologue, but it's
2841 never restored in the epilogue. Skip the stack slot. */
2842 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2845 /* Restore all general registers. */
2846 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2847 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2849 reg = gen_rtx_REG (DImode, regno);
2850 do_restore (gen_gr_restore, reg, cfa_off);
2854 /* Restore the branch registers. Handle B0 specially, as it may
2855 have gotten stored in some GR register. */
2856 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2858 if (current_frame_info.reg_save_b0 != 0)
2859 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2862 alt_regno = next_scratch_gr_reg ();
2863 alt_reg = gen_rtx_REG (DImode, alt_regno);
2864 do_restore (gen_movdi_x, alt_reg, cfa_off);
2867 reg = gen_rtx_REG (DImode, BR_REG (0));
2868 emit_move_insn (reg, alt_reg);
2871 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2872 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2874 alt_regno = next_scratch_gr_reg ();
2875 alt_reg = gen_rtx_REG (DImode, alt_regno);
2876 do_restore (gen_movdi_x, alt_reg, cfa_off);
2878 reg = gen_rtx_REG (DImode, regno);
2879 emit_move_insn (reg, alt_reg);
2882 /* Restore floating point registers. */
2883 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2884 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2888 reg = gen_rtx_REG (XFmode, regno);
2889 do_restore (gen_fr_restore_x, reg, cfa_off);
2893 /* Restore ar.unat for real. */
2894 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2896 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2897 emit_move_insn (reg, ar_unat_save_reg);
2900 if (cfa_off != current_frame_info.spill_cfa_off)
2903 finish_spill_pointers ();
2905 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2907 /* ??? At this point we must generate a magic insn that appears to
2908 modify the spill iterators, the stack pointer, and the frame
2909 pointer. This would allow the most scheduling freedom. For now,
2911 emit_insn (gen_blockage ());
2914 if (cfun->machine->ia64_eh_epilogue_sp)
2915 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2916 else if (frame_pointer_needed)
2918 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2919 RTX_FRAME_RELATED_P (insn) = 1;
2921 else if (current_frame_info.total_size)
2923 rtx offset, frame_size_rtx;
2925 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2926 if (CONST_OK_FOR_I (current_frame_info.total_size))
2927 offset = frame_size_rtx;
2930 regno = next_scratch_gr_reg ();
2931 offset = gen_rtx_REG (DImode, regno);
2932 emit_move_insn (offset, frame_size_rtx);
2935 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2938 RTX_FRAME_RELATED_P (insn) = 1;
2939 if (GET_CODE (offset) != CONST_INT)
2942 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2943 gen_rtx_SET (VOIDmode,
2945 gen_rtx_PLUS (DImode,
2952 if (cfun->machine->ia64_eh_epilogue_bsp)
2953 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2956 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2959 int fp = GR_REG (2);
2960 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
2961 first available call-clobbered register.  If there was a frame_pointer
2962 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2963 so we have to make sure we're using the string "r2" when emitting
2964 the register name for the assembler. */
2965 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2966 fp = HARD_FRAME_POINTER_REGNUM;
2968 /* We must emit an alloc to force the input registers to become output
2969 registers. Otherwise, if the callee tries to pass its parameters
2970 through to another call without an intervening alloc, then these values may be clobbered.  */
2972 /* ??? We don't need to preserve all input registers. We only need to
2973 preserve those input registers used as arguments to the sibling call.
2974 It is unclear how to compute that number here. */
2975 if (current_frame_info.n_input_regs != 0)
2977 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
2978 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2979 const0_rtx, const0_rtx,
2980 n_inputs, const0_rtx));
2981 RTX_FRAME_RELATED_P (insn) = 1;
2986 /* Return 1 if br.ret can do all the work required to return from a function.  */
2990 ia64_direct_return (void)
2992 if (reload_completed && ! frame_pointer_needed)
2994 ia64_compute_frame_size (get_frame_size ());
2996 return (current_frame_info.total_size == 0
2997 && current_frame_info.n_spilled == 0
2998 && current_frame_info.reg_save_b0 == 0
2999 && current_frame_info.reg_save_pr == 0
3000 && current_frame_info.reg_save_ar_pfs == 0
3001 && current_frame_info.reg_save_ar_unat == 0
3002 && current_frame_info.reg_save_ar_lc == 0);
3007 /* Return the magic cookie that we use to hold the return address
3008 during early compilation. */
3011 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3015 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3018 /* Split this value after reload, now that we know where the return
3019 address is saved. */
3022 ia64_split_return_addr_rtx (rtx dest)
3026 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3028 if (current_frame_info.reg_save_b0 != 0)
3029 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3035 /* Compute offset from CFA for BR0. */
3036 /* ??? Must be kept in sync with ia64_expand_prologue. */
3037 off = (current_frame_info.spill_cfa_off
3038 + current_frame_info.spill_size);
3039 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3040 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3043 /* Convert CFA offset to a register based offset. */
3044 if (frame_pointer_needed)
3045 src = hard_frame_pointer_rtx;
3048 src = stack_pointer_rtx;
3049 off += current_frame_info.total_size;
3052 /* Load address into scratch register. */
3053 if (CONST_OK_FOR_I (off))
3054 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3057 emit_move_insn (dest, GEN_INT (off));
3058 emit_insn (gen_adddi3 (dest, src, dest));
3061 src = gen_rtx_MEM (Pmode, dest);
3065 src = gen_rtx_REG (DImode, BR_REG (0));
3067 emit_move_insn (dest, src);
3071 ia64_hard_regno_rename_ok (int from, int to)
3073 /* Don't clobber any of the registers we reserved for the prologue. */
3074 if (to == current_frame_info.reg_fp
3075 || to == current_frame_info.reg_save_b0
3076 || to == current_frame_info.reg_save_pr
3077 || to == current_frame_info.reg_save_ar_pfs
3078 || to == current_frame_info.reg_save_ar_unat
3079 || to == current_frame_info.reg_save_ar_lc)
3082 if (from == current_frame_info.reg_fp
3083 || from == current_frame_info.reg_save_b0
3084 || from == current_frame_info.reg_save_pr
3085 || from == current_frame_info.reg_save_ar_pfs
3086 || from == current_frame_info.reg_save_ar_unat
3087 || from == current_frame_info.reg_save_ar_lc)
3090 /* Don't use output registers outside the register frame. */
3091 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3094 /* Retain even/oddness on predicate register pairs. */
3095 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3096 return (from & 1) == (to & 1);
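/* E.g. renaming p6 to p8 would be allowed (both even), while p6 to
   p7 would not, keeping predicates that the compiler produces in
   even/odd pairs paired up.  Example registers are illustrative.  */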
3101 /* Target hook for assembling integer objects. Handle word-sized
3102 aligned objects and detect the cases when @fptr is needed. */
3105 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3107 if (size == POINTER_SIZE / BITS_PER_UNIT
3108 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3109 && GET_CODE (x) == SYMBOL_REF
3110 && SYMBOL_REF_FUNCTION_P (x))
3112 static const char * const directive[2][2] = {
3113 /* 64-bit pointer */ /* 32-bit pointer */
3114 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3115 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3117 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3118 output_addr_const (asm_out_file, x);
3119 fputs (")\n", asm_out_file);
3122 return default_assemble_integer (x, size, aligned_p);
3125 /* Emit the function prologue. */
3128 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3130 int mask, grsave, grsave_prev;
3132 if (current_frame_info.need_regstk)
3133 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3134 current_frame_info.n_input_regs,
3135 current_frame_info.n_local_regs,
3136 current_frame_info.n_output_regs,
3137 current_frame_info.n_rotate_regs);
3139 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3142 /* Emit the .prologue directive. */
3145 grsave = grsave_prev = 0;
3146 if (current_frame_info.reg_save_b0 != 0)
3149 grsave = grsave_prev = current_frame_info.reg_save_b0;
3151 if (current_frame_info.reg_save_ar_pfs != 0
3152 && (grsave_prev == 0
3153 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3156 if (grsave_prev == 0)
3157 grsave = current_frame_info.reg_save_ar_pfs;
3158 grsave_prev = current_frame_info.reg_save_ar_pfs;
3160 if (current_frame_info.reg_fp != 0
3161 && (grsave_prev == 0
3162 || current_frame_info.reg_fp == grsave_prev + 1))
3165 if (grsave_prev == 0)
3166 grsave = HARD_FRAME_POINTER_REGNUM;
3167 grsave_prev = current_frame_info.reg_fp;
3169 if (current_frame_info.reg_save_pr != 0
3170 && (grsave_prev == 0
3171 || current_frame_info.reg_save_pr == grsave_prev + 1))
3174 if (grsave_prev == 0)
3175 grsave = current_frame_info.reg_save_pr;
3178 if (mask && TARGET_GNU_AS)
3179 fprintf (file, "\t.prologue %d, %d\n", mask,
3180 ia64_dbx_register_number (grsave));
3182 fputs ("\t.prologue\n", file);
3184 /* Emit a .spill directive, if necessary, to relocate the base of
3185 the register spill area. */
3186 if (current_frame_info.spill_cfa_off != -16)
3187 fprintf (file, "\t.spill %ld\n",
3188 (long) (current_frame_info.spill_cfa_off
3189 + current_frame_info.spill_size));
3192 /* Emit the .body directive at the scheduled end of the prologue. */
3195 ia64_output_function_end_prologue (FILE *file)
3197 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3200 fputs ("\t.body\n", file);
3203 /* Emit the function epilogue. */
3206 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3207 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3211 if (current_frame_info.reg_fp)
3213 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3214 reg_names[HARD_FRAME_POINTER_REGNUM]
3215 = reg_names[current_frame_info.reg_fp];
3216 reg_names[current_frame_info.reg_fp] = tmp;
3218 if (! TARGET_REG_NAMES)
3220 for (i = 0; i < current_frame_info.n_input_regs; i++)
3221 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3222 for (i = 0; i < current_frame_info.n_local_regs; i++)
3223 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3224 for (i = 0; i < current_frame_info.n_output_regs; i++)
3225 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3228 current_frame_info.initialized = 0;
3232 ia64_dbx_register_number (int regno)
3234 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3235 from its home at loc79 to something inside the register frame. We
3236 must perform the same renumbering here for the debug info. */
3237 if (current_frame_info.reg_fp)
3239 if (regno == HARD_FRAME_POINTER_REGNUM)
3240 regno = current_frame_info.reg_fp;
3241 else if (regno == current_frame_info.reg_fp)
3242 regno = HARD_FRAME_POINTER_REGNUM;
3245 if (IN_REGNO_P (regno))
3246 return 32 + regno - IN_REG (0);
3247 else if (LOC_REGNO_P (regno))
3248 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3249 else if (OUT_REGNO_P (regno))
3250 return (32 + current_frame_info.n_input_regs
3251 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3257 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3259 rtx addr_reg, eight = GEN_INT (8);
3261 /* The Intel assembler requires that the global __ia64_trampoline symbol
3262 be declared explicitly */
3265 static bool declared_ia64_trampoline = false;
3267 if (!declared_ia64_trampoline)
3269 declared_ia64_trampoline = true;
3270 (*targetm.asm_out.globalize_label) (asm_out_file,
3271 "__ia64_trampoline");
3275 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3276 addr = convert_memory_address (Pmode, addr);
3277 fnaddr = convert_memory_address (Pmode, fnaddr);
3278 static_chain = convert_memory_address (Pmode, static_chain);
3280 /* Load up our iterator. */
3281 addr_reg = gen_reg_rtx (Pmode);
3282 emit_move_insn (addr_reg, addr);
3284 /* The first two words are the fake descriptor:
3285 __ia64_trampoline, ADDR+16. */
3286 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3287 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3288 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3290 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3291 copy_to_reg (plus_constant (addr, 16)));
3292 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3294 /* The third word is the target descriptor. */
3295 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3296 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3298 /* The fourth word is the static chain. */
3299 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
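/* The trampoline written above, word by word (offsets follow from
   the three increments of 8):

	+0	__ia64_trampoline	\ fake descriptor
	+8	ADDR + 16		/
	+16	target descriptor (FNADDR)
	+24	static chain

   so a call through the fake descriptor lands in __ia64_trampoline,
   which can locate the real target and chain at ADDR + 16.  */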
3302 /* Do any needed setup for a variadic function. CUM has not been updated
3303 for the last named argument which has type TYPE and mode MODE.
3305 We generate the actual spill instructions during prologue generation. */
3308 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3309 tree type, int * pretend_size,
3310 int second_time ATTRIBUTE_UNUSED)
3312 CUMULATIVE_ARGS next_cum = *cum;
3314 /* Skip the current argument. */
3315 ia64_function_arg_advance (&next_cum, mode, type, 1);
3317 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3319 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3320 *pretend_size = n * UNITS_PER_WORD;
3321 cfun->machine->n_varargs = n;
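/* Example (assuming the usual 8 argument slots): for a function such
   as `int f (int a, ...)', the named argument consumes one slot, so
   n == 7 and the prologue spills the remaining 7 GR argument
   registers, with *pretend_size == 56.  */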
3325 /* Check whether TYPE is a homogeneous floating point aggregate. If
3326 it is, return the mode of the floating point type that appears
3327 in all leaves.  If it is not, return VOIDmode.
3329 An aggregate is a homogeneous floating point aggregate if all
3330 fields/elements in it have the same floating point type (e.g.,
3331 SFmode). 128-bit quad-precision floats are excluded.
3333 Variable sized aggregates should never arrive here, since we should
3334 have already decided to pass them by reference. Top-level zero-sized
3335 aggregates are excluded because our parallels crash the middle-end. */
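/* Hypothetical examples (not from this file) of how the function
   below classifies types:  */
#if 0
struct hfa3  { float x, y, z; };	/* HFA; element mode SFmode.  */
struct mixed { float x; double y; };	/* mixed modes; VOIDmode.  */
/* Quad-precision (128-bit) elements are likewise rejected, per the
   comment above.  */
#endif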
3337 static enum machine_mode
3338 hfa_element_mode (tree type, bool nested)
3340 enum machine_mode element_mode = VOIDmode;
3341 enum machine_mode mode;
3342 enum tree_code code = TREE_CODE (type);
3343 int know_element_mode = 0;
3346 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3351 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3352 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3353 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3354 case FILE_TYPE: case LANG_TYPE: case FUNCTION_TYPE:
3357 /* Fortran complex types are supposed to be HFAs, so we need to handle
3358 gcc's COMPLEX_TYPEs as HFAs.  We need to exclude the integral complex types.  */
3361 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3362 && TYPE_MODE (type) != TCmode)
3363 return GET_MODE_INNER (TYPE_MODE (type));
3368 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3369 mode if this is contained within an aggregate. */
3370 if (nested && TYPE_MODE (type) != TFmode)
3371 return TYPE_MODE (type);
3376 return hfa_element_mode (TREE_TYPE (type), 1);
3380 case QUAL_UNION_TYPE:
3381 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3383 if (TREE_CODE (t) != FIELD_DECL)
3386 mode = hfa_element_mode (TREE_TYPE (t), 1);
3387 if (know_element_mode)
3389 if (mode != element_mode)
3392 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3396 know_element_mode = 1;
3397 element_mode = mode;
3400 return element_mode;
3403 /* If we reach here, we probably have some front-end specific type
3404 that the backend doesn't know about. This can happen via the
3405 aggregate_value_p call in init_function_start. All we can do is
3406 ignore unknown tree types. */
3413 /* Return the number of words required to hold a quantity of TYPE and MODE
3414 when passed as an argument. */
3416 ia64_function_arg_words (tree type, enum machine_mode mode)
3420 if (mode == BLKmode)
3421 words = int_size_in_bytes (type);
3423 words = GET_MODE_SIZE (mode);
3425 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
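/* E.g. with 8-byte words, a 12-byte BLKmode struct needs
   (12 + 7) / 8 == 2 words, while a DImode scalar needs exactly 1.  */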
3428 /* Return the number of registers that should be skipped so the current
3429 argument (described by TYPE and WORDS) will be properly aligned.
3431 Integer and float arguments larger than 8 bytes start at the next
3432 even boundary. Aggregates larger than 8 bytes start at the next
3433 even boundary if the aggregate has 16 byte alignment. Note that
3434 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3435 but are still to be aligned in registers.
3437 ??? The ABI does not specify how to handle aggregates with
3438 alignment from 9 to 15 bytes, or greater than 16. We handle them
3439 all as if they had 16 byte alignment. Such aggregates can occur
3440 only if gcc extensions are used. */
3442 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3444 if ((cum->words & 1) == 0)
3448 && TREE_CODE (type) != INTEGER_TYPE
3449 && TREE_CODE (type) != REAL_TYPE)
3450 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
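/* Example: if cum->words is odd and the incoming argument is an
   aggregate with 16-byte alignment, one slot is skipped so that the
   value starts on an even slot; an 8-byte integer incurs no skip.  */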
3455 /* Return rtx for register where argument is passed, or zero if it is passed on the stack.  */
3457 /* ??? 128-bit quad-precision floats are always passed in general registers.  */
3461 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3462 int named, int incoming)
3464 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3465 int words = ia64_function_arg_words (type, mode);
3466 int offset = ia64_function_arg_offset (cum, type, words);
3467 enum machine_mode hfa_mode = VOIDmode;
3469 /* If all argument slots are used, then it must go on the stack. */
3470 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3473 /* Check for and handle homogeneous FP aggregates. */
3475 hfa_mode = hfa_element_mode (type, 0);
3477 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3478 and unprototyped hfas are passed specially. */
3479 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3483 int fp_regs = cum->fp_regs;
3484 int int_regs = cum->words + offset;
3485 int hfa_size = GET_MODE_SIZE (hfa_mode);
3489 /* If prototyped, pass it in FR regs then GR regs.
3490 If not prototyped, pass it in both FR and GR regs.
3492 If this is an SFmode aggregate, then it is possible to run out of
3493 FR regs while GR regs are still left. In that case, we pass the
3494 remaining part in the GR regs. */
3496 /* Fill the FP regs. We do this always. We stop if we reach the end
3497 of the argument, the last FP register, or the last argument slot. */
3499 byte_size = ((mode == BLKmode)
3500 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3501 args_byte_size = int_regs * UNITS_PER_WORD;
3503 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3504 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3506 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3507 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3511 args_byte_size += hfa_size;
3515 /* If no prototype, then the whole thing must go in GR regs. */
3516 if (! cum->prototype)
3518 /* If this is an SFmode aggregate, then we might have some left over
3519 that needs to go in GR regs. */
3520 else if (byte_size != offset)
3521 int_regs += offset / UNITS_PER_WORD;
3523 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3525 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3527 enum machine_mode gr_mode = DImode;
3528 unsigned int gr_size;
3530 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3531 then this goes in a GR reg left adjusted/little endian, right
3532 adjusted/big endian. */
3533 /* ??? Currently this is handled wrong, because 4-byte hunks are
3534 always right adjusted/little endian. */
3537 /* If we have an even 4 byte hunk because the aggregate is a
3538 multiple of 4 bytes in size, then this goes in a GR reg right
3539 adjusted/little endian. */
3540 else if (byte_size - offset == 4)
3543 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3544 gen_rtx_REG (gr_mode, (basereg
3548 gr_size = GET_MODE_SIZE (gr_mode);
3550 if (gr_size == UNITS_PER_WORD
3551 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3553 else if (gr_size > UNITS_PER_WORD)
3554 int_regs += gr_size / UNITS_PER_WORD;
3556 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3559 /* Integral and aggregates go in general registers. If we have run out of
3560 FR registers, then FP values must also go in general registers. This can
3561 happen when we have a SFmode HFA. */
3562 else if (mode == TFmode || mode == TCmode
3563 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3565 int byte_size = ((mode == BLKmode)
3566 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3567 if (BYTES_BIG_ENDIAN
3568 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3569 && byte_size < UNITS_PER_WORD
3572 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3573 gen_rtx_REG (DImode,
3574 (basereg + cum->words
3577 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3580 return gen_rtx_REG (mode, basereg + cum->words + offset);
3584 /* If there is a prototype, then FP values go in a FR register when
3585 named, and in a GR register when unnamed. */
3586 else if (cum->prototype)
3589 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3590 /* In big-endian mode, an anonymous SFmode value must be represented
3591 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3592 the value into the high half of the general register. */
3593 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3594 return gen_rtx_PARALLEL (mode,
3596 gen_rtx_EXPR_LIST (VOIDmode,
3597 gen_rtx_REG (DImode, basereg + cum->words + offset),
3600 return gen_rtx_REG (mode, basereg + cum->words + offset);
3602 /* If there is no prototype, then FP values go in both FR and GR registers.  */
3606 /* See comment above. */
3607 enum machine_mode inner_mode =
3608 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3610 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3611 gen_rtx_REG (mode, (FR_ARG_FIRST
3614 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3615 gen_rtx_REG (inner_mode,
3616 (basereg + cum->words
3620 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3624 /* Return number of bytes, at the beginning of the argument, that must be
3625 put in registers.  0 if the argument is entirely in registers or entirely in memory.  */
3629 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3630 tree type, bool named ATTRIBUTE_UNUSED)
3632 int words = ia64_function_arg_words (type, mode);
3633 int offset = ia64_function_arg_offset (cum, type, words);
3635 /* If all argument slots are used, then it must go on the stack. */
3636 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3639 /* It doesn't matter whether the argument goes in FR or GR regs. If
3640 it fits within the 8 argument slots, then it goes entirely in
3641 registers. If it extends past the last argument slot, then the rest
3642 goes on the stack. */
3644 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3647 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
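/* Worked example: a 4-word argument arriving at cum->words == 6 with
   no alignment skip fits 8 - 6 == 2 words in registers, so 16 bytes
   are reported as passed in registers and the remaining 2 words go
   on the stack.  */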
3650 /* Update CUM to point after this argument. This is patterned after
3651 ia64_function_arg. */
3654 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3655 tree type, int named)
3657 int words = ia64_function_arg_words (type, mode);
3658 int offset = ia64_function_arg_offset (cum, type, words);
3659 enum machine_mode hfa_mode = VOIDmode;
3661 /* If all arg slots are already full, then there is nothing to do. */
3662 if (cum->words >= MAX_ARGUMENT_SLOTS)
3665 cum->words += words + offset;
3667 /* Check for and handle homogeneous FP aggregates. */
3669 hfa_mode = hfa_element_mode (type, 0);
3671 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3672 and unprototyped hfas are passed specially. */
3673 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3675 int fp_regs = cum->fp_regs;
3676 /* This is the original value of cum->words + offset. */
3677 int int_regs = cum->words - words;
3678 int hfa_size = GET_MODE_SIZE (hfa_mode);
3682 /* If prototyped, pass it in FR regs then GR regs.
3683 If not prototyped, pass it in both FR and GR regs.
3685 If this is an SFmode aggregate, then it is possible to run out of
3686 FR regs while GR regs are still left. In that case, we pass the
3687 remaining part in the GR regs. */
3689 /* Fill the FP regs. We do this always. We stop if we reach the end
3690 of the argument, the last FP register, or the last argument slot. */
3692 byte_size = ((mode == BLKmode)
3693 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3694 args_byte_size = int_regs * UNITS_PER_WORD;
3696 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3697 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3700 args_byte_size += hfa_size;
3704 cum->fp_regs = fp_regs;
3707 /* Integral and aggregates go in general registers. So do TFmode FP values.
3708 If we have run out of FR registers, then other FP values must also go in
3709 general registers. This can happen when we have a SFmode HFA. */
3710 else if (mode == TFmode || mode == TCmode
3711 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3712 cum->int_regs = cum->words;
3714 /* If there is a prototype, then FP values go in a FR register when
3715 named, and in a GR register when unnamed. */
3716 else if (cum->prototype)
3719 cum->int_regs = cum->words;
3721 /* ??? Complex types should not reach here. */
3722 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3724 /* If there is no prototype, then FP values go in both FR and GR registers.  */
3728 /* ??? Complex types should not reach here. */
3729 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3730 cum->int_regs = cum->words;
3734 /* Arguments with alignment larger than 8 bytes start at the next even
3735 boundary.  On ILP32 HPUX, TFmode arguments start on the next even boundary
3736 even though their normal alignment is 8 bytes. See ia64_function_arg. */
3739 ia64_function_arg_boundary (enum machine_mode mode, tree type)
3742 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
3743 return PARM_BOUNDARY * 2;
3747 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
3748 return PARM_BOUNDARY * 2;
3750 return PARM_BOUNDARY;
3753 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
3754 return PARM_BOUNDARY * 2;
3756 return PARM_BOUNDARY;
3759 /* Variable sized types are passed by reference. */
3760 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3763 ia64_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3764 enum machine_mode mode ATTRIBUTE_UNUSED,
3765 tree type, bool named ATTRIBUTE_UNUSED)
3767 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3770 /* True if it is OK to do sibling call optimization for the specified
3771 call expression EXP. DECL will be the called function, or NULL if
3772 this is an indirect call. */
3774 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3776 /* We can't perform a sibcall if the current function has the syscall_linkage attribute.  */
3778 if (lookup_attribute ("syscall_linkage",
3779 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
3782 /* We must always return with our current GP. This means we can
3783 only sibcall to functions defined in the current module. */
3784 return decl && (*targetm.binds_local_p) (decl);
3788 /* Implement va_arg. */
3791 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3793 /* Variable sized types are passed by reference. */
3794 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
3796 tree ptrtype = build_pointer_type (type);
3797 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3798 return build_va_arg_indirect_ref (addr);
3801 /* Aggregate arguments with alignment larger than 8 bytes start at
3802 the next even boundary. Integer and floating point arguments
3803 do so if they are larger than 8 bytes, whether or not they are
3804 also aligned larger than 8 bytes. */
3805 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3806 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3808 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3809 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
3810 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3811 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
3812 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3813 gimplify_and_add (t, pre_p);
3816 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
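/* The alignment fixup built above is equivalent to the C expression
   (with 8-byte words):

	valist = (valist + 15) & -16;

   rounding the argument pointer up to the next even-slot boundary
   before the standard va_arg expansion takes over.  */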
3819 /* Return 1 if the function return value is returned in memory.  Return 0 if it is in a register.  */
3823 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3825 enum machine_mode mode;
3826 enum machine_mode hfa_mode;
3827 HOST_WIDE_INT byte_size;
3829 mode = TYPE_MODE (valtype);
3830 byte_size = GET_MODE_SIZE (mode);
3831 if (mode == BLKmode)
3833 byte_size = int_size_in_bytes (valtype);
3838 /* HFAs with up to 8 elements are returned in the FP argument registers.  */
3840 hfa_mode = hfa_element_mode (valtype, 0);
3841 if (hfa_mode != VOIDmode)
3843 int hfa_size = GET_MODE_SIZE (hfa_mode);
3845 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3850 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3856 /* Return rtx for register that holds the function return value. */
3859 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3861 enum machine_mode mode;
3862 enum machine_mode hfa_mode;
3864 mode = TYPE_MODE (valtype);
3865 hfa_mode = hfa_element_mode (valtype, 0);
3867 if (hfa_mode != VOIDmode)
3875 hfa_size = GET_MODE_SIZE (hfa_mode);
3876 byte_size = ((mode == BLKmode)
3877 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3879 for (i = 0; offset < byte_size; i++)
3881 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3882 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3886 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3888 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
3889 return gen_rtx_REG (mode, FR_ARG_FIRST);
3892 bool need_parallel = false;
3894 /* In big-endian mode, we need to manage the layout of aggregates
3895 in the registers so that we get the bits properly aligned in
3896 the highpart of the registers. */
3897 if (BYTES_BIG_ENDIAN
3898 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3899 need_parallel = true;
3901 /* Something like struct S { long double x; char a[0]; } is not an
3902 HFA structure, and therefore doesn't go in fp registers. But
3903 the middle-end will give it XFmode anyway, and XFmode values
3904 don't normally fit in integer registers. So we need to smuggle
3905 the value inside a parallel. */
3906 else if (mode == XFmode || mode == XCmode)
3907 need_parallel = true;
3917 bytesize = int_size_in_bytes (valtype);
3918 /* An empty PARALLEL is invalid here, but the return value
3919 doesn't matter for empty structs. */
3921 return gen_rtx_REG (mode, GR_RET_FIRST);
3922 for (i = 0; offset < bytesize; i++)
3924 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3925 gen_rtx_REG (DImode,
3928 offset += UNITS_PER_WORD;
3930 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3933 return gen_rtx_REG (mode, GR_RET_FIRST);
3937 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3938 We need to emit DTP-relative relocations. */
3941 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3945 fputs ("\tdata8.ua\t@dtprel(", file);
3946 output_addr_const (file, x);
3950 /* Print a memory address as an operand to reference that memory location. */
3952 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3953 also call this from ia64_print_operand for memory addresses. */
3956 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3957 rtx address ATTRIBUTE_UNUSED)
3961 /* Print an operand to an assembler instruction.
3962 C Swap and print a comparison operator.
3963 D Print an FP comparison operator.
3964 E Print 32 - constant, for SImode shifts as extract.
3965 e Print 64 - constant, for DImode rotates.
3966 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3967 a floating point register emitted normally.
3968 I Invert a predicate register by adding 1.
3969 J Select the proper predicate register for a condition.
3970 j Select the inverse predicate register for a condition.
3971 O Append .acq for volatile load.
3972 P Postincrement of a MEM.
3973 Q Append .rel for volatile store.
3974 S Shift amount for shladd instruction.
3975 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3976 for Intel assembler.
3977 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3978 for Intel assembler.
3979 r Print register name, or constant 0 as r0.  HP compatibility for the Linux kernel.
3981 v Print vector constant value as an 8-byte integer value. */
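/* For instance, for a volatile (mem:DI (post_inc (reg:DI r2))) the
   'O' code appends ".acq" and the 'P' code prints ", 8", so a
   hypothetical template such as "ld8%O1 %0 = %1%P1" could expand to
   "ld8.acq r8 = [r2], 8" (register numbers illustrative).  */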
3984 ia64_print_operand (FILE * file, rtx x, int code)
3991 /* Handled below. */
3996 enum rtx_code c = swap_condition (GET_CODE (x));
3997 fputs (GET_RTX_NAME (c), file);
4002 switch (GET_CODE (x))
4014 str = GET_RTX_NAME (GET_CODE (x));
4021 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4025 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4029 if (x == CONST0_RTX (GET_MODE (x)))
4030 str = reg_names [FR_REG (0)];
4031 else if (x == CONST1_RTX (GET_MODE (x)))
4032 str = reg_names [FR_REG (1)];
4033 else if (GET_CODE (x) == REG)
4034 str = reg_names [REGNO (x)];
4041 fputs (reg_names [REGNO (x) + 1], file);
4047 unsigned int regno = REGNO (XEXP (x, 0));
4048 if (GET_CODE (x) == EQ)
4049 regno += 1;
4050 if (code == 'j')
4051 regno ^= 1;
4052 fputs (reg_names [regno], file);
4057 if (MEM_VOLATILE_P (x))
4058 fputs(".acq", file);
4063 HOST_WIDE_INT value;
4065 switch (GET_CODE (XEXP (x, 0)))
4071 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4072 if (GET_CODE (x) == CONST_INT)
4074 else if (GET_CODE (x) == REG)
4076 fprintf (file, ", %s", reg_names[REGNO (x)]);
4084 value = GET_MODE_SIZE (GET_MODE (x));
4088 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4092 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4097 if (MEM_VOLATILE_P (x))
4098 fputs(".rel", file);
4102 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4106 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4108 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4114 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4116 const char *prefix = "0x";
4117 if (INTVAL (x) & 0x80000000)
4119 fprintf (file, "0xffffffff");
4122 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4128 /* If this operand is the constant zero, write it as register zero.
4129 Any register, zero, or CONST_INT value is OK here. */
4130 if (GET_CODE (x) == REG)
4131 fputs (reg_names[REGNO (x)], file);
4132 else if (x == CONST0_RTX (GET_MODE (x)))
4134 else if (GET_CODE (x) == CONST_INT)
4135 output_addr_const (file, x);
4137 output_operand_lossage ("invalid %%r value");
4141 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4142 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4149 /* For conditional branches, returns or calls, substitute
4150 sptk, dptk, dpnt, or spnt for %s. */
4151 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4154 int pred_val = INTVAL (XEXP (x, 0));
4156 /* Guess top and bottom 10% statically predicted. */
4157 if (pred_val < REG_BR_PROB_BASE / 50)
4159 else if (pred_val < REG_BR_PROB_BASE / 2)
4161 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
4166 else if (GET_CODE (current_output_insn) == CALL_INSN)
4171 fputs (which, file);
4176 x = current_insn_predicate;
4179 unsigned int regno = REGNO (XEXP (x, 0));
4180 if (GET_CODE (x) == EQ)
4181 regno += 1;
4182 fprintf (file, "(%s) ", reg_names [regno]);
4187 output_operand_lossage ("ia64_print_operand: unknown code");
4191 switch (GET_CODE (x))
4193 /* This happens for the spill/restore instructions. */
4198 /* ... fall through ... */
4201 fputs (reg_names [REGNO (x)], file);
4206 rtx addr = XEXP (x, 0);
4207 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4208 addr = XEXP (addr, 0);
4209 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4214 output_addr_const (file, x);
4221 /* Compute a (partial) cost for rtx X. Return true if the complete
4222 cost has been computed, and false if subexpressions should be
4223 scanned. In either case, *TOTAL contains the cost result. */
4224 /* ??? This is incomplete. */
4227 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
4235 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
4238 if (CONST_OK_FOR_I (INTVAL (x)))
4240 else if (CONST_OK_FOR_J (INTVAL (x)))
4243 *total = COSTS_N_INSNS (1);
4246 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
4249 *total = COSTS_N_INSNS (1);
4254 *total = COSTS_N_INSNS (1);
4260 *total = COSTS_N_INSNS (3);
4264 /* For multiplies wider than HImode, we have to go to the FPU,
4265 which normally involves copies. Plus there's the latency
4266 of the multiply itself, and the latency of the instructions to
4267 transfer integer regs to FP regs. */
4268 /* ??? Check for FP mode. */
4269 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4270 *total = COSTS_N_INSNS (10);
4272 *total = COSTS_N_INSNS (2);
4280 *total = COSTS_N_INSNS (1);
4287 /* We make divide expensive, so that divide-by-constant will be
4288 optimized to a multiply. */
4289 *total = COSTS_N_INSNS (60);
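/* A worked example of the intent (hypothetical input, standard
   strength reduction): with DIV priced at 60 insns, x / 3 for unsigned
   64-bit x is instead expanded as a high multiply by the magic
   constant 0xAAAAAAAAAAAAAAAB followed by a right shift, costing only
   a few instructions.  */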
4297 /* Calculate the cost of moving data from a register in class FROM to
4298 one in class TO, using MODE. */
4301 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4304 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4305 if (to == ADDL_REGS)
4307 if (from == ADDL_REGS)
4310 /* All costs are symmetric, so reduce cases by putting the
4311 lower-numbered class as the destination. */
4314 enum reg_class tmp = to;
4315 to = from, from = tmp;
4318 /* Moving between FR and GR in XFmode must be more expensive than 2,
4319 so that we get secondary memory reloads. Between FR_REGS,
4320 we have to make this at least as expensive as MEMORY_MOVE_COST
4321 to avoid spectacularly poor register class preferencing. */
4324 if (to != GR_REGS || from != GR_REGS)
4325 return MEMORY_MOVE_COST (mode, to, 0);
4333 /* Moving between PR registers takes two insns. */
4334 if (from == PR_REGS)
4336 /* Moving between PR and anything but GR is impossible. */
4337 if (from != GR_REGS)
4338 return MEMORY_MOVE_COST (mode, to, 0);
4342 /* Moving between BR and anything but GR is impossible. */
4343 if (from != GR_REGS && from != GR_AND_BR_REGS)
4344 return MEMORY_MOVE_COST (mode, to, 0);
4349 /* Moving between AR and anything but GR is impossible. */
4350 if (from != GR_REGS)
4351 return MEMORY_MOVE_COST (mode, to, 0);
4356 case GR_AND_FR_REGS:
4357 case GR_AND_BR_REGS:
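/* Illustrative consequences of the costs above (a sketch; exact
   numbers depend on MEMORY_MOVE_COST): an XFmode copy between FR and
   GR is priced like a memory move, so reload spills it through
   memory, and a PR value headed anywhere but GR must be staged
   through a general register first.  */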
4368 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on CLASS
4369 to use when copying X into that class. */
4372 ia64_preferred_reload_class (rtx x, enum reg_class class)
4377 /* Don't allow volatile mem reloads into floating point registers.
4378 This is defined to force reload to choose the r/m case instead
4379 of the f/f case when reloading (set (reg fX) (mem/v)). */
4380 if (MEM_P (x) && MEM_VOLATILE_P (x))
4383 /* Force all unrecognized constants into the constant pool. */
4401 /* This function returns the register class required for a secondary
4402 register when copying between one of the registers in CLASS, and X,
4403 using MODE. A return value of NO_REGS means that no secondary register
4407 ia64_secondary_reload_class (enum reg_class class,
4408 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
4412 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
4413 regno = true_regnum (x);
4420 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
4421 interaction. We end up with two pseudos with overlapping lifetimes
4422 both of which are equiv to the same constant, and both of which need
4423 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
4424 changes depending on the path length, which means the qty_first_reg
4425 check in make_regs_eqv can give different answers at different times.
4426 At some point I'll probably need a reload_indi pattern to handle this.
4429 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
4430 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
4431 non-general registers for good measure. */
4432 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
4435 /* This is needed if a pseudo used as a call_operand gets spilled to a
4436 stack slot. */
4437 if (GET_CODE (x) == MEM)
4438 return GR_REGS;
4442 /* Need to go through general registers to get to other class regs. */
4443 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4444 return GR_REGS;
4446 /* This can happen when a paradoxical subreg is an operand to the
4447 muldi3 pattern. */
4448 /* ??? This shouldn't be necessary after instruction scheduling is
4449 enabled, because paradoxical subregs are not accepted by
4450 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4451 stop the paradoxical subreg stupidity in the *_operand functions
4452 in recog.c. */
4453 if (GET_CODE (x) == MEM
4454 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4455 || GET_MODE (x) == QImode))
4458 /* This can happen because of the ior/and/etc patterns that accept FP
4459 registers as operands. If the third operand is a constant, then it
4460 needs to be reloaded into a FP register. */
4461 if (GET_CODE (x) == CONST_INT)
4462 return GR_REGS;
4464 /* This can happen because of register elimination in a muldi3 insn.
4465 E.g. `26107 * (unsigned long)&u'. */
4466 if (GET_CODE (x) == PLUS)
4471 /* ??? This happens if we cse/gcse a BImode value across a call,
4472 and the function has a nonlocal goto. This is because global
4473 does not allocate call crossing pseudos to hard registers when
4474 current_function_has_nonlocal_goto is true. This is relatively
4475 common for C++ programs that use exceptions. To reproduce,
4476 return NO_REGS and compile libstdc++. */
4477 if (GET_CODE (x) == MEM)
4478 return GR_REGS;
4480 /* This can happen when we take a BImode subreg of a DImode value,
4481 and that DImode value winds up in some non-GR register. */
4482 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
4483 return GR_REGS;
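/* Example of the case above (a sketch): a DImode pseudo that landed
   in an FR register and is then read through a BImode subreg gets
   GR_REGS as its secondary reload class, since no direct FR<->PR
   copy exists.  */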
4494 /* Emit text to declare externally defined variables and functions, because
4495 the Intel assembler does not support undefined externals. */
4498 ia64_asm_output_external (FILE *file, tree decl, const char *name)
4500 int save_referenced;
4502 /* GNU as does not need anything here, but the HP linker does need
4503 something for external functions. */
4507 || TREE_CODE (decl) != FUNCTION_DECL
4508 || strstr (name, "__builtin_") == name))
4511 /* ??? The Intel assembler creates a reference that needs to be satisfied by
4512 the linker when we do this, so we need to be careful not to do this for
4513 builtin functions which have no library equivalent. Unfortunately, we
4514 can't tell here whether or not a function will actually be called by
4515 expand_expr, so we pull in library functions even if we may not need
4516 them. */
4517 if (! strcmp (name, "__builtin_next_arg")
4518 || ! strcmp (name, "alloca")
4519 || ! strcmp (name, "__builtin_constant_p")
4520 || ! strcmp (name, "__builtin_args_info"))
4524 ia64_hpux_add_extern_decl (decl);
4527 /* assemble_name will set TREE_SYMBOL_REFERENCED, so we must save and
4528 restore it. */
4529 save_referenced = TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl));
4530 if (TREE_CODE (decl) == FUNCTION_DECL)
4531 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
4532 (*targetm.asm_out.globalize_label) (file, name);
4533 TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)) = save_referenced;
4537 /* Parse the -mfixed-range= option string. */
4540 fix_range (const char *const_str)
4543 char *str, *dash, *comma;
4545 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4546 REG2 are either register names or register numbers. The effect
4547 of this option is to mark the registers in the range from REG1 to
4548 REG2 as ``fixed'' so they won't be used by the compiler. This is
4549 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
4551 i = strlen (const_str);
4552 str = (char *) alloca (i + 1);
4553 memcpy (str, const_str, i + 1);
4557 dash = strchr (str, '-');
4560 warning ("value of -mfixed-range must have form REG1-REG2");
4565 comma = strchr (dash + 1, ',');
4569 first = decode_reg_name (str);
4572 warning ("unknown register name: %s", str);
4576 last = decode_reg_name (dash + 1);
4579 warning ("unknown register name: %s", dash + 1);
4587 warning ("%s-%s is an empty range", str, dash + 1);
4591 for (i = first; i <= last; ++i)
4592 fixed_regs[i] = call_used_regs[i] = 1;
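/* Usage example: -mfixed-range=f32-f127 marks f32 through f127 fixed
   so generated code never touches them (the kernel case mentioned
   above); several ranges may be given separated by commas, e.g.
   -mfixed-range=f32-f63,f96-f127.  */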
4602 static struct machine_function *
4603 ia64_init_machine_status (void)
4605 return ggc_alloc_cleared (sizeof (struct machine_function));
4608 /* Handle TARGET_OPTIONS switches. */
4611 ia64_override_options (void)
4615 const char *const name; /* processor name or nickname. */
4616 const enum processor_type processor;
4618 const processor_alias_table[] =
4620 {"itanium", PROCESSOR_ITANIUM},
4621 {"itanium1", PROCESSOR_ITANIUM},
4622 {"merced", PROCESSOR_ITANIUM},
4623 {"itanium2", PROCESSOR_ITANIUM2},
4624 {"mckinley", PROCESSOR_ITANIUM2},
4627 int const pta_size = ARRAY_SIZE (processor_alias_table);
4630 if (TARGET_AUTO_PIC)
4631 target_flags |= MASK_CONST_GP;
4633 if (TARGET_INLINE_FLOAT_DIV_LAT && TARGET_INLINE_FLOAT_DIV_THR)
4635 if ((target_flags_explicit & MASK_INLINE_FLOAT_DIV_LAT)
4636 && (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR))
4638 warning ("cannot optimize floating point division for both latency and throughput");
4639 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4643 if (target_flags_explicit & MASK_INLINE_FLOAT_DIV_THR)
4644 target_flags &= ~MASK_INLINE_FLOAT_DIV_LAT;
4645 else
4646 target_flags &= ~MASK_INLINE_FLOAT_DIV_THR;
4650 if (TARGET_INLINE_INT_DIV_LAT && TARGET_INLINE_INT_DIV_THR)
4652 if ((target_flags_explicit & MASK_INLINE_INT_DIV_LAT)
4653 && (target_flags_explicit & MASK_INLINE_INT_DIV_THR))
4655 warning ("cannot optimize integer division for both latency and throughput");
4656 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4660 if (target_flags_explicit & MASK_INLINE_INT_DIV_THR)
4661 target_flags &= ~MASK_INLINE_INT_DIV_LAT;
4662 else
4663 target_flags &= ~MASK_INLINE_INT_DIV_THR;
4667 if (TARGET_INLINE_SQRT_LAT && TARGET_INLINE_SQRT_THR)
4669 if ((target_flags_explicit & MASK_INLINE_SQRT_LAT)
4670 && (target_flags_explicit & MASK_INLINE_SQRT_THR))
4672 warning ("cannot optimize square root for both latency and throughput");
4673 target_flags &= ~MASK_INLINE_SQRT_THR;
4677 if (target_flags_explicit & MASK_INLINE_SQRT_THR)
4678 target_flags &= ~MASK_INLINE_SQRT_LAT;
4679 else
4680 target_flags &= ~MASK_INLINE_SQRT_THR;
4684 if (TARGET_INLINE_SQRT_LAT)
4686 warning ("not yet implemented: latency-optimized inline square root");
4687 target_flags &= ~MASK_INLINE_SQRT_LAT;
4690 if (ia64_fixed_range_string)
4691 fix_range (ia64_fixed_range_string);
4693 if (ia64_tls_size_string)
4696 unsigned long tmp = strtoul (ia64_tls_size_string, &end, 10);
4697 if (*end || (tmp != 14 && tmp != 22 && tmp != 64))
4698 error ("bad value (%s) for -mtls-size= switch", ia64_tls_size_string);
4700 ia64_tls_size = tmp;
4703 if (!ia64_tune_string)
4704 ia64_tune_string = "itanium2";
4706 for (i = 0; i < pta_size; i++)
4707 if (! strcmp (ia64_tune_string, processor_alias_table[i].name))
4709 ia64_tune = processor_alias_table[i].processor;
4714 error ("bad value (%s) for -tune= switch", ia64_tune_string);
4716 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
4717 flag_schedule_insns_after_reload = 0;
4719 /* Variable tracking should be run after all optimizations which change order
4720 of insns. It also needs a valid CFG. */
4721 ia64_flag_var_tracking = flag_var_tracking;
4722 flag_var_tracking = 0;
4724 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
4726 init_machine_status = ia64_init_machine_status;
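/* Usage example: -mtune=mckinley and -mtune=itanium2 behave
   identically here, since the alias table above maps both names to
   PROCESSOR_ITANIUM2.  */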
4729 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
4730 static enum attr_type ia64_safe_type (rtx);
4732 static enum attr_itanium_class
4733 ia64_safe_itanium_class (rtx insn)
4735 if (recog_memoized (insn) >= 0)
4736 return get_attr_itanium_class (insn);
4738 return ITANIUM_CLASS_UNKNOWN;
4741 static enum attr_type
4742 ia64_safe_type (rtx insn)
4744 if (recog_memoized (insn) >= 0)
4745 return get_attr_type (insn);
4747 return TYPE_UNKNOWN;
4750 /* The following collection of routines emits instruction group stop bits as
4751 necessary to avoid dependencies. */
4753 /* Need to track some additional registers as far as serialization is
4754 concerned so we can properly handle br.call and br.ret. We could
4755 make these registers visible to gcc, but since these registers are
4756 never explicitly used in gcc generated code, it seems wasteful to
4757 do so (plus it would make the call and return patterns needlessly
4758 complex). */
4759 #define REG_RP (BR_REG (0))
4760 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
4761 /* This is used for volatile asms which may require a stop bit immediately
4762 before and after them. */
4763 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
4764 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
4765 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
4767 /* For each register, we keep track of how it has been written in the
4768 current instruction group.
4770 If a register is written unconditionally (no qualifying predicate),
4771 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
4773 If a register is written if its qualifying predicate P is true, we
4774 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
4775 may be written again by the complement of P (P^1) and when this happens,
4776 WRITE_COUNT gets set to 2.
4778 The result of this is that whenever an insn attempts to write a register
4779 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
4781 If a predicate register is written by a floating-point insn, we set
4782 WRITTEN_BY_FP to true.
4784 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
4785 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
4787 struct reg_write_state
4789 unsigned int write_count : 2;
4790 unsigned int first_pred : 16;
4791 unsigned int written_by_fp : 1;
4792 unsigned int written_by_and : 1;
4793 unsigned int written_by_or : 1;
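/* A worked example of this tracking (hypothetical insns, assuming p6
   and p7 form a complementary (P, P^1) pair):
     (p6) mov r4 = r5    // write_count becomes 1, first_pred = p6
     (p7) mov r4 = r6    // complement of p6: OK, write_count -> 2
          mov r4 = r7    // write_count already 2: needs a stop bit
   The third write can only issue after an insn group barrier.  */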
4796 /* Cumulative info for the current instruction group. */
4797 struct reg_write_state rws_sum[NUM_REGS];
4798 /* Info for the current instruction. This gets copied to rws_sum after a
4799 stop bit is emitted. */
4800 struct reg_write_state rws_insn[NUM_REGS];
4802 /* Indicates whether this is the first instruction after a stop bit,
4803 in which case we don't need another stop bit. Without this, we hit
4804 the abort in ia64_variable_issue when scheduling an alloc. */
4805 static int first_instruction;
4807 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
4808 RTL for one instruction. */
4811 unsigned int is_write : 1; /* Is register being written? */
4812 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
4813 unsigned int is_branch : 1; /* Is register used as part of a branch? */
4814 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
4815 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
4816 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
4819 static void rws_update (struct reg_write_state *, int, struct reg_flags, int);
4820 static int rws_access_regno (int, struct reg_flags, int);
4821 static int rws_access_reg (rtx, struct reg_flags, int);
4822 static void update_set_flags (rtx, struct reg_flags *, int *, rtx *);
4823 static int set_src_needs_barrier (rtx, struct reg_flags, int, rtx);
4824 static int rtx_needs_barrier (rtx, struct reg_flags, int);
4825 static void init_insn_group_barriers (void);
4826 static int group_barrier_needed_p (rtx);
4827 static int safe_group_barrier_needed_p (rtx);
4829 /* Update *RWS for REGNO, which is being written by the current instruction,
4830 with predicate PRED, and associated register flags in FLAGS. */
4833 rws_update (struct reg_write_state *rws, int regno, struct reg_flags flags, int pred)
4836 rws[regno].write_count++;
4838 rws[regno].write_count = 2;
4839 rws[regno].written_by_fp |= flags.is_fp;
4840 /* ??? Not tracking and/or across differing predicates. */
4841 rws[regno].written_by_and = flags.is_and;
4842 rws[regno].written_by_or = flags.is_or;
4843 rws[regno].first_pred = pred;
4846 /* Handle an access to register REGNO of type FLAGS using predicate register
4847 PRED. Update rws_insn and rws_sum arrays. Return 1 if this access creates
4848 a dependency with an earlier instruction in the same group. */
4851 rws_access_regno (int regno, struct reg_flags flags, int pred)
4853 int need_barrier = 0;
4855 if (regno >= NUM_REGS)
4858 if (! PR_REGNO_P (regno))
4859 flags.is_and = flags.is_or = 0;
4865 /* One insn writes same reg multiple times? */
4866 if (rws_insn[regno].write_count > 0)
4869 /* Update info for current instruction. */
4870 rws_update (rws_insn, regno, flags, pred);
4871 write_count = rws_sum[regno].write_count;
4873 switch (write_count)
4876 /* The register has not been written yet. */
4877 rws_update (rws_sum, regno, flags, pred);
4881 /* The register has been written via a predicate. If this is
4882 not a complementary predicate, then we need a barrier. */
4883 /* ??? This assumes that P and P+1 are always complementary
4884 predicates for P even. */
4885 if (flags.is_and && rws_sum[regno].written_by_and)
4887 else if (flags.is_or && rws_sum[regno].written_by_or)
4889 else if ((rws_sum[regno].first_pred ^ 1) != pred)
4891 rws_update (rws_sum, regno, flags, pred);
4895 /* The register has been unconditionally written already. We
4896 need a barrier. */
4897 if (flags.is_and && rws_sum[regno].written_by_and)
4899 else if (flags.is_or && rws_sum[regno].written_by_or)
4903 rws_sum[regno].written_by_and = flags.is_and;
4904 rws_sum[regno].written_by_or = flags.is_or;
4913 if (flags.is_branch)
4915 /* Branches have several RAW exceptions that allow us to avoid
4916 barriers. */
4918 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
4919 /* RAW dependencies on branch regs are permissible as long
4920 as the writer is a non-branch instruction. Since we
4921 never generate code that uses a branch register written
4922 by a branch instruction, handling this case is
4923 easy. */
4924 return 0;
4926 if (REGNO_REG_CLASS (regno) == PR_REGS
4927 && ! rws_sum[regno].written_by_fp)
4928 /* The predicates of a branch are available within the
4929 same insn group as long as the predicate was written by
4930 something other than a floating-point instruction. */
4934 if (flags.is_and && rws_sum[regno].written_by_and)
4936 if (flags.is_or && rws_sum[regno].written_by_or)
4939 switch (rws_sum[regno].write_count)
4942 /* The register has not been written yet. */
4946 /* The register has been written via a predicate. If this is
4947 not a complementary predicate, then we need a barrier. */
4948 /* ??? This assumes that P and P+1 are always complementary
4949 predicates for P even. */
4950 if ((rws_sum[regno].first_pred ^ 1) != pred)
4955 /* The register has been unconditionally written already. We
4956 need a barrier. */
4965 return need_barrier;
4969 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
4971 int regno = REGNO (reg);
4972 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
4975 return rws_access_regno (regno, flags, pred);
4978 int need_barrier = 0;
4980 need_barrier |= rws_access_regno (regno + n, flags, pred);
4981 return need_barrier;
4985 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
4986 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
4989 update_set_flags (rtx x, struct reg_flags *pflags, int *ppred, rtx *pcond)
4991 rtx src = SET_SRC (x);
4995 switch (GET_CODE (src))
5001 if (SET_DEST (x) == pc_rtx)
5002 /* X is a conditional branch. */
5006 int is_complemented = 0;
5008 /* X is a conditional move. */
5009 rtx cond = XEXP (src, 0);
5010 if (GET_CODE (cond) == EQ)
5011 is_complemented = 1;
5012 cond = XEXP (cond, 0);
5013 if (GET_CODE (cond) != REG
5014 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5017 if (XEXP (src, 1) == SET_DEST (x)
5018 || XEXP (src, 2) == SET_DEST (x))
5020 /* X is a conditional move that conditionally writes the
5023 /* We need another complement in this case. */
5024 if (XEXP (src, 1) == SET_DEST (x))
5025 is_complemented = ! is_complemented;
5027 *ppred = REGNO (cond);
5028 if (is_complemented)
5032 /* ??? If this is a conditional write to the dest, then this
5033 instruction does not actually read one source. This probably
5034 doesn't matter, because that source is also the dest. */
5035 /* ??? Multiple writes to predicate registers are allowed
5036 if they are all AND type compares, or if they are all OR
5037 type compares. We do not generate such instructions
5040 /* ... fall through ... */
5043 if (COMPARISON_P (src)
5044 && GET_MODE_CLASS (GET_MODE (XEXP (src, 0))) == MODE_FLOAT)
5045 /* Set pflags->is_fp to 1 so that we know we're dealing
5046 with a floating point comparison when processing the
5047 destination of the SET. */
5050 /* Discover if this is a parallel comparison. We only handle
5051 and.orcm and or.andcm at present, since we must retain a
5052 strict inverse on the predicate pair. */
5053 else if (GET_CODE (src) == AND)
5055 else if (GET_CODE (src) == IOR)
5062 /* Subroutine of rtx_needs_barrier; this function determines whether the
5063 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5064 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5065 for the current insn. */
5068 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred, rtx cond)
5070 int need_barrier = 0;
5072 rtx src = SET_SRC (x);
5074 if (GET_CODE (src) == CALL)
5075 /* We don't need to worry about the result registers that
5076 get written by a subroutine call. */
5077 return rtx_needs_barrier (src, flags, pred);
5078 else if (SET_DEST (x) == pc_rtx)
5080 /* X is a conditional branch. */
5081 /* ??? This seems redundant, as the caller sets this bit for
5082 all JUMP_INSNs. */
5083 flags.is_branch = 1;
5084 return rtx_needs_barrier (src, flags, pred);
5087 need_barrier = rtx_needs_barrier (src, flags, pred);
5089 /* This instruction unconditionally uses a predicate register. */
5091 need_barrier |= rws_access_reg (cond, flags, 0);
5094 if (GET_CODE (dst) == ZERO_EXTRACT)
5096 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5097 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5098 dst = XEXP (dst, 0);
5100 return need_barrier;
5103 /* Handle an access to rtx X of type FLAGS using predicate register
5104 PRED. Return 1 if this access creates a dependency with an earlier
5105 instruction in the same group. */
5108 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5111 int is_complemented = 0;
5112 int need_barrier = 0;
5113 const char *format_ptr;
5114 struct reg_flags new_flags;
5122 switch (GET_CODE (x))
5125 update_set_flags (x, &new_flags, &pred, &cond);
5126 need_barrier = set_src_needs_barrier (x, new_flags, pred, cond);
5127 if (GET_CODE (SET_SRC (x)) != CALL)
5129 new_flags.is_write = 1;
5130 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5135 new_flags.is_write = 0;
5136 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5138 /* Avoid multiple register writes, in case this is a pattern with
5139 multiple CALL rtx. This avoids an abort in rws_access_reg. */
5140 if (! flags.is_sibcall && ! rws_insn[REG_AR_CFM].write_count)
5142 new_flags.is_write = 1;
5143 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5144 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5145 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5150 /* X is a predicated instruction. */
5152 cond = COND_EXEC_TEST (x);
5155 need_barrier = rtx_needs_barrier (cond, flags, 0);
5157 if (GET_CODE (cond) == EQ)
5158 is_complemented = 1;
5159 cond = XEXP (cond, 0);
5160 if (GET_CODE (cond) != REG
5161 && REGNO_REG_CLASS (REGNO (cond)) != PR_REGS)
5163 pred = REGNO (cond);
5164 if (is_complemented)
5167 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5168 return need_barrier;
5172 /* Clobber & use are for earlier compiler-phases only. */
5177 /* We always emit stop bits for traditional asms. We emit stop bits
5178 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5179 if (GET_CODE (x) != ASM_OPERANDS
5180 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5182 /* Avoid writing the register multiple times if we have multiple
5183 asm outputs. This avoids an abort in rws_access_reg. */
5184 if (! rws_insn[REG_VOLATILE].write_count)
5186 new_flags.is_write = 1;
5187 rws_access_regno (REG_VOLATILE, new_flags, pred);
5192 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5193 We cannot just fall through here since then we would be confused
5194 by the ASM_INPUT rtx inside ASM_OPERANDS, which does not indicate
5195 a traditional asm, unlike its normal usage. */
5197 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5198 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5203 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5205 rtx pat = XVECEXP (x, 0, i);
5206 switch (GET_CODE (pat))
5209 update_set_flags (pat, &new_flags, &pred, &cond);
5210 need_barrier |= set_src_needs_barrier (pat, new_flags,
5217 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5228 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5230 rtx pat = XVECEXP (x, 0, i);
5231 if (GET_CODE (pat) == SET)
5233 if (GET_CODE (SET_SRC (pat)) != CALL)
5235 new_flags.is_write = 1;
5236 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5240 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5241 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5249 if (REGNO (x) == AR_UNAT_REGNUM)
5251 for (i = 0; i < 64; ++i)
5252 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5255 need_barrier = rws_access_reg (x, flags, pred);
5259 /* Find the regs used in memory address computation. */
5260 new_flags.is_write = 0;
5261 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5264 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5265 case SYMBOL_REF: case LABEL_REF: case CONST:
5268 /* Operators with side-effects. */
5269 case POST_INC: case POST_DEC:
5270 if (GET_CODE (XEXP (x, 0)) != REG)
5273 new_flags.is_write = 0;
5274 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5275 new_flags.is_write = 1;
5276 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5280 if (GET_CODE (XEXP (x, 0)) != REG)
5283 new_flags.is_write = 0;
5284 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5285 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5286 new_flags.is_write = 1;
5287 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5290 /* Handle common unary and binary ops for efficiency. */
5291 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5292 case MOD: case UDIV: case UMOD: case AND: case IOR:
5293 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5294 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5295 case NE: case EQ: case GE: case GT: case LE:
5296 case LT: case GEU: case GTU: case LEU: case LTU:
5297 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5298 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5301 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5302 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5303 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5304 case SQRT: case FFS: case POPCOUNT:
5305 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5309 /* VEC_SELECT's second argument is a PARALLEL with integers that
5310 describe the elements selected. On ia64, those integers are
5311 always constants. Avoid walking the PARALLEL so that we don't
5312 get confused with "normal" parallels and abort. */
5313 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5317 switch (XINT (x, 1))
5319 case UNSPEC_LTOFF_DTPMOD:
5320 case UNSPEC_LTOFF_DTPREL:
5322 case UNSPEC_LTOFF_TPREL:
5324 case UNSPEC_PRED_REL_MUTEX:
5325 case UNSPEC_PIC_CALL:
5327 case UNSPEC_FETCHADD_ACQ:
5328 case UNSPEC_BSP_VALUE:
5329 case UNSPEC_FLUSHRS:
5330 case UNSPEC_BUNDLE_SELECTOR:
5333 case UNSPEC_GR_SPILL:
5334 case UNSPEC_GR_RESTORE:
5336 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5337 HOST_WIDE_INT bit = (offset >> 3) & 63;
5339 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5340 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5341 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5346 case UNSPEC_FR_SPILL:
5347 case UNSPEC_FR_RESTORE:
5348 case UNSPEC_GETF_EXP:
5349 case UNSPEC_SETF_EXP:
5351 case UNSPEC_FR_SQRT_RECIP_APPROX:
5352 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5355 case UNSPEC_FR_RECIP_APPROX:
5357 case UNSPEC_COPYSIGN:
5358 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5359 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5362 case UNSPEC_CMPXCHG_ACQ:
5363 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5364 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5372 case UNSPEC_VOLATILE:
5373 switch (XINT (x, 1))
5376 /* Alloc must always be the first instruction of a group.
5377 We force this by always returning true. */
5378 /* ??? We might get better scheduling if we explicitly check for
5379 input/local/output register dependencies, and modify the
5380 scheduler so that alloc is always reordered to the start of
5381 the current group. We could then eliminate all of the
5382 first_instruction code. */
5383 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5385 new_flags.is_write = 1;
5386 rws_access_regno (REG_AR_CFM, new_flags, pred);
5389 case UNSPECV_SET_BSP:
5393 case UNSPECV_BLOCKAGE:
5394 case UNSPECV_INSN_GROUP_BARRIER:
5396 case UNSPECV_PSAC_ALL:
5397 case UNSPECV_PSAC_NORMAL:
5406 new_flags.is_write = 0;
5407 need_barrier = rws_access_regno (REG_RP, flags, pred);
5408 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5410 new_flags.is_write = 1;
5411 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5412 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5416 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5417 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5418 switch (format_ptr[i])
5420 case '0': /* unused field */
5421 case 'i': /* integer */
5422 case 'n': /* note */
5423 case 'w': /* wide integer */
5424 case 's': /* pointer to string */
5425 case 'S': /* optional pointer to string */
5429 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5434 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5435 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5444 return need_barrier;
5447 /* Clear out the state for group_barrier_needed_p at the start of a
5448 sequence of insns. */
5451 init_insn_group_barriers (void)
5453 memset (rws_sum, 0, sizeof (rws_sum));
5454 first_instruction = 1;
5457 /* Given the current state, recorded by previous calls to this function,
5458 determine whether a group barrier (a stop bit) is necessary before INSN.
5459 Return nonzero if so. */
5462 group_barrier_needed_p (rtx insn)
5465 int need_barrier = 0;
5466 struct reg_flags flags;
5468 memset (&flags, 0, sizeof (flags));
5469 switch (GET_CODE (insn))
5475 /* A barrier doesn't imply an instruction group boundary. */
5479 memset (rws_insn, 0, sizeof (rws_insn));
5483 flags.is_branch = 1;
5484 flags.is_sibcall = SIBLING_CALL_P (insn);
5485 memset (rws_insn, 0, sizeof (rws_insn));
5487 /* Don't bundle a call following another call. */
5488 if ((pat = prev_active_insn (insn))
5489 && GET_CODE (pat) == CALL_INSN)
5495 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
5499 flags.is_branch = 1;
5501 /* Don't bundle a jump following a call. */
5502 if ((pat = prev_active_insn (insn))
5503 && GET_CODE (pat) == CALL_INSN)
5511 if (GET_CODE (PATTERN (insn)) == USE
5512 || GET_CODE (PATTERN (insn)) == CLOBBER)
5513 /* Don't care about USE and CLOBBER "insns"---those are used to
5514 indicate to the optimizer that it shouldn't get rid of
5515 certain operations. */
5518 pat = PATTERN (insn);
5520 /* Ug. Hack hacks hacked elsewhere. */
5521 switch (recog_memoized (insn))
5523 /* We play dependency tricks with the epilogue in order
5524 to get proper schedules. Undo this for dv analysis. */
5525 case CODE_FOR_epilogue_deallocate_stack:
5526 case CODE_FOR_prologue_allocate_stack:
5527 pat = XVECEXP (pat, 0, 0);
5530 /* The pattern we use for br.cloop confuses the code above.
5531 The second element of the vector is representative. */
5532 case CODE_FOR_doloop_end_internal:
5533 pat = XVECEXP (pat, 0, 1);
5536 /* Doesn't generate code. */
5537 case CODE_FOR_pred_rel_mutex:
5538 case CODE_FOR_prologue_use:
5545 memset (rws_insn, 0, sizeof (rws_insn));
5546 need_barrier = rtx_needs_barrier (pat, flags, 0);
5548 /* Check to see if the previous instruction was a volatile
5549 asm. */
5551 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
5558 if (first_instruction && INSN_P (insn)
5559 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
5560 && GET_CODE (PATTERN (insn)) != USE
5561 && GET_CODE (PATTERN (insn)) != CLOBBER)
5564 first_instruction = 0;
5567 return need_barrier;
5570 /* Like group_barrier_needed_p, but do not clobber the current state. */
5573 safe_group_barrier_needed_p (rtx insn)
5575 struct reg_write_state rws_saved[NUM_REGS];
5576 int saved_first_instruction;
5579 memcpy (rws_saved, rws_sum, NUM_REGS * sizeof *rws_saved);
5580 saved_first_instruction = first_instruction;
5582 t = group_barrier_needed_p (insn);
5584 memcpy (rws_sum, rws_saved, NUM_REGS * sizeof *rws_saved);
5585 first_instruction = saved_first_instruction;
5590 /* Scan the current function and insert stop bits as necessary to
5591 eliminate dependencies. This function assumes that a final
5592 instruction scheduling pass has been run which has already
5593 inserted most of the necessary stop bits. This function only
5594 inserts new ones at basic block boundaries, since these are
5595 invisible to the scheduler. */
5598 emit_insn_group_barriers (FILE *dump)
5602 int insns_since_last_label = 0;
5604 init_insn_group_barriers ();
5606 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5608 if (GET_CODE (insn) == CODE_LABEL)
5610 if (insns_since_last_label)
5612 insns_since_last_label = 0;
5614 else if (GET_CODE (insn) == NOTE
5615 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
5617 if (insns_since_last_label)
5619 insns_since_last_label = 0;
5621 else if (GET_CODE (insn) == INSN
5622 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
5623 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
5625 init_insn_group_barriers ();
5628 else if (INSN_P (insn))
5630 insns_since_last_label = 1;
5632 if (group_barrier_needed_p (insn))
5637 fprintf (dump, "Emitting stop before label %d\n",
5638 INSN_UID (last_label));
5639 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
5642 init_insn_group_barriers ();
5650 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
5651 This function has to emit all necessary group barriers. */
5654 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
5658 init_insn_group_barriers ();
5660 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5662 if (GET_CODE (insn) == BARRIER)
5664 rtx last = prev_active_insn (insn);
5668 if (GET_CODE (last) == JUMP_INSN
5669 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
5670 last = prev_active_insn (last);
5671 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
5672 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
5674 init_insn_group_barriers ();
5676 else if (INSN_P (insn))
5678 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
5679 init_insn_group_barriers ();
5680 else if (group_barrier_needed_p (insn))
5682 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
5683 init_insn_group_barriers ();
5684 group_barrier_needed_p (insn);
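/* The barrier generated above prints as the stop bit ";;" in the
   assembly output, e.g. (a sketch):
     ld8 r14 = [r15] ;;
     add r16 = r14, r17
   where ";;" closes the instruction group so the add may safely read
   the r14 written by the load.  */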
5692 /* Instruction scheduling support. */
5694 #define NR_BUNDLES 10
5696 /* A list of names of all available bundles. */
5698 static const char *bundle_name [NR_BUNDLES] =
5704 #if NR_BUNDLES == 10
5714 /* Nonzero if we should insert stop bits into the schedule. */
5716 int ia64_final_schedule = 0;
5718 /* Codes of the corresponding queried units: */
5720 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
5721 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
5723 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
5724 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
5726 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
5728 /* The following variable value is an insn group barrier. */
5730 static rtx dfa_stop_insn;
5732 /* The following variable value is the last issued insn. */
5734 static rtx last_scheduled_insn;
5736 /* The following variable value is size of the DFA state. */
5738 static size_t dfa_state_size;
5740 /* The following variable value is a pointer to a DFA state used as
5741 a temporary variable. */
5743 static state_t temp_dfa_state = NULL;
5745 /* The following variable value is the DFA state after issuing the last
5746 insn. */
5748 static state_t prev_cycle_state = NULL;
5750 /* The following array element values are TRUE if the corresponding
5751 insn requires stop bits to be added before it. */
5753 static char *stops_p;
5755 /* The following variable is used to set up the array mentioned above. */
5757 static int stop_before_p = 0;
5759 /* The following variable value is the length of the arrays `clocks' and
5760 `add_cycles'. */
5762 static int clocks_length;
5764 /* The following array element values are cycles on which the
5765 corresponding insn will be issued. The array is used only for
5766 Itanium1. */
5768 static int *clocks;
5770 /* The following array element values are the numbers of cycles that
5771 should be added to improve insn scheduling for MM_insns for Itanium1. */
5773 static int *add_cycles;
5775 static rtx ia64_single_set (rtx);
5776 static void ia64_emit_insn_before (rtx, rtx);
5778 /* Map a bundle number to its pseudo-op. */
5781 get_bundle_name (int b)
5783 return bundle_name[b];
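/* For example, assuming bundle_name holds the ten IA-64 templates in
   the order of the DFA unit names above (_0mii_ ... _0mlx_),
   get_bundle_name (0) yields ".mii" and get_bundle_name (9) yields
   ".mlx".  */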
5787 /* Return the maximum number of instructions a cpu can issue. */
5790 ia64_issue_rate (void)
5795 /* Helper function - like single_set, but look inside COND_EXEC. */
5798 ia64_single_set (rtx insn)
5800 rtx x = PATTERN (insn), ret;
5801 if (GET_CODE (x) == COND_EXEC)
5802 x = COND_EXEC_CODE (x);
5803 if (GET_CODE (x) == SET)
5806 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
5807 Although they are not classical single set, the second set is there just
5808 to protect it from moving past FP-relative stack accesses. */
5809 switch (recog_memoized (insn))
5811 case CODE_FOR_prologue_allocate_stack:
5812 case CODE_FOR_epilogue_deallocate_stack:
5813 ret = XVECEXP (x, 0, 0);
5817 ret = single_set_2 (insn, x);
5824 /* Adjust the cost of a scheduling dependency. Return the new cost of
5825 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
5828 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
5830 enum attr_itanium_class dep_class;
5831 enum attr_itanium_class insn_class;
5833 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
5836 insn_class = ia64_safe_itanium_class (insn);
5837 dep_class = ia64_safe_itanium_class (dep_insn);
5838 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
5839 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
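/* E.g. an output dependence in which either insn is a store carries
   no register result to wait for, so its cost drops to 0 and the
   scheduler is free to place the two insns in the same cycle.  */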
5845 /* Like emit_insn_before, but skip cycle_display notes.
5846 ??? When cycle display notes are implemented, update this. */
5849 ia64_emit_insn_before (rtx insn, rtx before)
5851 emit_insn_before (insn, before);
5854 /* The following function marks insns that produce addresses for load
5855 and store insns. Such insns will be placed into M slots because that
5856 decreases latency time for Itanium1 (see function
5857 `ia64_produce_address_p' and the DFA descriptions). */
5860 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
5862 rtx insn, link, next, next_tail;
5864 next_tail = NEXT_INSN (tail);
5865 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5868 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5870 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
5872 for (link = INSN_DEPEND (insn); link != 0; link = XEXP (link, 1))
5874 next = XEXP (link, 0);
5875 if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_ST
5876 || ia64_safe_itanium_class (next) == ITANIUM_CLASS_STF)
5877 && ia64_st_address_bypass_p (insn, next))
5879 else if ((ia64_safe_itanium_class (next) == ITANIUM_CLASS_LD
5880 || ia64_safe_itanium_class (next)
5881 == ITANIUM_CLASS_FLD)
5882 && ia64_ld_address_bypass_p (insn, next))
5885 insn->call = link != 0;
5889 /* We're beginning a new block. Initialize data structures as necessary. */
5892 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
5893 int sched_verbose ATTRIBUTE_UNUSED,
5894 int max_ready ATTRIBUTE_UNUSED)
5896 #ifdef ENABLE_CHECKING
5899 if (reload_completed)
5900 for (insn = NEXT_INSN (current_sched_info->prev_head);
5901 insn != current_sched_info->next_tail;
5902 insn = NEXT_INSN (insn))
5903 if (SCHED_GROUP_P (insn))
5906 last_scheduled_insn = NULL_RTX;
5907 init_insn_group_barriers ();
5910 /* We are about to begin issuing insns for this clock cycle.
5911 Override the default sort algorithm to better slot instructions. */
5914 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
5915 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
5919 int n_ready = *pn_ready;
5920 rtx *e_ready = ready + n_ready;
5924 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
5926 if (reorder_type == 0)
5928 /* First, move all USEs, CLOBBERs and other crud out of the way. */
5930 for (insnp = ready; insnp < e_ready; insnp++)
5931 if (insnp < e_ready)
5934 enum attr_type t = ia64_safe_type (insn);
5935 if (t == TYPE_UNKNOWN)
5937 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
5938 || asm_noperands (PATTERN (insn)) >= 0)
5940 rtx lowest = ready[n_asms];
5941 ready[n_asms] = insn;
5947 rtx highest = ready[n_ready - 1];
5948 ready[n_ready - 1] = insn;
5955 if (n_asms < n_ready)
5957 /* Some normal insns to process. Skip the asms. */
5961 else if (n_ready > 0)
5965 if (ia64_final_schedule)
5968 int nr_need_stop = 0;
5970 for (insnp = ready; insnp < e_ready; insnp++)
5971 if (safe_group_barrier_needed_p (*insnp))
5974 if (reorder_type == 1 && n_ready == nr_need_stop)
5976 if (reorder_type == 0)
5979 /* Move down everything that needs a stop bit, preserving
5980 relative order. */
5981 while (insnp-- > ready + deleted)
5982 while (insnp >= ready + deleted)
5985 if (! safe_group_barrier_needed_p (insn))
5987 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
5998 /* We are about to begin issuing insns for this clock cycle. Override
5999 the default sort algorithm to better slot instructions. */
6002 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6005 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6006 pn_ready, clock_var, 0);
6009 /* Like ia64_sched_reorder, but called after issuing each insn.
6010 Override the default sort algorithm to better slot instructions. */
6013 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6014 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6015 int *pn_ready, int clock_var)
6017 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6018 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6019 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6023 /* We are about to issue INSN. Return the number of insns left on the
6024 ready queue that can be issued this cycle. */
6027 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6028 int sched_verbose ATTRIBUTE_UNUSED,
6029 rtx insn ATTRIBUTE_UNUSED,
6030 int can_issue_more ATTRIBUTE_UNUSED)
6032 last_scheduled_insn = insn;
6033 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6034 if (reload_completed)
6036 if (group_barrier_needed_p (insn))
6038 if (GET_CODE (insn) == CALL_INSN)
6039 init_insn_group_barriers ();
6040 stops_p [INSN_UID (insn)] = stop_before_p;
6046 /* We are choosing insn from the ready queue. Return nonzero if INSN
6047 can be chosen. */
6049 static int
6050 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6052 if (insn == NULL_RTX || !INSN_P (insn))
6054 return (!reload_completed
6055 || !safe_group_barrier_needed_p (insn));
6058 /* The following variable value is pseudo-insn used by the DFA insn
6059 scheduler to change the DFA state when the simulated clock is
6060 increased. */
6062 static rtx dfa_pre_cycle_insn;
6064 /* We are about to begin issuing INSN. Return nonzero if we cannot
6065 issue it on given cycle CLOCK and return zero if we should not sort
6066 the ready queue on the next clock start. */
6069 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6070 int clock, int *sort_p)
6072 int setup_clocks_p = FALSE;
6074 if (insn == NULL_RTX || !INSN_P (insn))
6076 if ((reload_completed && safe_group_barrier_needed_p (insn))
6077 || (last_scheduled_insn
6078 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6079 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6080 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6082 init_insn_group_barriers ();
6083 if (verbose && dump)
6084 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6085 last_clock == clock ? " + cycle advance" : "");
6087 if (last_clock == clock)
6089 state_transition (curr_state, dfa_stop_insn);
6090 if (TARGET_EARLY_STOP_BITS)
6091 *sort_p = (last_scheduled_insn == NULL_RTX
6092 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6097 else if (reload_completed)
6098 setup_clocks_p = TRUE;
6099 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6100 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6101 state_reset (curr_state);
6104 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6105 state_transition (curr_state, dfa_stop_insn);
6106 state_transition (curr_state, dfa_pre_cycle_insn);
6107 state_transition (curr_state, NULL);
6110 else if (reload_completed)
6111 setup_clocks_p = TRUE;
6112 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6113 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6114 && asm_noperands (PATTERN (insn)) < 0)
6116 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6118 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6123 for (link = LOG_LINKS (insn); link; link = XEXP (link, 1))
6124 if (REG_NOTE_KIND (link) == 0)
6126 enum attr_itanium_class dep_class;
6127 rtx dep_insn = XEXP (link, 0);
6129 dep_class = ia64_safe_itanium_class (dep_insn);
6130 if ((dep_class == ITANIUM_CLASS_MMMUL
6131 || dep_class == ITANIUM_CLASS_MMSHF)
6132 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6134 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6135 d = last_clock - clocks [INSN_UID (dep_insn)];
6138 add_cycles [INSN_UID (insn)] = 3 - d;
6146 /* The following page contains abstract data `bundle states' which are
6147 used for bundling insns (inserting nops and template generation). */
6149 /* The following describes the state of insn bundling. */
6153 /* Unique bundle state number to identify them in the debugging
6154 output. */
6155 int unique_num;
6156 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
6157 /* number nops before and after the insn */
6158 short before_nops_num, after_nops_num;
6159 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
6160 insn, and so on) */
6161 int cost; /* cost of the state in cycles */
6162 int accumulated_insns_num; /* number of all previous insns including
6163 nops. L is considered as 2 insns */
6164 int branch_deviation; /* deviation of previous branches from 3rd slots */
6165 struct bundle_state *next; /* next state with the same insn_num */
6166 struct bundle_state *originator; /* originator (previous insn state) */
6167 /* All bundle states are in the following chain. */
6168 struct bundle_state *allocated_states_chain;
6169 /* The DFA State after issuing the insn and the nops. */
6173 /* The following maps insn numbers to the corresponding bundle states. */
6175 static struct bundle_state **index_to_bundle_states;
6177 /* The unique number of next bundle state. */
6179 static int bundle_states_num;
6181 /* All allocated bundle states are in the following chain. */
6183 static struct bundle_state *allocated_bundle_states_chain;
6185 /* All allocated but not used bundle states are in the following
6186 chain. */
6188 static struct bundle_state *free_bundle_state_chain;
6191 /* The following function returns a free bundle state. */
6193 static struct bundle_state *
6194 get_free_bundle_state (void)
6196 struct bundle_state *result;
6198 if (free_bundle_state_chain != NULL)
6200 result = free_bundle_state_chain;
6201 free_bundle_state_chain = result->next;
6205 result = xmalloc (sizeof (struct bundle_state));
6206 result->dfa_state = xmalloc (dfa_state_size);
6207 result->allocated_states_chain = allocated_bundle_states_chain;
6208 allocated_bundle_states_chain = result;
6210 result->unique_num = bundle_states_num++;
6215 /* The following function frees the given bundle state. */
6218 free_bundle_state (struct bundle_state *state)
6220 state->next = free_bundle_state_chain;
6221 free_bundle_state_chain = state;
6224 /* Start work with abstract data `bundle states'. */
6227 initiate_bundle_states (void)
6229 bundle_states_num = 0;
6230 free_bundle_state_chain = NULL;
6231 allocated_bundle_states_chain = NULL;
6234 /* Finish work with abstract data `bundle states'. */
6237 finish_bundle_states (void)
6239 struct bundle_state *curr_state, *next_state;
6241 for (curr_state = allocated_bundle_states_chain;
6243 curr_state = next_state)
6245 next_state = curr_state->allocated_states_chain;
6246 free (curr_state->dfa_state);
6251 /* Hash table of the bundle states. The key is dfa_state and insn_num
6252 of the bundle states. */
6254 static htab_t bundle_state_table;
6256 /* The function returns hash of BUNDLE_STATE. */
6259 bundle_state_hash (const void *bundle_state)
6261 const struct bundle_state *state = (struct bundle_state *) bundle_state;
6264 for (result = i = 0; i < dfa_state_size; i++)
6265 result += (((unsigned char *) state->dfa_state) [i]
6266 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
6267 return result + state->insn_num;
6270 /* The function returns nonzero if the bundle state keys are equal. */
6273 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
6275 const struct bundle_state * state1 = (struct bundle_state *) bundle_state_1;
6276 const struct bundle_state * state2 = (struct bundle_state *) bundle_state_2;
6278 return (state1->insn_num == state2->insn_num
6279 && memcmp (state1->dfa_state, state2->dfa_state,
6280 dfa_state_size) == 0);
6283 /* The function inserts the BUNDLE_STATE into the hash table. The
6284 function returns nonzero if the bundle has been inserted into the
6285 table. The table contains the best bundle state with the given key. */
6288 insert_bundle_state (struct bundle_state *bundle_state)
6292 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
6293 if (*entry_ptr == NULL)
6295 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
6296 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
6297 *entry_ptr = (void *) bundle_state;
6300 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
6301 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
6302 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
6303 > bundle_state->accumulated_insns_num
6304 || (((struct bundle_state *)
6305 *entry_ptr)->accumulated_insns_num
6306 == bundle_state->accumulated_insns_num
6307 && ((struct bundle_state *)
6308 *entry_ptr)->branch_deviation
6309 > bundle_state->branch_deviation))))
6312 struct bundle_state temp;
6314 temp = *(struct bundle_state *) *entry_ptr;
6315 *(struct bundle_state *) *entry_ptr = *bundle_state;
6316 ((struct bundle_state *) *entry_ptr)->next = temp.next;
6317 *bundle_state = temp;
6322 /* Start work with the hash table. */
6325 initiate_bundle_state_table (void)
6327 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
6331 /* Finish work with the hash table. */
6334 finish_bundle_state_table (void)
6336 htab_delete (bundle_state_table);
6341 /* The following variable is an insn `nop' used to check bundle states
6342 with different numbers of inserted nops. */
6344 static rtx ia64_nop;
6346 /* The following function tries to issue NOPS_NUM nops for the current
6347 state without advancing the processor cycle. If it fails, the
6348 function returns FALSE and frees the current state. */
6351 try_issue_nops (struct bundle_state *curr_state, int nops_num)
6355 for (i = 0; i < nops_num; i++)
6356 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
6358 free_bundle_state (curr_state);
6364 /* The following function tries to issue INSN for the current
6365 state without advancing the processor cycle. If it fails, the
6366 function returns FALSE and frees the current state. */
6369 try_issue_insn (struct bundle_state *curr_state, rtx insn)
6371 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
6373 free_bundle_state (curr_state);
6379 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
6380 starting with ORIGINATOR without advancing the processor cycle. If
6381 TRY_BUNDLE_END_P is TRUE, the function also/only (if
6382 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
6383 If it is successful, the function creates a new bundle state and
6384 inserts it into the hash table and into `index_to_bundle_states'. */
6387 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
6388 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
6390 struct bundle_state *curr_state;
6392 curr_state = get_free_bundle_state ();
6393 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
6394 curr_state->insn = insn;
6395 curr_state->insn_num = originator->insn_num + 1;
6396 curr_state->cost = originator->cost;
6397 curr_state->originator = originator;
6398 curr_state->before_nops_num = before_nops_num;
6399 curr_state->after_nops_num = 0;
6400 curr_state->accumulated_insns_num
6401 = originator->accumulated_insns_num + before_nops_num;
6402 curr_state->branch_deviation = originator->branch_deviation;
6403 if (insn == NULL_RTX)
6405 else if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
6407 if (GET_MODE (insn) == TImode)
6409 if (!try_issue_nops (curr_state, before_nops_num))
6411 if (!try_issue_insn (curr_state, insn))
6413 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
6414 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
6415 && curr_state->accumulated_insns_num % 3 != 0)
6417 free_bundle_state (curr_state);
6421 else if (GET_MODE (insn) != TImode)
6423 if (!try_issue_nops (curr_state, before_nops_num))
6425 if (!try_issue_insn (curr_state, insn))
6427 curr_state->accumulated_insns_num++;
6428 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6429 || asm_noperands (PATTERN (insn)) >= 0)
6431 if (ia64_safe_type (insn) == TYPE_L)
6432 curr_state->accumulated_insns_num++;
6436 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
6437 state_transition (curr_state->dfa_state, NULL);
6439 if (!try_issue_nops (curr_state, before_nops_num))
6441 if (!try_issue_insn (curr_state, insn))
6443 curr_state->accumulated_insns_num++;
6444 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6445 || asm_noperands (PATTERN (insn)) >= 0)
6447 /* Finish bundle containing asm insn. */
6448 curr_state->after_nops_num
6449 = 3 - curr_state->accumulated_insns_num % 3;
6450 curr_state->accumulated_insns_num
6451 += 3 - curr_state->accumulated_insns_num % 3;
6453 else if (ia64_safe_type (insn) == TYPE_L)
6454 curr_state->accumulated_insns_num++;
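/* Branch insns are best placed in the 3rd (last) slot of a
   bundle; record how far this branch falls short of that slot.  */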
6456 if (ia64_safe_type (insn) == TYPE_B)
6457 curr_state->branch_deviation
6458 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
6459 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
6461 if (!only_bundle_end_p && insert_bundle_state (curr_state))
6464 struct bundle_state *curr_state1;
6465 struct bundle_state *allocated_states_chain;
6467 curr_state1 = get_free_bundle_state ();
6468 dfa_state = curr_state1->dfa_state;
6469 allocated_states_chain = curr_state1->allocated_states_chain;
6470 *curr_state1 = *curr_state;
6471 curr_state1->dfa_state = dfa_state;
6472 curr_state1->allocated_states_chain = allocated_states_chain;
6473 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
6475 curr_state = curr_state1;
6477 if (!try_issue_nops (curr_state,
6478 3 - curr_state->accumulated_insns_num % 3))
6480 curr_state->after_nops_num
6481 = 3 - curr_state->accumulated_insns_num % 3;
6482 curr_state->accumulated_insns_num
6483 += 3 - curr_state->accumulated_insns_num % 3;
6485 if (!insert_bundle_state (curr_state))
6486 free_bundle_state (curr_state);
6490 /* The following function returns the position reached in the
6491 two-bundle window for the given STATE. */
6494 get_max_pos (state_t state)
6496 if (cpu_unit_reservation_p (state, pos_6))
6498 else if (cpu_unit_reservation_p (state, pos_5))
6500 else if (cpu_unit_reservation_p (state, pos_4))
6502 else if (cpu_unit_reservation_p (state, pos_3))
6504 else if (cpu_unit_reservation_p (state, pos_2))
6506 else if (cpu_unit_reservation_p (state, pos_1))
6512 /* The function returns the code of a possible template for the given
6513 position and state. The function should be called only with
6514 position values of 3 or 6. */
6517 get_template (state_t state, int pos)
6522 if (cpu_unit_reservation_p (state, _0mii_))
6524 else if (cpu_unit_reservation_p (state, _0mmi_))
6526 else if (cpu_unit_reservation_p (state, _0mfi_))
6528 else if (cpu_unit_reservation_p (state, _0mmf_))
6530 else if (cpu_unit_reservation_p (state, _0bbb_))
6532 else if (cpu_unit_reservation_p (state, _0mbb_))
6534 else if (cpu_unit_reservation_p (state, _0mib_))
6536 else if (cpu_unit_reservation_p (state, _0mmb_))
6538 else if (cpu_unit_reservation_p (state, _0mfb_))
6540 else if (cpu_unit_reservation_p (state, _0mlx_))
6545 if (cpu_unit_reservation_p (state, _1mii_))
6547 else if (cpu_unit_reservation_p (state, _1mmi_))
6549 else if (cpu_unit_reservation_p (state, _1mfi_))
6551 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
6553 else if (cpu_unit_reservation_p (state, _1bbb_))
6555 else if (cpu_unit_reservation_p (state, _1mbb_))
6557 else if (cpu_unit_reservation_p (state, _1mib_))
6559 else if (cpu_unit_reservation_p (state, _1mmb_))
6561 else if (cpu_unit_reservation_p (state, _1mfb_))
6563 else if (cpu_unit_reservation_p (state, _1mlx_))
6572 /* The following function returns the first insn important for insn
6573 bundling that follows INSN and precedes TAIL. */
6576 get_next_important_insn (rtx insn, rtx tail)
6578 for (; insn && insn != tail; insn = NEXT_INSN (insn))
6580 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6581 && GET_CODE (PATTERN (insn)) != USE
6582 && GET_CODE (PATTERN (insn)) != CLOBBER)
6587 /* The following function does insn bundling. Bundling means
6588 inserting templates and nop insns to fit insn groups into permitted
6589 templates. Instruction scheduling uses an NDFA (non-deterministic
6590 finite automaton) encoding information about the templates and the
6591 inserted nops. Nondeterminism of the automaton permits following
6592 all possible insn sequences very quickly.
6594 Unfortunately it is not possible to get information about inserted
6595 nop insns and used templates from the automaton states. The
6596 automaton only says that we can issue an insn possibly inserting
6597 some nops before it and using some template. Therefore insn
6598 bundling in this function is implemented by using a DFA
6599 (deterministic finite automaton). We follow all possible insn
6600 sequences by inserting 0-2 nops (that is what the NDFA describes for
6601 insn scheduling) before/after each insn being bundled. We know the
6602 start of a simulated processor cycle from insn scheduling (an insn
6603 starting a new cycle has TImode).
6605 A simple implementation of insn bundling would create an enormous
6606 number of possible insn sequences satisfying information about new
6607 cycle ticks taken from the insn scheduling. To make the algorithm
6608 practical we use dynamic programming. Each decision (about
6609 inserting nops and implicitly about previous decisions) is described
6610 by the structure bundle_state (see above). If we generate the same
6611 bundle state (the key is the automaton state after issuing the insns
6612 and nops for it), we reuse the already generated one. As a consequence
6613 we reject some decisions which cannot improve the solution and
6614 reduce the memory used by the algorithm.
6616 When we reach the end of an EBB (extended basic block), we choose the
6617 best sequence and then, moving back through the EBB, insert
6618 templates for the best alternative. The templates are taken by
6619 querying the automaton state for each insn in the chosen bundle states.
6621 So the algorithm makes two (forward and backward) passes through the
6622 EBB. There is an additional forward pass through the EBB for the
6623 Itanium1 processor. This pass inserts more nops to make the dependency
6624 between a producer insn and MMMUL/MMSHF insns at least 4 cycles long. */
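/* An illustrative sketch (pseudocode, not compiled): the forward
   dynamic programming step described above.

     for each important insn I in the EBB (forward order)
       for each bundle state S recorded after the previous insn
         for nops in { 0, 1, 2 }
           try to issue `nops' nop insns and then I starting from S;
           if the DFA accepts, build the successor state and keep
             only the best state per (insn number, DFA state) key
             (see insert_bundle_state above);

   The backward pass then walks the `originator' links of the best
   final state, emitting bundle selectors and the recorded nops.  */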
6627 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
6629 struct bundle_state *curr_state, *next_state, *best_state;
6630 rtx insn, next_insn;
6632 int i, bundle_end_p, only_bundle_end_p, asm_p;
6633 int pos = 0, max_pos, template0, template1;
6636 enum attr_type type;
6639 /* Count insns in the EBB. */
6640 for (insn = NEXT_INSN (prev_head_insn);
6641 insn && insn != tail;
6642 insn = NEXT_INSN (insn))
6648 dfa_clean_insn_cache ();
6649 initiate_bundle_state_table ();
6650 index_to_bundle_states = xmalloc ((insn_num + 2)
6651 * sizeof (struct bundle_state *));
6652 /* First (forward) pass -- generation of bundle states. */
6653 curr_state = get_free_bundle_state ();
6654 curr_state->insn = NULL;
6655 curr_state->before_nops_num = 0;
6656 curr_state->after_nops_num = 0;
6657 curr_state->insn_num = 0;
6658 curr_state->cost = 0;
6659 curr_state->accumulated_insns_num = 0;
6660 curr_state->branch_deviation = 0;
6661 curr_state->next = NULL;
6662 curr_state->originator = NULL;
6663 state_reset (curr_state->dfa_state);
6664 index_to_bundle_states [0] = curr_state;
6666 /* Shift the cycle mark if it is put on an insn which could be ignored. */
6667 for (insn = NEXT_INSN (prev_head_insn);
6669 insn = NEXT_INSN (insn))
6671 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6672 || GET_CODE (PATTERN (insn)) == USE
6673 || GET_CODE (PATTERN (insn)) == CLOBBER)
6674 && GET_MODE (insn) == TImode)
6676 PUT_MODE (insn, VOIDmode);
6677 for (next_insn = NEXT_INSN (insn);
6679 next_insn = NEXT_INSN (next_insn))
6680 if (INSN_P (next_insn)
6681 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
6682 && GET_CODE (PATTERN (next_insn)) != USE
6683 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
6685 PUT_MODE (next_insn, TImode);
6689 /* Forward pass: generation of bundle states. */
6690 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6695 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6696 || GET_CODE (PATTERN (insn)) == USE
6697 || GET_CODE (PATTERN (insn)) == CLOBBER)
6699 type = ia64_safe_type (insn);
6700 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6702 index_to_bundle_states [insn_num] = NULL;
6703 for (curr_state = index_to_bundle_states [insn_num - 1];
6705 curr_state = next_state)
6707 pos = curr_state->accumulated_insns_num % 3;
6708 next_state = curr_state->next;
6709 /* We must fill up the current bundle in order to start a
6710 subsequent asm insn in a new bundle. An asm insn is always
6711 placed in a separate bundle. */
6713 = (next_insn != NULL_RTX
6714 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
6715 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
6716 /* We may fill up the current bundle if it is the cycle end
6717 without a group barrier. */
6719 = (only_bundle_end_p || next_insn == NULL_RTX
6720 || (GET_MODE (next_insn) == TImode
6721 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
6722 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
6724 /* We need to insert 2 nops for cases like M_MII. To
6725 guarantee issuing all insns on the same cycle for
6726 Itanium 1, we need to issue 2 nops after the first M
6727 insn (MnnMII where n is a nop insn). */
6728 || ((type == TYPE_M || type == TYPE_A)
6729 && ia64_tune == PROCESSOR_ITANIUM
6730 && !bundle_end_p && pos == 1))
6731 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
6733 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
6735 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
6738 if (index_to_bundle_states [insn_num] == NULL)
6740 for (curr_state = index_to_bundle_states [insn_num];
6742 curr_state = curr_state->next)
6743 if (verbose >= 2 && dump)
6745 /* This structure is taken from the generated code of the
6746 pipeline hazard recognizer (see file insn-attrtab.c).
6747 Please don't forget to change the structure if a new
6748 automaton is added to the .md file. */
6751 unsigned short one_automaton_state;
6752 unsigned short oneb_automaton_state;
6753 unsigned short two_automaton_state;
6754 unsigned short twob_automaton_state;
6759 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6760 curr_state->unique_num,
6761 (curr_state->originator == NULL
6762 ? -1 : curr_state->originator->unique_num),
6764 curr_state->before_nops_num, curr_state->after_nops_num,
6765 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6766 (ia64_tune == PROCESSOR_ITANIUM
6767 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6768 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6772 if (index_to_bundle_states [insn_num] == NULL)
6773 /* We should find a solution because the 2nd insn scheduling has
6774 already found one. */
6776 /* Find a state corresponding to the best insn sequence. */
6778 for (curr_state = index_to_bundle_states [insn_num];
6780 curr_state = curr_state->next)
6781 /* We look only at the states whose last bundle is fully filled.
6782 First we prefer insn sequences with minimal cost, then ones
6783 with the fewest inserted nops, and finally ones with branch
6784 insns placed in 3rd slots. */
6785 if (curr_state->accumulated_insns_num % 3 == 0
6786 && (best_state == NULL || best_state->cost > curr_state->cost
6787 || (best_state->cost == curr_state->cost
6788 && (curr_state->accumulated_insns_num
6789 < best_state->accumulated_insns_num
6790 || (curr_state->accumulated_insns_num
6791 == best_state->accumulated_insns_num
6792 && curr_state->branch_deviation
6793 < best_state->branch_deviation)))))
6794 best_state = curr_state;
6795 /* Second (backward) pass: adding nops and templates. */
6796 insn_num = best_state->before_nops_num;
6797 template0 = template1 = -1;
6798 for (curr_state = best_state;
6799 curr_state->originator != NULL;
6800 curr_state = curr_state->originator)
6802 insn = curr_state->insn;
6803 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
6804 || asm_noperands (PATTERN (insn)) >= 0);
6806 if (verbose >= 2 && dump)
6810 unsigned short one_automaton_state;
6811 unsigned short oneb_automaton_state;
6812 unsigned short two_automaton_state;
6813 unsigned short twob_automaton_state;
6818 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
6819 curr_state->unique_num,
6820 (curr_state->originator == NULL
6821 ? -1 : curr_state->originator->unique_num),
6823 curr_state->before_nops_num, curr_state->after_nops_num,
6824 curr_state->accumulated_insns_num, curr_state->branch_deviation,
6825 (ia64_tune == PROCESSOR_ITANIUM
6826 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
6827 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
6830 /* Find the position in the current bundle window. The window can
6831 contain at most two bundles. A two-bundle window means that
6832 the processor will make two bundle rotations. */
6833 max_pos = get_max_pos (curr_state->dfa_state);
6835 /* The following (negative template number) means that the
6836 processor did one bundle rotation. */
6837 || (max_pos == 3 && template0 < 0))
6839 /* We are at the end of the window -- find template(s) for
6840 its bundle(s). */
6843 template0 = get_template (curr_state->dfa_state, 3);
6846 template1 = get_template (curr_state->dfa_state, 3);
6847 template0 = get_template (curr_state->dfa_state, 6);
6850 if (max_pos > 3 && template1 < 0)
6851 /* This may happen when we have a stop inside a bundle. */
6855 template1 = get_template (curr_state->dfa_state, 3);
6859 /* Emit nops after the current insn. */
6860 for (i = 0; i < curr_state->after_nops_num; i++)
6863 emit_insn_after (nop, insn);
6869 /* We are at the start of a bundle: emit the template
6870 (it should be defined). */
6873 b = gen_bundle_selector (GEN_INT (template0));
6874 ia64_emit_insn_before (b, nop);
6875 /* If we have a two-bundle window, we make one bundle
6876 rotation. Otherwise template0 will be undefined
6877 (a negative value). */
6878 template0 = template1;
6882 /* Move the position backward in the window. A group barrier
6883 has no slot. An asm insn takes a whole bundle. */
6884 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6885 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6886 && asm_noperands (PATTERN (insn)) < 0)
6888 /* Long insn takes 2 slots. */
6889 if (ia64_safe_type (insn) == TYPE_L)
6894 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
6895 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6896 && asm_noperands (PATTERN (insn)) < 0)
6898 /* The current insn is at the bundle start: emit the
6899 template. */
6902 b = gen_bundle_selector (GEN_INT (template0));
6903 ia64_emit_insn_before (b, insn);
6904 b = PREV_INSN (insn);
6906 /* See comment above in analogous place for emitting nops
6907 after the insn. */
6908 template0 = template1;
6911 /* Emit nops before the current insn. */
6912 for (i = 0; i < curr_state->before_nops_num; i++)
6915 ia64_emit_insn_before (nop, insn);
6916 nop = PREV_INSN (insn);
6923 /* See comment above in analogous place for emitting nops
6924 after the insn. */
6927 b = gen_bundle_selector (GEN_INT (template0));
6928 ia64_emit_insn_before (b, insn);
6929 b = PREV_INSN (insn);
6931 template0 = template1;
6936 if (ia64_tune == PROCESSOR_ITANIUM)
6937 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
6938 Itanium1 has a strange design: if the distance between an insn
6939 and a dependent MM-insn is less than 4, then we incur an
6940 additional 6-cycle stall. So we make the distance equal to
6941 4 cycles if it is less. */
6942 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
6947 || ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
6948 || GET_CODE (PATTERN (insn)) == USE
6949 || GET_CODE (PATTERN (insn)) == CLOBBER)
6951 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
6952 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
6953 /* We found an MM-insn which needs additional cycles. */
6959 /* Now we are searching for the template of the bundle in
6960 which the MM-insn is placed and for the position of the
6961 insn in the bundle (0, 1, 2). We also check whether there
6962 is a stop before the insn. */
6963 last = prev_active_insn (insn);
6964 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
6966 last = prev_active_insn (last);
6968 for (;; last = prev_active_insn (last))
6969 if (recog_memoized (last) == CODE_FOR_bundle_selector)
6971 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
6973 /* The insn is in an MLX bundle. Change the template
6974 to MFI because we will add nops before the insn.
6975 This simplifies subsequent code a lot. */
6977 = gen_bundle_selector (const2_rtx); /* -> MFI */
6980 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
6981 && (ia64_safe_itanium_class (last)
6982 != ITANIUM_CLASS_IGNORE))
6984 /* A consistency check: the stop is not at the
6985 bundle start, there are no more than 3 insns in the bundle,
6986 and the MM-insn is not at the start of a bundle with
6987 template MLX. */
6988 if ((pred_stop_p && n == 0) || n > 2
6989 || (template0 == 9 && n != 0))
6991 /* Put nops after the insn in the bundle. */
6992 for (j = 3 - n; j > 0; j --)
6993 ia64_emit_insn_before (gen_nop (), insn);
6994 /* This takes into account that we will add N more nops
6995 before the insn later -- see the code below. */
6996 add_cycles [INSN_UID (insn)]--;
6997 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
6998 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7001 add_cycles [INSN_UID (insn)]--;
7002 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
7004 /* Insert "MII;" template. */
7005 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
7007 ia64_emit_insn_before (gen_nop (), insn);
7008 ia64_emit_insn_before (gen_nop (), insn);
7011 /* To decrease code size, we use the "MI;I;"
7012 template. */
7013 ia64_emit_insn_before
7014 (gen_insn_group_barrier (GEN_INT (3)), insn);
7017 ia64_emit_insn_before (gen_nop (), insn);
7018 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7021 /* Put the MM-insn in the same slot of a bundle with the
7022 same template as the original one. */
7023 ia64_emit_insn_before (gen_bundle_selector (GEN_INT (template0)),
7025 /* To put the insn in the same slot, add the necessary
7026 number of nops. */
7027 for (j = n; j > 0; j --)
7028 ia64_emit_insn_before (gen_nop (), insn);
7029 /* Emit the stop if the original bundle had it. */
7031 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7035 free (index_to_bundle_states);
7036 finish_bundle_state_table ();
7038 dfa_clean_insn_cache ();
7041 /* The following function is called at the end of scheduling a BB or
7042 EBB. After reload, it inserts stop bits and does insn bundling. */
7045 ia64_sched_finish (FILE *dump, int sched_verbose)
7048 fprintf (dump, "// Finishing schedule.\n");
7049 if (!reload_completed)
7051 if (reload_completed)
7053 final_emit_insn_group_barriers (dump);
7054 bundling (dump, sched_verbose, current_sched_info->prev_head,
7055 current_sched_info->next_tail);
7056 if (sched_verbose && dump)
7057 fprintf (dump, "// finishing %d-%d\n",
7058 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
7059 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
7065 /* The following function inserts stop bits in a scheduled BB or EBB. */
7068 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
7071 int need_barrier_p = 0;
7072 rtx prev_insn = NULL_RTX;
7074 init_insn_group_barriers ();
7076 for (insn = NEXT_INSN (current_sched_info->prev_head);
7077 insn != current_sched_info->next_tail;
7078 insn = NEXT_INSN (insn))
7080 if (GET_CODE (insn) == BARRIER)
7082 rtx last = prev_active_insn (insn);
7086 if (GET_CODE (last) == JUMP_INSN
7087 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
7088 last = prev_active_insn (last);
7089 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
7090 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
7092 init_insn_group_barriers ();
7094 prev_insn = NULL_RTX;
7096 else if (INSN_P (insn))
7098 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
7100 init_insn_group_barriers ();
7102 prev_insn = NULL_RTX;
7104 else if (need_barrier_p || group_barrier_needed_p (insn))
7106 if (TARGET_EARLY_STOP_BITS)
7111 last != current_sched_info->prev_head;
7112 last = PREV_INSN (last))
7113 if (INSN_P (last) && GET_MODE (last) == TImode
7114 && stops_p [INSN_UID (last)])
7116 if (last == current_sched_info->prev_head)
7118 last = prev_active_insn (last);
7120 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
7121 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
7123 init_insn_group_barriers ();
7124 for (last = NEXT_INSN (last);
7126 last = NEXT_INSN (last))
7128 group_barrier_needed_p (last);
7132 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
7134 init_insn_group_barriers ();
7136 group_barrier_needed_p (insn);
7137 prev_insn = NULL_RTX;
7139 else if (recog_memoized (insn) >= 0)
7141 need_barrier_p = (GET_CODE (insn) == CALL_INSN
7142 || GET_CODE (PATTERN (insn)) == ASM_INPUT
7143 || asm_noperands (PATTERN (insn)) >= 0);
7150 /* The following function returns the lookahead depth to be used by
7151 the multipass DFA insn scheduler. */
7154 ia64_first_cycle_multipass_dfa_lookahead (void)
7156 return (reload_completed ? 6 : 4);
7159 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
7162 ia64_init_dfa_pre_cycle_insn (void)
7164 if (temp_dfa_state == NULL)
7166 dfa_state_size = state_size ();
7167 temp_dfa_state = xmalloc (dfa_state_size);
7168 prev_cycle_state = xmalloc (dfa_state_size);
7170 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
7171 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
7172 recog_memoized (dfa_pre_cycle_insn);
7173 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
7174 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
7175 recog_memoized (dfa_stop_insn);
7178 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
7179 used by the DFA insn scheduler. */
7182 ia64_dfa_pre_cycle_insn (void)
7184 return dfa_pre_cycle_insn;
7187 /* The following function returns TRUE if PRODUCER (of type ilog or
7188 ld) produces the address for CONSUMER (of type st or stf). */
7191 ia64_st_address_bypass_p (rtx producer, rtx consumer)
7195 if (producer == NULL_RTX || consumer == NULL_RTX)
7197 dest = ia64_single_set (producer);
7198 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7199 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7201 if (GET_CODE (reg) == SUBREG)
7202 reg = SUBREG_REG (reg);
7203 dest = ia64_single_set (consumer);
7204 if (dest == NULL_RTX || (mem = SET_DEST (dest)) == NULL_RTX
7205 || GET_CODE (mem) != MEM)
7207 return reg_mentioned_p (reg, mem);
7210 /* The following function returns TRUE if PRODUCER (of type ilog or
7211 ld) produces the address for CONSUMER (of type ld or fld). */
7214 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
7216 rtx dest, src, reg, mem;
7218 if (producer == NULL_RTX || consumer == NULL_RTX)
7220 dest = ia64_single_set (producer);
7221 if (dest == NULL_RTX || (reg = SET_DEST (dest)) == NULL_RTX
7222 || (GET_CODE (reg) != REG && GET_CODE (reg) != SUBREG))
7224 if (GET_CODE (reg) == SUBREG)
7225 reg = SUBREG_REG (reg);
7226 src = ia64_single_set (consumer);
7227 if (src == NULL_RTX || (mem = SET_SRC (src)) == NULL_RTX)
7229 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
7230 mem = XVECEXP (mem, 0, 0);
7231 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
7232 mem = XEXP (mem, 0);
7234 /* Note that LO_SUM is used for GOT loads. */
7235 if (GET_CODE (mem) != LO_SUM && GET_CODE (mem) != MEM)
7238 return reg_mentioned_p (reg, mem);
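/* E.g. (illustrative IA-64 assembly): in the pair

     add r14 = r32, r33   // producer, type ilog
     ld8 r15 = [r14]      // consumer uses r14 as the address

   the producer feeds the consumer's address, so this function
   returns TRUE for it.  */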
7241 /* The following function returns TRUE if INSN produces the address
7242 for a load/store insn. We place such insns into an M slot because
7243 that decreases their latency. */
7246 ia64_produce_address_p (rtx insn)
7252 /* Emit pseudo-ops for the assembler to describe predicate relations.
7253 At present this assumes that we only consider predicate pairs to
7254 be mutex, and that the assembler can deduce proper values from
7255 straight-line code. */
7258 emit_predicate_relation_info (void)
7262 FOR_EACH_BB_REVERSE (bb)
7265 rtx head = BB_HEAD (bb);
7267 /* We only need such notes at code labels. */
7268 if (GET_CODE (head) != CODE_LABEL)
7270 if (GET_CODE (NEXT_INSN (head)) == NOTE
7271 && NOTE_LINE_NUMBER (NEXT_INSN (head)) == NOTE_INSN_BASIC_BLOCK)
7272 head = NEXT_INSN (head);
7274 for (r = PR_REG (0); r < PR_REG (64); r += 2)
7275 if (REGNO_REG_SET_P (bb->global_live_at_start, r))
7277 rtx p = gen_rtx_REG (BImode, r);
7278 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
7279 if (head == BB_END (bb))
7285 /* Look for conditional calls that do not return, and protect predicate
7286 relations around them. Otherwise the assembler will assume the call
7287 returns, and complain about uses of call-clobbered predicates after
7288 the call. */
7289 FOR_EACH_BB_REVERSE (bb)
7291 rtx insn = BB_HEAD (bb);
7295 if (GET_CODE (insn) == CALL_INSN
7296 && GET_CODE (PATTERN (insn)) == COND_EXEC
7297 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
7299 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
7300 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
7301 if (BB_HEAD (bb) == insn)
7303 if (BB_END (bb) == insn)
7307 if (insn == BB_END (bb))
7309 insn = NEXT_INSN (insn);
7314 /* Perform machine dependent operations on the rtl chain INSNS. */
7319 /* We are freeing block_for_insn in the toplev to keep compatibility
7320 with old MDEP_REORGS that are not CFG based. Recompute it now. */
7321 compute_bb_for_insn ();
7323 /* If optimizing, we'll have split before scheduling. */
7325 split_all_insns (0);
7327 /* ??? update_life_info_in_dirty_blocks fails to terminate during
7328 non-optimizing bootstrap. */
7329 update_life_info (NULL, UPDATE_LIFE_GLOBAL_RM_NOTES, PROP_DEATH_NOTES);
7331 if (ia64_flag_schedule_insns2)
7333 timevar_push (TV_SCHED2);
7334 ia64_final_schedule = 1;
7336 initiate_bundle_states ();
7337 ia64_nop = make_insn_raw (gen_nop ());
7338 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
7339 recog_memoized (ia64_nop);
7340 clocks_length = get_max_uid () + 1;
7341 stops_p = xcalloc (1, clocks_length);
7342 if (ia64_tune == PROCESSOR_ITANIUM)
7344 clocks = xcalloc (clocks_length, sizeof (int));
7345 add_cycles = xcalloc (clocks_length, sizeof (int));
7347 if (ia64_tune == PROCESSOR_ITANIUM2)
7349 pos_1 = get_cpu_unit_code ("2_1");
7350 pos_2 = get_cpu_unit_code ("2_2");
7351 pos_3 = get_cpu_unit_code ("2_3");
7352 pos_4 = get_cpu_unit_code ("2_4");
7353 pos_5 = get_cpu_unit_code ("2_5");
7354 pos_6 = get_cpu_unit_code ("2_6");
7355 _0mii_ = get_cpu_unit_code ("2b_0mii.");
7356 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
7357 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
7358 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
7359 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
7360 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
7361 _0mib_ = get_cpu_unit_code ("2b_0mib.");
7362 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
7363 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
7364 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
7365 _1mii_ = get_cpu_unit_code ("2b_1mii.");
7366 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
7367 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
7368 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
7369 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
7370 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
7371 _1mib_ = get_cpu_unit_code ("2b_1mib.");
7372 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
7373 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
7374 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
7378 pos_1 = get_cpu_unit_code ("1_1");
7379 pos_2 = get_cpu_unit_code ("1_2");
7380 pos_3 = get_cpu_unit_code ("1_3");
7381 pos_4 = get_cpu_unit_code ("1_4");
7382 pos_5 = get_cpu_unit_code ("1_5");
7383 pos_6 = get_cpu_unit_code ("1_6");
7384 _0mii_ = get_cpu_unit_code ("1b_0mii.");
7385 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
7386 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
7387 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
7388 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
7389 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
7390 _0mib_ = get_cpu_unit_code ("1b_0mib.");
7391 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
7392 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
7393 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
7394 _1mii_ = get_cpu_unit_code ("1b_1mii.");
7395 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
7396 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
7397 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
7398 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
7399 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
7400 _1mib_ = get_cpu_unit_code ("1b_1mib.");
7401 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
7402 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
7403 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
7405 schedule_ebbs (dump_file);
7406 finish_bundle_states ();
7407 if (ia64_tune == PROCESSOR_ITANIUM)
7413 emit_insn_group_barriers (dump_file);
7415 ia64_final_schedule = 0;
7416 timevar_pop (TV_SCHED2);
7419 emit_all_insn_group_barriers (dump_file);
7421 /* A call must not be the last instruction in a function, so that the
7422 return address is still within the function and unwinding works
7423 properly. Note that IA-64 differs from dwarf2 on this point. */
7424 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7429 insn = get_last_insn ();
7430 if (! INSN_P (insn))
7431 insn = prev_active_insn (insn);
7432 /* Skip over insns that expand to nothing. */
7433 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
7435 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
7436 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
7438 insn = prev_active_insn (insn);
7440 if (GET_CODE (insn) == CALL_INSN)
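/* Emit a break.f bracketed by stop bits, so that the call is no
   longer the last insn and the return address stays inside the
   function.  */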
7443 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7444 emit_insn (gen_break_f ());
7445 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
7449 emit_predicate_relation_info ();
7451 if (ia64_flag_var_tracking)
7453 timevar_push (TV_VAR_TRACKING);
7454 variable_tracking_main ();
7455 timevar_pop (TV_VAR_TRACKING);
7459 /* Return true if REGNO is used by the epilogue. */
7462 ia64_epilogue_uses (int regno)
7467 /* With a call to a function in another module, we will write a new
7468 value to "gp". After returning from such a call, we need to make
7469 sure the function restores the original gp-value, even if the
7470 function itself does not use the gp anymore. */
7471 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
7473 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
7474 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
7475 /* For functions defined with the syscall_linkage attribute, all
7476 input registers are marked as live at all function exits. This
7477 prevents the register allocator from using the input registers,
7478 which in turn makes it possible to restart a system call after
7479 an interrupt without having to save/restore the input registers.
7480 This also prevents kernel data from leaking to application code. */
7481 return lookup_attribute ("syscall_linkage",
7482 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
7485 /* Conditional return patterns can't represent the use of `b0' as
7486 the return address, so we force the value live this way. */
7490 /* Likewise for ar.pfs, which is used by br.ret. */
7498 /* Return true if REGNO is used by the frame unwinder. */
7501 ia64_eh_uses (int regno)
7503 if (! reload_completed)
7506 if (current_frame_info.reg_save_b0
7507 && regno == current_frame_info.reg_save_b0)
7509 if (current_frame_info.reg_save_pr
7510 && regno == current_frame_info.reg_save_pr)
7512 if (current_frame_info.reg_save_ar_pfs
7513 && regno == current_frame_info.reg_save_ar_pfs)
7515 if (current_frame_info.reg_save_ar_unat
7516 && regno == current_frame_info.reg_save_ar_unat)
7518 if (current_frame_info.reg_save_ar_lc
7519 && regno == current_frame_info.reg_save_ar_lc)
7525 /* Return true if this goes in small data/bss. */
7527 /* ??? We could also support our own long data here, generating
7528 movl/add/ld8 instead of addl,ld8/ld8. This makes the code bigger but
7529 should make it faster because there is one less load. This also
7530 includes incomplete types which can't go in sdata/sbss. */
7533 ia64_in_small_data_p (tree exp)
7535 if (TARGET_NO_SDATA)
7538 /* We want to merge strings, so we never consider them small data. */
7539 if (TREE_CODE (exp) == STRING_CST)
7542 /* Functions are never small data. */
7543 if (TREE_CODE (exp) == FUNCTION_DECL)
7546 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
7548 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
7550 if (strcmp (section, ".sdata") == 0
7551 || strncmp (section, ".sdata.", 7) == 0
7552 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
7553 || strcmp (section, ".sbss") == 0
7554 || strncmp (section, ".sbss.", 6) == 0
7555 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
7560 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
7562 /* If this is an incomplete type with size 0, then we can't put it
7563 in sdata because it might be too big when completed. */
7564 if (size > 0 && size <= ia64_section_threshold)
7571 /* Output assembly directives for prologue regions. */
7573 /* True if we are in the last basic block of the function. */
7575 static bool last_block;
7577 /* True if we need a copy_state command at the start of the next block. */
7579 static bool need_copy_state;
7581 /* The function emits unwind directives for the start of an epilogue. */
7584 process_epilogue (void)
7586 /* If this isn't the last block of the function, then we need to label the
7587 current state, and copy it back in at the start of the next block. */
7591 fprintf (asm_out_file, "\t.label_state %d\n",
7592 ++cfun->machine->state_num);
7593 need_copy_state = true;
7596 fprintf (asm_out_file, "\t.restore sp\n");
7599 /* This function processes a SET pattern looking for specific patterns
7600 which result in emitting an assembly directive required for unwinding. */
7603 process_set (FILE *asm_out_file, rtx pat)
7605 rtx src = SET_SRC (pat);
7606 rtx dest = SET_DEST (pat);
7607 int src_regno, dest_regno;
7609 /* Look for the ALLOC insn. */
7610 if (GET_CODE (src) == UNSPEC_VOLATILE
7611 && XINT (src, 1) == UNSPECV_ALLOC
7612 && GET_CODE (dest) == REG)
7614 dest_regno = REGNO (dest);
7616 /* If this is the final destination for ar.pfs, then this must
7617 be the alloc in the prologue. */
7618 if (dest_regno == current_frame_info.reg_save_ar_pfs)
7619 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
7620 ia64_dbx_register_number (dest_regno));
7623 /* This must be an alloc before a sibcall. We must drop the
7624 old frame info. The easiest way to drop the old frame
7625 info is to ensure we had a ".restore sp" directive
7626 followed by a new prologue. If the procedure doesn't
7627 have a memory-stack frame, we'll issue a dummy ".restore
7628 sp" now. */
7629 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
7630 /* If we haven't done process_epilogue () yet, do it now. */
7631 process_epilogue ();
7632 fprintf (asm_out_file, "\t.prologue\n");
7637 /* Look for SP = .... */
7638 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
7640 if (GET_CODE (src) == PLUS)
7642 rtx op0 = XEXP (src, 0);
7643 rtx op1 = XEXP (src, 1);
7644 if (op0 == dest && GET_CODE (op1) == CONST_INT)
7646 if (INTVAL (op1) < 0)
7647 fprintf (asm_out_file, "\t.fframe "HOST_WIDE_INT_PRINT_DEC"\n",
7650 process_epilogue ();
7655 else if (GET_CODE (src) == REG
7656 && REGNO (src) == HARD_FRAME_POINTER_REGNUM)
7657 process_epilogue ();
7664 /* Register move we need to look at. */
7665 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
7667 src_regno = REGNO (src);
7668 dest_regno = REGNO (dest);
7673 /* Saving return address pointer. */
7674 if (dest_regno != current_frame_info.reg_save_b0)
7676 fprintf (asm_out_file, "\t.save rp, r%d\n",
7677 ia64_dbx_register_number (dest_regno));
7681 if (dest_regno != current_frame_info.reg_save_pr)
7683 fprintf (asm_out_file, "\t.save pr, r%d\n",
7684 ia64_dbx_register_number (dest_regno));
7687 case AR_UNAT_REGNUM:
7688 if (dest_regno != current_frame_info.reg_save_ar_unat)
7690 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
7691 ia64_dbx_register_number (dest_regno));
7695 if (dest_regno != current_frame_info.reg_save_ar_lc)
7697 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
7698 ia64_dbx_register_number (dest_regno));
7701 case STACK_POINTER_REGNUM:
7702 if (dest_regno != HARD_FRAME_POINTER_REGNUM
7703 || ! frame_pointer_needed)
7705 fprintf (asm_out_file, "\t.vframe r%d\n",
7706 ia64_dbx_register_number (dest_regno));
7710 /* Everything else should indicate being stored to memory. */
7715 /* Memory store we need to look at. */
7716 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
7722 if (GET_CODE (XEXP (dest, 0)) == REG)
7724 base = XEXP (dest, 0);
7727 else if (GET_CODE (XEXP (dest, 0)) == PLUS
7728 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT)
7730 base = XEXP (XEXP (dest, 0), 0);
7731 off = INTVAL (XEXP (XEXP (dest, 0), 1));
7736 if (base == hard_frame_pointer_rtx)
7738 saveop = ".savepsp";
7741 else if (base == stack_pointer_rtx)
7746 src_regno = REGNO (src);
7750 if (current_frame_info.reg_save_b0 != 0)
7752 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
7756 if (current_frame_info.reg_save_pr != 0)
7758 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
7762 if (current_frame_info.reg_save_ar_lc != 0)
7764 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
7768 if (current_frame_info.reg_save_ar_pfs != 0)
7770 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
7773 case AR_UNAT_REGNUM:
7774 if (current_frame_info.reg_save_ar_unat != 0)
7776 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
7783 fprintf (asm_out_file, "\t.save.g 0x%x\n",
7784 1 << (src_regno - GR_REG (4)));
7792 fprintf (asm_out_file, "\t.save.b 0x%x\n",
7793 1 << (src_regno - BR_REG (1)));
7800 fprintf (asm_out_file, "\t.save.f 0x%x\n",
7801 1 << (src_regno - FR_REG (2)));
7804 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
7805 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
7806 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
7807 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
7808 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
7809 1 << (src_regno - FR_REG (12)));
7821 /* This function looks at a single insn and emits any directives
7822 required to unwind this insn. */
7824 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
7826 if (flag_unwind_tables
7827 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
7831 if (GET_CODE (insn) == NOTE
7832 && NOTE_LINE_NUMBER (insn) == NOTE_INSN_BASIC_BLOCK)
7834 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
7836 /* Restore unwind state from immediately before the epilogue. */
7837 if (need_copy_state)
7839 fprintf (asm_out_file, "\t.body\n");
7840 fprintf (asm_out_file, "\t.copy_state %d\n",
7841 cfun->machine->state_num);
7842 need_copy_state = false;
7846 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
7849 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
7851 pat = XEXP (pat, 0);
7853 pat = PATTERN (insn);
7855 switch (GET_CODE (pat))
7858 process_set (asm_out_file, pat);
7864 int limit = XVECLEN (pat, 0);
7865 for (par_index = 0; par_index < limit; par_index++)
7867 rtx x = XVECEXP (pat, 0, par_index);
7868 if (GET_CODE (x) == SET)
7869 process_set (asm_out_file, x);
7882 ia64_init_builtins (void)
7884 tree psi_type_node = build_pointer_type (integer_type_node);
7885 tree pdi_type_node = build_pointer_type (long_integer_type_node);
7887 /* __sync_val_compare_and_swap_si, __sync_bool_compare_and_swap_si */
7888 tree si_ftype_psi_si_si
7889 = build_function_type_list (integer_type_node,
7890 psi_type_node, integer_type_node,
7891 integer_type_node, NULL_TREE);
7893 /* __sync_val_compare_and_swap_di */
7894 tree di_ftype_pdi_di_di
7895 = build_function_type_list (long_integer_type_node,
7896 pdi_type_node, long_integer_type_node,
7897 long_integer_type_node, NULL_TREE);
7898 /* __sync_bool_compare_and_swap_di */
7899 tree si_ftype_pdi_di_di
7900 = build_function_type_list (integer_type_node,
7901 pdi_type_node, long_integer_type_node,
7902 long_integer_type_node, NULL_TREE);
7903 /* __sync_synchronize */
7904 tree void_ftype_void
7905 = build_function_type (void_type_node, void_list_node);
7907 /* __sync_lock_test_and_set_si */
7908 tree si_ftype_psi_si
7909 = build_function_type_list (integer_type_node,
7910 psi_type_node, integer_type_node, NULL_TREE);
7912 /* __sync_lock_test_and_set_di */
7913 tree di_ftype_pdi_di
7914 = build_function_type_list (long_integer_type_node,
7915 pdi_type_node, long_integer_type_node,
7918 /* __sync_lock_release_si */
7920 = build_function_type_list (void_type_node, psi_type_node, NULL_TREE);
7922 /* __sync_lock_release_di */
7924 = build_function_type_list (void_type_node, pdi_type_node, NULL_TREE);
7929 /* The __fpreg type. */
7930 fpreg_type = make_node (REAL_TYPE);
7931 /* ??? The back end should know to load/save __fpreg variables using
7932 the ldf.fill and stf.spill instructions. */
7933 TYPE_PRECISION (fpreg_type) = 80;
7934 layout_type (fpreg_type);
7935 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
7937 /* The __float80 type. */
7938 float80_type = make_node (REAL_TYPE);
7939 TYPE_PRECISION (float80_type) = 80;
7940 layout_type (float80_type);
7941 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
7943 /* The __float128 type. */
7946 tree float128_type = make_node (REAL_TYPE);
7947 TYPE_PRECISION (float128_type) = 128;
7948 layout_type (float128_type);
7949 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
7952 /* Under HPUX, this is a synonym for "long double". */
7953 (*lang_hooks.types.register_builtin_type) (long_double_type_node,
7956 #define def_builtin(name, type, code) \
7957 lang_hooks.builtin_function ((name), (type), (code), BUILT_IN_MD, \
7960 def_builtin ("__sync_val_compare_and_swap_si", si_ftype_psi_si_si,
7961 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI);
7962 def_builtin ("__sync_val_compare_and_swap_di", di_ftype_pdi_di_di,
7963 IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI);
7964 def_builtin ("__sync_bool_compare_and_swap_si", si_ftype_psi_si_si,
7965 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI);
7966 def_builtin ("__sync_bool_compare_and_swap_di", si_ftype_pdi_di_di,
7967 IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI);
7969 def_builtin ("__sync_synchronize", void_ftype_void,
7970 IA64_BUILTIN_SYNCHRONIZE);
7972 def_builtin ("__sync_lock_test_and_set_si", si_ftype_psi_si,
7973 IA64_BUILTIN_LOCK_TEST_AND_SET_SI);
7974 def_builtin ("__sync_lock_test_and_set_di", di_ftype_pdi_di,
7975 IA64_BUILTIN_LOCK_TEST_AND_SET_DI);
7976 def_builtin ("__sync_lock_release_si", void_ftype_psi,
7977 IA64_BUILTIN_LOCK_RELEASE_SI);
7978 def_builtin ("__sync_lock_release_di", void_ftype_pdi,
7979 IA64_BUILTIN_LOCK_RELEASE_DI);
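/* A note on expansion: for example, `__sync_synchronize ()' in user
   code becomes a single `mf' instruction, and the lock_release
   builtins become a plain release store of zero (see
   ia64_expand_builtin and the expanders below).  */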
7981 def_builtin ("__builtin_ia64_bsp",
7982 build_function_type (ptr_type_node, void_list_node),
7985 def_builtin ("__builtin_ia64_flushrs",
7986 build_function_type (void_type_node, void_list_node),
7987 IA64_BUILTIN_FLUSHRS);
7989 def_builtin ("__sync_fetch_and_add_si", si_ftype_psi_si,
7990 IA64_BUILTIN_FETCH_AND_ADD_SI);
7991 def_builtin ("__sync_fetch_and_sub_si", si_ftype_psi_si,
7992 IA64_BUILTIN_FETCH_AND_SUB_SI);
7993 def_builtin ("__sync_fetch_and_or_si", si_ftype_psi_si,
7994 IA64_BUILTIN_FETCH_AND_OR_SI);
7995 def_builtin ("__sync_fetch_and_and_si", si_ftype_psi_si,
7996 IA64_BUILTIN_FETCH_AND_AND_SI);
7997 def_builtin ("__sync_fetch_and_xor_si", si_ftype_psi_si,
7998 IA64_BUILTIN_FETCH_AND_XOR_SI);
7999 def_builtin ("__sync_fetch_and_nand_si", si_ftype_psi_si,
8000 IA64_BUILTIN_FETCH_AND_NAND_SI);
8002 def_builtin ("__sync_add_and_fetch_si", si_ftype_psi_si,
8003 IA64_BUILTIN_ADD_AND_FETCH_SI);
8004 def_builtin ("__sync_sub_and_fetch_si", si_ftype_psi_si,
8005 IA64_BUILTIN_SUB_AND_FETCH_SI);
8006 def_builtin ("__sync_or_and_fetch_si", si_ftype_psi_si,
8007 IA64_BUILTIN_OR_AND_FETCH_SI);
8008 def_builtin ("__sync_and_and_fetch_si", si_ftype_psi_si,
8009 IA64_BUILTIN_AND_AND_FETCH_SI);
8010 def_builtin ("__sync_xor_and_fetch_si", si_ftype_psi_si,
8011 IA64_BUILTIN_XOR_AND_FETCH_SI);
8012 def_builtin ("__sync_nand_and_fetch_si", si_ftype_psi_si,
8013 IA64_BUILTIN_NAND_AND_FETCH_SI);
8015 def_builtin ("__sync_fetch_and_add_di", di_ftype_pdi_di,
8016 IA64_BUILTIN_FETCH_AND_ADD_DI);
8017 def_builtin ("__sync_fetch_and_sub_di", di_ftype_pdi_di,
8018 IA64_BUILTIN_FETCH_AND_SUB_DI);
8019 def_builtin ("__sync_fetch_and_or_di", di_ftype_pdi_di,
8020 IA64_BUILTIN_FETCH_AND_OR_DI);
8021 def_builtin ("__sync_fetch_and_and_di", di_ftype_pdi_di,
8022 IA64_BUILTIN_FETCH_AND_AND_DI);
8023 def_builtin ("__sync_fetch_and_xor_di", di_ftype_pdi_di,
8024 IA64_BUILTIN_FETCH_AND_XOR_DI);
8025 def_builtin ("__sync_fetch_and_nand_di", di_ftype_pdi_di,
8026 IA64_BUILTIN_FETCH_AND_NAND_DI);
8028 def_builtin ("__sync_add_and_fetch_di", di_ftype_pdi_di,
8029 IA64_BUILTIN_ADD_AND_FETCH_DI);
8030 def_builtin ("__sync_sub_and_fetch_di", di_ftype_pdi_di,
8031 IA64_BUILTIN_SUB_AND_FETCH_DI);
8032 def_builtin ("__sync_or_and_fetch_di", di_ftype_pdi_di,
8033 IA64_BUILTIN_OR_AND_FETCH_DI);
8034 def_builtin ("__sync_and_and_fetch_di", di_ftype_pdi_di,
8035 IA64_BUILTIN_AND_AND_FETCH_DI);
8036 def_builtin ("__sync_xor_and_fetch_di", di_ftype_pdi_di,
8037 IA64_BUILTIN_XOR_AND_FETCH_DI);
8038 def_builtin ("__sync_nand_and_fetch_di", di_ftype_pdi_di,
8039 IA64_BUILTIN_NAND_AND_FETCH_DI);
8044 /* Expand fetch_and_op intrinsics. The basic code sequence is:
8045
8046      mf
8047      tmp = [ptr];
8048      do {
8049        ret = tmp;
8050        ar.ccv = tmp;
8051        tmp <op>= value;
8052        cmpxchgsz.acq tmp = [ptr], tmp
8053      } while (tmp != ret)
8054 */
8057 ia64_expand_fetch_and_op (optab binoptab, enum machine_mode mode,
8058 tree arglist, rtx target)
8060 rtx ret, label, tmp, ccv, insn, mem, value;
8063 arg0 = TREE_VALUE (arglist);
8064 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8065 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8066 #ifdef POINTERS_EXTEND_UNSIGNED
8067 if (GET_MODE(mem) != Pmode)
8068 mem = convert_memory_address (Pmode, mem);
8070 value = expand_expr (arg1, NULL_RTX, mode, 0);
8072 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8073 MEM_VOLATILE_P (mem) = 1;
8075 if (target && register_operand (target, mode))
8078 ret = gen_reg_rtx (mode);
8080 emit_insn (gen_mf ());
8082 /* Special case for fetchadd instructions. */
8083 if (binoptab == add_optab && fetchadd_operand (value, VOIDmode))
8086 insn = gen_fetchadd_acq_si (ret, mem, value);
8088 insn = gen_fetchadd_acq_di (ret, mem, value);
8093 tmp = gen_reg_rtx (mode);
8094 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8095 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8096 emit_move_insn (tmp, mem);
8098 label = gen_label_rtx ();
8100 emit_move_insn (ret, tmp);
8101 convert_move (ccv, tmp, /*unsignedp=*/1);
8103 /* Perform the specific operation. NAND is special-cased: it is
8104 passed in as one_cmpl_optab, so complement TMP first and then AND. */
8105 if (binoptab == one_cmpl_optab)
8107 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8108 binoptab = and_optab;
8110 tmp = expand_binop (mode, binoptab, tmp, value, tmp, 1, OPTAB_WIDEN);
8113 insn = gen_cmpxchg_acq_si (tmp, mem, tmp, ccv);
8115 insn = gen_cmpxchg_acq_di (tmp, mem, tmp, ccv);
8118 emit_cmp_and_jump_insns (tmp, ret, NE, 0, mode, 1, label);
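/* For reference, an illustrative use of the SImode builtin expanded
   here (user code, not part of this file):

     int counter;
     int old = __sync_fetch_and_add_si (&counter, 1);

   OLD receives the value COUNTER had before the increment. When the
   addend is in fetchadd range, the special case above emits a single
   fetchadd4.acq; otherwise the cmpxchg loop is used.  */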
8123 /* Expand op_and_fetch intrinsics. The basic code sequence is:
8124
8125      mf
8126      tmp = [ptr];
8127      do {
8128        old = tmp;
8129        ar.ccv = tmp;
8130        ret = tmp <op> value;
8131        cmpxchgsz.acq tmp = [ptr], ret
8132      } while (tmp != old)
8133 */
8136 ia64_expand_op_and_fetch (optab binoptab, enum machine_mode mode,
8137 tree arglist, rtx target)
8139 rtx old, label, tmp, ret, ccv, insn, mem, value;
8142 arg0 = TREE_VALUE (arglist);
8143 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8144 mem = expand_expr (arg0, NULL_RTX, Pmode, 0);
8145 #ifdef POINTERS_EXTEND_UNSIGNED
8146 if (GET_MODE(mem) != Pmode)
8147 mem = convert_memory_address (Pmode, mem);
8150 value = expand_expr (arg1, NULL_RTX, mode, 0);
8152 mem = gen_rtx_MEM (mode, force_reg (Pmode, mem));
8153 MEM_VOLATILE_P (mem) = 1;
8155 if (target && ! register_operand (target, mode))
8158 emit_insn (gen_mf ());
8159 tmp = gen_reg_rtx (mode);
8160 old = gen_reg_rtx (mode);
8161 /* ar.ccv must always be loaded with a zero-extended DImode value. */
8162 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8164 emit_move_insn (tmp, mem);
8166 label = gen_label_rtx ();
8168 emit_move_insn (old, tmp);
8169 convert_move (ccv, tmp, /*unsignedp=*/1);
8171 /* Perform the specific operation. NAND is special-cased: it is
8172 passed in as one_cmpl_optab, so complement TMP first and then AND. */
8173 if (binoptab == one_cmpl_optab)
8175 tmp = expand_unop (mode, binoptab, tmp, NULL, OPTAB_WIDEN);
8176 binoptab = and_optab;
8178 ret = expand_binop (mode, binoptab, tmp, value, target, 1, OPTAB_WIDEN);
8181 insn = gen_cmpxchg_acq_si (tmp, mem, ret, ccv);
8183 insn = gen_cmpxchg_acq_di (tmp, mem, ret, ccv);
8186 emit_cmp_and_jump_insns (tmp, old, NE, 0, mode, 1, label);
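/* For reference (illustrative user code): in contrast to the
   fetch_and_op case above,

     int n = __sync_add_and_fetch_si (&counter, 1);

   yields the value after the increment.  */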
8191 /* Expand val_ and bool_compare_and_swap. For val_ we want:
8192
8193      ar.ccv = oldval
8194      mf
8195      cmpxchgsz.acq ret = [ptr], newval, ar.ccv
8196      return ret
8197
8198    For bool_ it's the same except return ret == oldval.
8199 */
8202 ia64_expand_compare_and_swap (enum machine_mode rmode, enum machine_mode mode,
8203 int boolp, tree arglist, rtx target)
8205 tree arg0, arg1, arg2;
8206 rtx mem, old, new, ccv, tmp, insn;
8208 arg0 = TREE_VALUE (arglist);
8209 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8210 arg2 = TREE_VALUE (TREE_CHAIN (TREE_CHAIN (arglist)));
8211 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8212 old = expand_expr (arg1, NULL_RTX, mode, 0);
8213 new = expand_expr (arg2, NULL_RTX, mode, 0);
8215 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8216 MEM_VOLATILE_P (mem) = 1;
8218 if (GET_MODE (old) != mode)
8219 old = convert_to_mode (mode, old, /*unsignedp=*/1);
8220 if (GET_MODE (new) != mode)
8221 new = convert_to_mode (mode, new, /*unsignedp=*/1);
8223 if (! register_operand (old, mode))
8224 old = copy_to_mode_reg (mode, old);
8225 if (! register_operand (new, mode))
8226 new = copy_to_mode_reg (mode, new);
8228 if (! boolp && target && register_operand (target, mode))
8231 tmp = gen_reg_rtx (mode);
8233 ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
8234 convert_move (ccv, old, /*unsignedp=*/1);
8235 emit_insn (gen_mf ());
8237 insn = gen_cmpxchg_acq_si (tmp, mem, new, ccv);
8239 insn = gen_cmpxchg_acq_di (tmp, mem, new, ccv);
8245 target = gen_reg_rtx (rmode);
8246 return emit_store_flag_force (target, EQ, tmp, old, mode, 1, 1);
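/* For reference (illustrative user code): a typical retry loop built
   on the bool_ variant expanded above.

     int old;
     do
       old = shared;
     while (! __sync_bool_compare_and_swap_si (&shared, old, old + 1));
*/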
8252 /* Expand lock_test_and_set. I.e. `xchgsz ret = [ptr], new'. */
8255 ia64_expand_lock_test_and_set (enum machine_mode mode, tree arglist,
8259 rtx mem, new, ret, insn;
8261 arg0 = TREE_VALUE (arglist);
8262 arg1 = TREE_VALUE (TREE_CHAIN (arglist));
8263 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8264 new = expand_expr (arg1, NULL_RTX, mode, 0);
8266 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8267 MEM_VOLATILE_P (mem) = 1;
8268 if (! register_operand (new, mode))
8269 new = copy_to_mode_reg (mode, new);
8271 if (target && register_operand (target, mode))
8274 ret = gen_reg_rtx (mode);
8277 insn = gen_xchgsi (ret, mem, new);
8279 insn = gen_xchgdi (ret, mem, new);
8285 /* Expand lock_release. I.e. `stsz.rel [ptr] = r0'. */
8288 ia64_expand_lock_release (enum machine_mode mode, tree arglist,
8289 rtx target ATTRIBUTE_UNUSED)
8294 arg0 = TREE_VALUE (arglist);
8295 mem = expand_expr (arg0, NULL_RTX, ptr_mode, 0);
8297 mem = gen_rtx_MEM (mode, force_reg (ptr_mode, mem));
8298 MEM_VOLATILE_P (mem) = 1;
8300 emit_move_insn (mem, const0_rtx);
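/* For reference (illustrative user code): the two expanders above are
   enough for a simple spinlock.

     static int lock;

     while (__sync_lock_test_and_set_si (&lock, 1))
       ;                                  -- xchg4 with acquire semantics
     ... critical section ...
     __sync_lock_release_si (&lock);      -- release store of zero
*/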
8306 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
8307 enum machine_mode mode ATTRIBUTE_UNUSED,
8308 int ignore ATTRIBUTE_UNUSED)
8310 tree fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
8311 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
8312 tree arglist = TREE_OPERAND (exp, 1);
8313 enum machine_mode rmode = VOIDmode;
8317 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8318 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8323 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8324 case IA64_BUILTIN_LOCK_RELEASE_SI:
8325 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8326 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8327 case IA64_BUILTIN_FETCH_AND_OR_SI:
8328 case IA64_BUILTIN_FETCH_AND_AND_SI:
8329 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8330 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8331 case IA64_BUILTIN_ADD_AND_FETCH_SI:
8332 case IA64_BUILTIN_SUB_AND_FETCH_SI:
8333 case IA64_BUILTIN_OR_AND_FETCH_SI:
8334 case IA64_BUILTIN_AND_AND_FETCH_SI:
8335 case IA64_BUILTIN_XOR_AND_FETCH_SI:
8336 case IA64_BUILTIN_NAND_AND_FETCH_SI:
8340 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8345 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8350 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8351 case IA64_BUILTIN_LOCK_RELEASE_DI:
8352 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8353 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8354 case IA64_BUILTIN_FETCH_AND_OR_DI:
8355 case IA64_BUILTIN_FETCH_AND_AND_DI:
8356 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8357 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8358 case IA64_BUILTIN_ADD_AND_FETCH_DI:
8359 case IA64_BUILTIN_SUB_AND_FETCH_DI:
8360 case IA64_BUILTIN_OR_AND_FETCH_DI:
8361 case IA64_BUILTIN_AND_AND_FETCH_DI:
8362 case IA64_BUILTIN_XOR_AND_FETCH_DI:
8363 case IA64_BUILTIN_NAND_AND_FETCH_DI:
8373 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_SI:
8374 case IA64_BUILTIN_BOOL_COMPARE_AND_SWAP_DI:
8375 return ia64_expand_compare_and_swap (rmode, mode, 1, arglist,
8378 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_SI:
8379 case IA64_BUILTIN_VAL_COMPARE_AND_SWAP_DI:
8380 return ia64_expand_compare_and_swap (rmode, mode, 0, arglist,
8383 case IA64_BUILTIN_SYNCHRONIZE:
8384 emit_insn (gen_mf ());
8387 case IA64_BUILTIN_LOCK_TEST_AND_SET_SI:
8388 case IA64_BUILTIN_LOCK_TEST_AND_SET_DI:
8389 return ia64_expand_lock_test_and_set (mode, arglist, target);
8391 case IA64_BUILTIN_LOCK_RELEASE_SI:
8392 case IA64_BUILTIN_LOCK_RELEASE_DI:
8393 return ia64_expand_lock_release (mode, arglist, target);
8395 case IA64_BUILTIN_BSP:
8396 if (! target || ! register_operand (target, DImode))
8397 target = gen_reg_rtx (DImode);
8398 emit_insn (gen_bsp_value (target));
8399 #ifdef POINTERS_EXTEND_UNSIGNED
8400 target = convert_memory_address (ptr_mode, target);
8404 case IA64_BUILTIN_FLUSHRS:
8405 emit_insn (gen_flushrs ());
8408 case IA64_BUILTIN_FETCH_AND_ADD_SI:
8409 case IA64_BUILTIN_FETCH_AND_ADD_DI:
8410 return ia64_expand_fetch_and_op (add_optab, mode, arglist, target);
8412 case IA64_BUILTIN_FETCH_AND_SUB_SI:
8413 case IA64_BUILTIN_FETCH_AND_SUB_DI:
8414 return ia64_expand_fetch_and_op (sub_optab, mode, arglist, target);
8416 case IA64_BUILTIN_FETCH_AND_OR_SI:
8417 case IA64_BUILTIN_FETCH_AND_OR_DI:
8418 return ia64_expand_fetch_and_op (ior_optab, mode, arglist, target);
8420 case IA64_BUILTIN_FETCH_AND_AND_SI:
8421 case IA64_BUILTIN_FETCH_AND_AND_DI:
8422 return ia64_expand_fetch_and_op (and_optab, mode, arglist, target);
8424 case IA64_BUILTIN_FETCH_AND_XOR_SI:
8425 case IA64_BUILTIN_FETCH_AND_XOR_DI:
8426 return ia64_expand_fetch_and_op (xor_optab, mode, arglist, target);
8428 case IA64_BUILTIN_FETCH_AND_NAND_SI:
8429 case IA64_BUILTIN_FETCH_AND_NAND_DI:
8430 return ia64_expand_fetch_and_op (one_cmpl_optab, mode, arglist, target);

    case IA64_BUILTIN_ADD_AND_FETCH_SI:
    case IA64_BUILTIN_ADD_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (add_optab, mode, arglist, target);

    case IA64_BUILTIN_SUB_AND_FETCH_SI:
    case IA64_BUILTIN_SUB_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (sub_optab, mode, arglist, target);

    case IA64_BUILTIN_OR_AND_FETCH_SI:
    case IA64_BUILTIN_OR_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (ior_optab, mode, arglist, target);

    case IA64_BUILTIN_AND_AND_FETCH_SI:
    case IA64_BUILTIN_AND_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (and_optab, mode, arglist, target);

    case IA64_BUILTIN_XOR_AND_FETCH_SI:
    case IA64_BUILTIN_XOR_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (xor_optab, mode, arglist, target);

    case IA64_BUILTIN_NAND_AND_FETCH_SI:
    case IA64_BUILTIN_NAND_AND_FETCH_DI:
      return ia64_expand_op_and_fetch (one_cmpl_optab, mode, arglist, target);

    default:
      break;
    }

  return NULL_RTX;
}
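
/* The builtins dispatched above are registered in ia64_init_builtins and
   reach this function through the TARGET_EXPAND_BUILTIN hook.  The first
   switch only selects the operand (and result) machine mode, which is
   what lets each _SI/_DI pair of cases share a single expander call in
   the second switch.  */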

/* On HP-UX IA64, aggregate parameters are passed in the most
   significant bits of the stack slot.  */

enum direction
ia64_hpux_function_arg_padding (enum machine_mode mode, tree type)
{
  /* Exception to the normal case for structures/unions/etc.  */
  if (type && AGGREGATE_TYPE_P (type)
      && int_size_in_bytes (type) < UNITS_PER_WORD)
    return upward;

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
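
/* Concretely: a three-byte struct occupies the first three bytes of its
   slot (the most significant bytes, HP-UX IA64 being big-endian), with
   the padding after it; the big-endian default would instead pad
   downward, leaving the value in the least significant bytes.  */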

/* Linked list of all external functions that are to be emitted by GCC.
   We output the name if and only if TREE_SYMBOL_REFERENCED is set in
   order to avoid putting out names that are never really used.  */

struct extern_func_list GTY(())
{
  struct extern_func_list *next;
  tree decl;
};

static GTY(()) struct extern_func_list *extern_func_head;

static void
ia64_hpux_add_extern_decl (tree decl)
{
  struct extern_func_list *p = ggc_alloc (sizeof (struct extern_func_list));

  p->decl = decl;
  p->next = extern_func_head;
  extern_func_head = p;
}
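
/* The GTY(()) markers expose both the list nodes and the decls they
   reference to GCC's garbage collector, so the trees recorded here stay
   live until ia64_hpux_file_end walks the list at the end of the
   translation unit.  */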

/* Print out the list of used global functions.  */

static void
ia64_hpux_file_end (void)
{
  struct extern_func_list *p;

  for (p = extern_func_head; p; p = p->next)
    {
      tree decl = p->decl;
      tree id = DECL_ASSEMBLER_NAME (decl);

      if (!id)
        abort ();

      if (!TREE_ASM_WRITTEN (decl) && TREE_SYMBOL_REFERENCED (id))
        {
          const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

          TREE_ASM_WRITTEN (decl) = 1;
          (*targetm.asm_out.globalize_label) (asm_out_file, name);
          fputs (TYPE_ASM_OP, asm_out_file);
          assemble_name (asm_out_file, name);
          fprintf (asm_out_file, "," TYPE_OPERAND_FMT "\n", "function");
        }
    }

  extern_func_head = 0;
}
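
/* For a referenced but never-defined function `foo', the loop above
   emits roughly (the exact directives depend on GLOBAL_ASM_OP,
   TYPE_ASM_OP and TYPE_OPERAND_FMT for the target assembler):

        .global foo
        .type foo,@function

   so the HP-UX linker sees the symbol declared as a function.  */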

/* Set the SImode div/mod functions, since init_integral_libfuncs only
   initializes modes of word_mode and larger, and rename the TFmode
   libfuncs using the HP-UX conventions.  __divtf3 is used for XFmode,
   and we need to keep it for backward compatibility.  */

static void
ia64_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
  set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
  set_optab_libfunc (smod_optab, SImode, "__modsi3");
  set_optab_libfunc (umod_optab, SImode, "__umodsi3");

  set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
  set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
  set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
  set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
  set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");

  set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
  set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
  set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
  set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
  set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");

  set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
  set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
  set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
  set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");

  set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
  set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
}
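
/* After this runs, a TFmode multiplication is emitted as a call to
   _U_Qfmpy rather than the default __multf3, and SImode division gets
   an explicit __divsi3 entry that init_integral_libfuncs would not
   otherwise have provided on a 64-bit word_mode target.  */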

/* Rename all the TFmode libfuncs using the HP-UX conventions.  */

static void
ia64_hpux_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
  set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
  set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");

  /* ia64_expand_compare uses this.  */
  cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");

  /* These should never be used.  */
  set_optab_libfunc (eq_optab, TFmode, 0);
  set_optab_libfunc (ne_optab, TFmode, 0);
  set_optab_libfunc (gt_optab, TFmode, 0);
  set_optab_libfunc (ge_optab, TFmode, 0);
  set_optab_libfunc (lt_optab, TFmode, 0);
  set_optab_libfunc (le_optab, TFmode, 0);
}
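
/* _U_Qfcmp is a single entry point that receives the predicate to test
   as a magic-number argument, which is why the per-predicate comparison
   optabs are cleared above: ia64_expand_compare builds the _U_Qfcmp
   calls itself via cmptf_libfunc.  */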

/* Rename the division and modulus functions in VMS.  */

static void
ia64_vms_init_libfuncs (void)
{
  set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
  set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
  set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
  set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
  set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
  set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
  set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
  set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
}

/* Rename the TFmode libfuncs available from soft-fp in glibc using
   the HP-UX conventions.  */

static void
ia64_sysv4_init_libfuncs (void)
{
  ia64_init_libfuncs ();

  /* These functions are not part of the HP-UX TFmode interface.  We
     use them instead of _U_Qfcmp, which doesn't work the way we
     expect.  */
  set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
  set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
  set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
  set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
  set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
  set_optab_libfunc (le_optab, TFmode, "_U_Qfle");

  /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
     glibc doesn't have them.  */
}

/* Switch to the section to which we should output X.  The only thing
   special we do here is to honor small data.  */

static void
ia64_select_rtx_section (enum machine_mode mode, rtx x,
                         unsigned HOST_WIDE_INT align)
{
  if (GET_MODE_SIZE (mode) > 0
      && GET_MODE_SIZE (mode) <= ia64_section_threshold)
    sdata_section ();
  else
    default_elf_select_rtx_section (mode, x, align);
}
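
/* Placing sufficiently small constants in .sdata matters because the
   small data area is addressed relative to the gp register; an object
   there can be reached with a single gp-relative addl, avoiding the
   linkage-table load needed for addresses in ordinary sections.  */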

/* It is illegal to have relocations in shared segments on AIX and HPUX.
   Pretend flag_pic is always set.  */

static void
ia64_rwreloc_select_section (tree exp, int reloc, unsigned HOST_WIDE_INT align)
{
  default_elf_select_section_1 (exp, reloc, align, true);
}

static void
ia64_rwreloc_unique_section (tree decl, int reloc)
{
  default_unique_section_1 (decl, reloc, true);
}

static void
ia64_rwreloc_select_rtx_section (enum machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align)
{
  int save_pic = flag_pic;
  flag_pic = 1;
  ia64_select_rtx_section (mode, x, align);
  flag_pic = save_pic;
}

#ifndef TARGET_RWRELOC
#define TARGET_RWRELOC flag_pic
#endif

static unsigned int
ia64_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int flags = 0;

  if (strcmp (name, ".sdata") == 0
      || strncmp (name, ".sdata.", 7) == 0
      || strncmp (name, ".gnu.linkonce.s.", 16) == 0
      || strncmp (name, ".sdata2.", 8) == 0
      || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
      || strcmp (name, ".sbss") == 0
      || strncmp (name, ".sbss.", 6) == 0
      || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
    flags = SECTION_SMALL;

  flags |= default_section_type_flags_1 (decl, name, reloc, TARGET_RWRELOC);
  return flags;
}
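
/* SECTION_SMALL is turned into the `s' attribute on the emitted
   .section directive by the generic ELF code, marking these sections as
   belonging to the gp-relative small data area.  */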

/* Return true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
   structure type and the address of that structure should be passed
   in out0, rather than in r8.  */

static bool
ia64_struct_retval_addr_is_first_parm_p (tree fntype)
{
  tree ret_type = TREE_TYPE (fntype);

  /* The Itanium C++ ABI requires that out0, rather than r8, be used
     as the structure return address parameter, if the return value
     type has a non-trivial copy constructor or destructor.  It is not
     clear if this same convention should be used for other
     programming languages.  Until G++ 3.4, we incorrectly used r8 for
     these return values.  */
  return (abi_version_at_least (2)
          && ret_type
          && TYPE_MODE (ret_type) == BLKmode
          && TREE_ADDRESSABLE (ret_type)
          && strcmp (lang_hooks.name, "GNU C++") == 0);
}
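
/* Concretely: for a C++ function returning a class with a non-trivial
   destructor, the caller passes the return-slot address in out0 and
   `this' (for member functions) in out1; a function returning a plain
   POD struct by value still has the slot address passed in r8.  */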

/* Output the assembler code for a thunk function.  THUNK is the
   declaration for the thunk function itself, FUNCTION is the decl for
   the target function.  DELTA is an immediate constant offset to be
   added to THIS.  If VCALL_OFFSET is nonzero, the word at
   *(*this + vcall_offset) should be added to THIS.  */

static void
ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
                      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
                      tree function)
{
  rtx this, insn, funexp;
  unsigned int this_parmno;
  unsigned int this_regno;

  reload_completed = 1;
  epilogue_completed = 1;
  no_new_pseudos = 1;
  reset_block_changes ();

  /* Set things up as ia64_expand_prologue might.  */
  last_scratch_gr_reg = 15;

  memset (&current_frame_info, 0, sizeof (current_frame_info));
  current_frame_info.spill_cfa_off = -16;
  current_frame_info.n_input_regs = 1;
  current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Figure out whether "this" will be the first parameter (the
     typical case) or the second parameter (as happens when the
     virtual function returns certain class objects).  */
  this_parmno
    = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
       ? 1 : 0);
  this_regno = IN_REG (this_parmno);
  if (!TARGET_REG_NAMES)
    reg_names[this_regno] = ia64_reg_numbers[this_parmno];

  this = gen_rtx_REG (Pmode, this_regno);
  if (TARGET_ILP32)
    {
      rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
      REG_POINTER (tmp) = 1;
      if (delta && CONST_OK_FOR_I (delta))
        {
          emit_insn (gen_ptr_extend_plus_imm (this, tmp, GEN_INT (delta)));
          delta = 0;
        }
      else
        emit_insn (gen_ptr_extend (this, tmp));
    }

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);

      if (!CONST_OK_FOR_I (delta))
        {
          rtx tmp = gen_rtx_REG (Pmode, 2);
          emit_move_insn (tmp, delta_rtx);
          delta_rtx = tmp;
        }
      emit_insn (gen_adddi3 (this, this, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 2);

      if (TARGET_ILP32)
        {
          rtx t = gen_rtx_REG (ptr_mode, 2);
          REG_POINTER (t) = 1;
          emit_move_insn (t, gen_rtx_MEM (ptr_mode, this));
          if (CONST_OK_FOR_I (vcall_offset))
            {
              emit_insn (gen_ptr_extend_plus_imm (tmp, t,
                                                  vcall_offset_rtx));
              vcall_offset = 0;
            }
          else
            emit_insn (gen_ptr_extend (tmp, t));
        }
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, this));

      if (vcall_offset)
        {
          if (!CONST_OK_FOR_J (vcall_offset))
            {
              rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
              emit_move_insn (tmp2, vcall_offset_rtx);
              vcall_offset_rtx = tmp2;
            }
          emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
        }

      if (TARGET_ILP32)
        emit_move_insn (gen_rtx_REG (ptr_mode, 2),
                        gen_rtx_MEM (ptr_mode, tmp));
      else
        emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));

      emit_insn (gen_adddi3 (this, this, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (! TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
  ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
  insn = get_last_insn ();
  SIBLING_CALL_P (insn) = 1;

  /* Code generation for calls relies on splitting.  */
  reload_completed = 1;
  epilogue_completed = 1;
  try_split (PATTERN (insn), insn, 0);

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worthwhile.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */

  insn_locators_initialize ();
  emit_all_insn_group_barriers (NULL);
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1, 0);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
  no_new_pseudos = 0;
}
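
/* In the simplest case (LP64, DELTA fitting the 14-bit `adds'
   immediate, no vcall offset), the thunk emitted above amounts to
   something like

        adds r32 = DELTA, r32
        br fn

   i.e. adjust `this' in place and sibling-call the real function; the
   ILP32 and out-of-range paths produce longer sequences.  */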

/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
ia64_struct_value_rtx (tree fntype,
                       int incoming ATTRIBUTE_UNUSED)
{
  if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
    return NULL_RTX;
  return gen_rtx_REG (Pmode, GR_REG (8));
}
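
/* Returning NULL_RTX above tells the middle end not to use a fixed
   register for the aggregate return address at all; the address is then
   passed as an invisible first argument, which is exactly the out0
   convention tested by ia64_struct_retval_addr_is_first_parm_p.  */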

static bool
ia64_scalar_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case DImode:
    case TImode:
      return true;

    case SFmode:
    case DFmode:
    case XFmode:
      return true;

    case TFmode:
      return TARGET_HPUX;

    default:
      return false;
    }
}

static bool
ia64_vector_mode_supported_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V8QImode:
    case V4HImode:
    case V2SImode:
      return true;

    case V2SFmode:
      return true;

    default:
      return false;
    }
}

#include "gt-ia64.h"