/* Definitions of target machine for GNU compiler.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005
   Free Software Foundation, Inc.
   Contributed by James E. Wilson <wilson@cygnus.com> and
                  David Mosberger <davidm@hpl.hp.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "ggc.h"
#include "basic-block.h"
#include "toplev.h"
#include "sched-int.h"
#include "timevar.h"
#include "target.h"
#include "target-def.h"
#include "tm_p.h"
#include "hashtab.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
56 /* This is used for communication between ASM_OUTPUT_LABEL and
57 ASM_OUTPUT_LABELREF. */
58 int ia64_asm_output_label = 0;
60 /* Define the information needed to generate branch and scc insns. This is
61 stored from the compare operation. */
62 struct rtx_def * ia64_compare_op0;
63 struct rtx_def * ia64_compare_op1;
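/* These globals are typically set by the compare expanders in ia64.md and
   consumed when the following branch or scc pattern is expanded; the exact
   usage lives in the machine description rather than here.  */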
65 /* Register names for ia64_expand_prologue. */
66 static const char * const ia64_reg_numbers[96] =
67 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
68 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
69 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
70 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
71 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
72 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
73 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
74 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
75 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
76 "r104","r105","r106","r107","r108","r109","r110","r111",
77 "r112","r113","r114","r115","r116","r117","r118","r119",
78 "r120","r121","r122","r123","r124","r125","r126","r127"};
80 /* ??? These strings could be shared with REGISTER_NAMES. */
81 static const char * const ia64_input_reg_names[8] =
82 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
84 /* ??? These strings could be shared with REGISTER_NAMES. */
85 static const char * const ia64_local_reg_names[80] =
86 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
87 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
88 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
89 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
90 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
91 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
92 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
93 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
94 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
95 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
97 /* ??? These strings could be shared with REGISTER_NAMES. */
98 static const char * const ia64_output_reg_names[8] =
99 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
101 /* String used with the -mfixed-range= option. */
102 const char *ia64_fixed_range_string;
104 /* Determines whether we use adds, addl, or movl to generate our
105 TLS immediate offsets. */
106 int ia64_tls_size = 22;
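/* The useful values are 14, 22, and 64, matching the immediate widths of
   the adds, addl, and movl instructions; the -mtls-size option selects
   among them when options are processed.  */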
108 /* String used with the -mtls-size= option. */
109 const char *ia64_tls_size_string;
111 /* Which cpu are we scheduling for. */
112 enum processor_type ia64_tune;
/* String used with the -mtune= option.  */
115 const char *ia64_tune_string;
117 /* Determines whether we run our final scheduling pass or not. We always
118 avoid the normal second scheduling pass. */
119 static int ia64_flag_schedule_insns2;
/* Determines whether we run variable tracking in machine dependent
   reorg.  */
static int ia64_flag_var_tracking;
/* Variables which are this size or smaller are put in the sdata/sbss
   sections.  */
unsigned int ia64_section_threshold;
/* The following variable is used by the DFA insn scheduler.  The value is
   TRUE if we do insn bundling instead of insn scheduling.  */
int bundling_p = 0;
134 /* Structure to be filled in by ia64_compute_frame_size with register
135 save masks and offsets for the current function. */
struct ia64_frame_info
{
139 HOST_WIDE_INT total_size; /* size of the stack frame, not including
140 the caller's scratch area. */
141 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
142 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
143 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
144 HARD_REG_SET mask; /* mask of saved registers. */
145 unsigned int gr_used_mask; /* mask of registers in use as gr spill
146 registers or long-term scratches. */
147 int n_spilled; /* number of spilled registers. */
148 int reg_fp; /* register for fp. */
149 int reg_save_b0; /* save register for b0. */
150 int reg_save_pr; /* save register for prs. */
151 int reg_save_ar_pfs; /* save register for ar.pfs. */
152 int reg_save_ar_unat; /* save register for ar.unat. */
153 int reg_save_ar_lc; /* save register for ar.lc. */
154 int reg_save_gp; /* save register for gp. */
155 int n_input_regs; /* number of input registers used. */
156 int n_local_regs; /* number of local registers used. */
157 int n_output_regs; /* number of output registers used. */
158 int n_rotate_regs; /* number of rotating registers used. */
160 char need_regstk; /* true if a .regstk directive needed. */
  char initialized;           /* true if the data is finalized.  */
};
164 /* Current frame information calculated by ia64_compute_frame_size. */
165 static struct ia64_frame_info current_frame_info;
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
173 static rtx gen_tls_get_addr (void);
174 static rtx gen_thread_pointer (void);
175 static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
176 static int find_gr_spill (int);
177 static int next_scratch_gr_reg (void);
178 static void mark_reg_gr_used_mask (rtx, void *);
179 static void ia64_compute_frame_size (HOST_WIDE_INT);
180 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
181 static void finish_spill_pointers (void);
182 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
183 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
184 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
185 static rtx gen_movdi_x (rtx, rtx, rtx);
186 static rtx gen_fr_spill_x (rtx, rtx, rtx);
187 static rtx gen_fr_restore_x (rtx, rtx, rtx);
189 static enum machine_mode hfa_element_mode (tree, bool);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
                                         tree, int *, int);
static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                    tree, bool);
static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                   tree, bool);
196 static bool ia64_function_ok_for_sibcall (tree, tree);
197 static bool ia64_return_in_memory (tree, tree);
198 static bool ia64_rtx_costs (rtx, int, int, int *);
199 static void fix_range (const char *);
200 static struct machine_function * ia64_init_machine_status (void);
201 static void emit_insn_group_barriers (FILE *);
202 static void emit_all_insn_group_barriers (FILE *);
203 static void final_emit_insn_group_barriers (FILE *);
204 static void emit_predicate_relation_info (void);
205 static void ia64_reorg (void);
206 static bool ia64_in_small_data_p (tree);
207 static void process_epilogue (void);
208 static int process_set (FILE *, rtx);
210 static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
211 static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
                                         int, tree, rtx);
214 static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
215 static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
216 static bool ia64_assemble_integer (rtx, unsigned int, int);
217 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
218 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
219 static void ia64_output_function_end_prologue (FILE *);
221 static int ia64_issue_rate (void);
222 static int ia64_adjust_cost (rtx, rtx, rtx, int);
223 static void ia64_sched_init (FILE *, int, int);
224 static void ia64_sched_finish (FILE *, int);
225 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
226 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
227 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
228 static int ia64_variable_issue (FILE *, int, rtx, int);
230 static struct bundle_state *get_free_bundle_state (void);
231 static void free_bundle_state (struct bundle_state *);
232 static void initiate_bundle_states (void);
233 static void finish_bundle_states (void);
234 static unsigned bundle_state_hash (const void *);
235 static int bundle_state_eq_p (const void *, const void *);
236 static int insert_bundle_state (struct bundle_state *);
237 static void initiate_bundle_state_table (void);
238 static void finish_bundle_state_table (void);
239 static int try_issue_nops (struct bundle_state *, int);
240 static int try_issue_insn (struct bundle_state *, rtx);
241 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
242 static int get_max_pos (state_t);
243 static int get_template (state_t, int);
245 static rtx get_next_important_insn (rtx, rtx);
246 static void bundling (FILE *, int, rtx, rtx);
248 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
249 HOST_WIDE_INT, tree);
250 static void ia64_file_start (void);
252 static void ia64_select_rtx_section (enum machine_mode, rtx,
253 unsigned HOST_WIDE_INT);
static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_unique_section (tree, int)
     ATTRIBUTE_UNUSED;
static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
                                             unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static unsigned int ia64_section_type_flags (tree, const char *, int);
static void ia64_hpux_add_extern_decl (tree decl)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_file_end (void)
     ATTRIBUTE_UNUSED;
static void ia64_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_hpux_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_sysv4_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
static void ia64_vms_init_libfuncs (void)
     ATTRIBUTE_UNUSED;
275 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
276 static void ia64_encode_section_info (tree, rtx, int);
277 static rtx ia64_struct_value_rtx (tree, int);
278 static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
279 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
280 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
283 /* Table of valid machine attributes. */
static const struct attribute_spec ia64_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "syscall_linkage", 0, 0, false, true,  true,  NULL },
  { "model",           1, 1, true, false, false, ia64_handle_model_attribute },
  { NULL,              0, 0, false, false, false, NULL }
};
292 /* Initialize the GCC target structure. */
293 #undef TARGET_ATTRIBUTE_TABLE
294 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
296 #undef TARGET_INIT_BUILTINS
297 #define TARGET_INIT_BUILTINS ia64_init_builtins
299 #undef TARGET_EXPAND_BUILTIN
300 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
302 #undef TARGET_ASM_BYTE_OP
303 #define TARGET_ASM_BYTE_OP "\tdata1\t"
304 #undef TARGET_ASM_ALIGNED_HI_OP
305 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
306 #undef TARGET_ASM_ALIGNED_SI_OP
307 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
308 #undef TARGET_ASM_ALIGNED_DI_OP
309 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
310 #undef TARGET_ASM_UNALIGNED_HI_OP
311 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
312 #undef TARGET_ASM_UNALIGNED_SI_OP
313 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
314 #undef TARGET_ASM_UNALIGNED_DI_OP
315 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
316 #undef TARGET_ASM_INTEGER
317 #define TARGET_ASM_INTEGER ia64_assemble_integer
319 #undef TARGET_ASM_FUNCTION_PROLOGUE
320 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
321 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
322 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
323 #undef TARGET_ASM_FUNCTION_EPILOGUE
324 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
326 #undef TARGET_IN_SMALL_DATA_P
327 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
329 #undef TARGET_SCHED_ADJUST_COST
330 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
331 #undef TARGET_SCHED_ISSUE_RATE
332 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
333 #undef TARGET_SCHED_VARIABLE_ISSUE
334 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
335 #undef TARGET_SCHED_INIT
336 #define TARGET_SCHED_INIT ia64_sched_init
337 #undef TARGET_SCHED_FINISH
338 #define TARGET_SCHED_FINISH ia64_sched_finish
339 #undef TARGET_SCHED_REORDER
340 #define TARGET_SCHED_REORDER ia64_sched_reorder
341 #undef TARGET_SCHED_REORDER2
342 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
344 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
345 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
347 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
348 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
350 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
351 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
352 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
353 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
355 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
356 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
357 ia64_first_cycle_multipass_dfa_lookahead_guard
359 #undef TARGET_SCHED_DFA_NEW_CYCLE
360 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
362 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
363 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
364 #undef TARGET_PASS_BY_REFERENCE
365 #define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference
366 #undef TARGET_ARG_PARTIAL_BYTES
367 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
369 #undef TARGET_ASM_OUTPUT_MI_THUNK
370 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
371 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
372 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
374 #undef TARGET_ASM_FILE_START
375 #define TARGET_ASM_FILE_START ia64_file_start
377 #undef TARGET_RTX_COSTS
378 #define TARGET_RTX_COSTS ia64_rtx_costs
379 #undef TARGET_ADDRESS_COST
380 #define TARGET_ADDRESS_COST hook_int_rtx_0
382 #undef TARGET_MACHINE_DEPENDENT_REORG
383 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
385 #undef TARGET_ENCODE_SECTION_INFO
386 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
388 #undef TARGET_SECTION_TYPE_FLAGS
389 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#endif

/* ??? ABI doesn't allow us to define this.  */
#if 0
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#endif

/* ??? Investigate.  */
#if 0
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#endif
409 #undef TARGET_STRUCT_VALUE_RTX
410 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
411 #undef TARGET_RETURN_IN_MEMORY
412 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
413 #undef TARGET_SETUP_INCOMING_VARARGS
414 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
415 #undef TARGET_STRICT_ARGUMENT_NAMING
416 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
417 #undef TARGET_MUST_PASS_IN_STACK
418 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
420 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
421 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
423 #undef TARGET_UNWIND_EMIT
424 #define TARGET_UNWIND_EMIT process_for_unwind_directive
426 #undef TARGET_SCALAR_MODE_SUPPORTED_P
427 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
428 #undef TARGET_VECTOR_MODE_SUPPORTED_P
429 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
431 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
432 in an order different from the specified program order. */
433 #undef TARGET_RELAXED_ORDERING
434 #define TARGET_RELAXED_ORDERING true
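/* Declaring relaxed ordering tells the middle-end not to assume that loads
   and stores become visible in program order, so language-level
   synchronization (e.g. thread-safe local statics in C++) is expected to
   emit explicit memory barriers rather than rely on hardware ordering.  */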
436 struct gcc_target targetm = TARGET_INITIALIZER;
typedef enum
  {
    ADDR_AREA_NORMAL,   /* normal address area */
    ADDR_AREA_SMALL     /* addressable by "addl" (-2MB < addr < 2MB) */
  }
ia64_addr_area;

static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;

static void
init_idents (void)
{
  if (small_ident1 == 0)
    {
      small_ident1 = get_identifier ("small");
      small_ident2 = get_identifier ("__small__");
    }
}
458 /* Retrieve the address area that has been chosen for the given decl. */
460 static ia64_addr_area
461 ia64_get_addr_area (tree decl)
465 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
471 id = TREE_VALUE (TREE_VALUE (model_attr));
472 if (id == small_ident1 || id == small_ident2)
473 return ADDR_AREA_SMALL;
475 return ADDR_AREA_NORMAL;
479 ia64_handle_model_attribute (tree *node, tree name, tree args,
480 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
482 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
484 tree arg, decl = *node;
487 arg = TREE_VALUE (args);
488 if (arg == small_ident1 || arg == small_ident2)
490 addr_area = ADDR_AREA_SMALL;
494 warning ("invalid argument of %qs attribute",
495 IDENTIFIER_POINTER (name));
496 *no_add_attrs = true;
499 switch (TREE_CODE (decl))
502 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
504 && !TREE_STATIC (decl))
506 error ("%Jan address area attribute cannot be specified for "
507 "local variables", decl, decl);
508 *no_add_attrs = true;
510 area = ia64_get_addr_area (decl);
511 if (area != ADDR_AREA_NORMAL && addr_area != area)
513 error ("%Jaddress area of '%s' conflicts with previous "
514 "declaration", decl, decl);
515 *no_add_attrs = true;
520 error ("%Jaddress area attribute cannot be specified for functions",
522 *no_add_attrs = true;
526 warning ("%qs attribute ignored", IDENTIFIER_POINTER (name));
527 *no_add_attrs = true;
535 ia64_encode_addr_area (tree decl, rtx symbol)
539 flags = SYMBOL_REF_FLAGS (symbol);
540 switch (ia64_get_addr_area (decl))
542 case ADDR_AREA_NORMAL: break;
543 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
546 SYMBOL_REF_FLAGS (symbol) = flags;
550 ia64_encode_section_info (tree decl, rtx rtl, int first)
552 default_encode_section_info (decl, rtl, first);
554 /* Careful not to prod global register variables. */
555 if (TREE_CODE (decl) == VAR_DECL
556 && GET_CODE (DECL_RTL (decl)) == MEM
557 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
558 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
559 ia64_encode_addr_area (decl, XEXP (rtl, 0));
562 /* Implement CONST_OK_FOR_LETTER_P. */
565 ia64_const_ok_for_letter_p (HOST_WIDE_INT value, char c)
570 return CONST_OK_FOR_I (value);
572 return CONST_OK_FOR_J (value);
574 return CONST_OK_FOR_K (value);
576 return CONST_OK_FOR_L (value);
578 return CONST_OK_FOR_M (value);
580 return CONST_OK_FOR_N (value);
582 return CONST_OK_FOR_O (value);
584 return CONST_OK_FOR_P (value);
590 /* Implement CONST_DOUBLE_OK_FOR_LETTER_P. */
593 ia64_const_double_ok_for_letter_p (rtx value, char c)
598 return CONST_DOUBLE_OK_FOR_G (value);
604 /* Implement EXTRA_CONSTRAINT. */
607 ia64_extra_constraint (rtx value, char c)
612 /* Non-volatile memory for FP_REG loads/stores. */
613 return memory_operand(value, VOIDmode) && !MEM_VOLATILE_P (value);
616 /* 1..4 for shladd arguments. */
617 return (GET_CODE (value) == CONST_INT
618 && INTVAL (value) >= 1 && INTVAL (value) <= 4);
621 /* Non-post-inc memory for asms and other unsavory creatures. */
622 return (GET_CODE (value) == MEM
623 && GET_RTX_CLASS (GET_CODE (XEXP (value, 0))) != RTX_AUTOINC
624 && (reload_in_progress || memory_operand (value, VOIDmode)));
627 /* Symbol ref to small-address-area. */
628 return (GET_CODE (value) == SYMBOL_REF
629 && SYMBOL_REF_SMALL_ADDR_P (value));
633 return value == CONST0_RTX (GET_MODE (value));
636 /* An integer vector, such that conversion to an integer yields a
637 value appropriate for an integer 'J' constraint. */
638 if (GET_CODE (value) == CONST_VECTOR
639 && GET_MODE_CLASS (GET_MODE (value)) == MODE_VECTOR_INT)
641 value = simplify_subreg (DImode, value, GET_MODE (value), 0);
642 return ia64_const_ok_for_letter_p (INTVAL (value), 'J');
647 /* A V2SF vector containing elements that satisfy 'G'. */
649 (GET_CODE (value) == CONST_VECTOR
650 && GET_MODE (value) == V2SFmode
651 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 0), 'G')
652 && ia64_const_double_ok_for_letter_p (XVECEXP (value, 0, 1), 'G'));
/* Return 1 if the operands of a move are ok.  */

int
ia64_move_ok (rtx dst, rtx src)
{
  /* If we're under init_recog_no_volatile, we'll not be able to use
     memory_operand.  So check the code directly and don't worry about
     the validity of the underlying address, which should have been
     checked elsewhere anyway.  */
  if (GET_CODE (dst) != MEM)
    return 1;
  if (GET_CODE (src) == MEM)
    return 0;
  if (register_operand (src, VOIDmode))
    return 1;

  /* Otherwise, this must be a constant: either 0, 0.0, or 1.0.  */
  if (INTEGRAL_MODE_P (GET_MODE (dst)))
    return src == const0_rtx;
  else
    return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
}
int
addp4_optimize_ok (rtx op1, rtx op2)
{
  return (basereg_operand (op1, GET_MODE (op1)) !=
          basereg_operand (op2, GET_MODE (op2)));
}
689 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
690 Return the length of the field, or <= 0 on failure. */
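/* For example, a mask of 0xff0 paired with a shift count of 4 describes
   (x << 4) & 0xff0: shifting the mask right by 4 leaves 0xff, and
   exact_log2 (0xff + 1) gives a field length of 8.  */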
int
ia64_depz_field_mask (rtx rop, rtx rshift)
{
  unsigned HOST_WIDE_INT op = INTVAL (rop);
  unsigned HOST_WIDE_INT shift = INTVAL (rshift);

  /* Get rid of the zero bits we're shifting in.  */
  op >>= shift;

  /* We must now have a solid block of 1's at bit 0.  */
  return exact_log2 (op + 1);
}
705 /* Expand a symbolic constant load. */
708 ia64_expand_load_address (rtx dest, rtx src)
710 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (src))
712 if (GET_CODE (dest) != REG)
715 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
716 having to pointer-extend the value afterward. Other forms of address
717 computation below are also more natural to compute as 64-bit quantities.
718 If we've been given an SImode destination register, change it. */
719 if (GET_MODE (dest) != Pmode)
720 dest = gen_rtx_REG (Pmode, REGNO (dest));
722 if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
724 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
727 else if (TARGET_AUTO_PIC)
729 emit_insn (gen_load_gprel64 (dest, src));
732 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
734 emit_insn (gen_load_fptr (dest, src));
737 else if (sdata_symbolic_operand (src, VOIDmode))
739 emit_insn (gen_load_gprel (dest, src));
743 if (GET_CODE (src) == CONST
744 && GET_CODE (XEXP (src, 0)) == PLUS
745 && GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
746 && (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x3fff) != 0)
748 rtx sym = XEXP (XEXP (src, 0), 0);
749 HOST_WIDE_INT ofs, hi, lo;
751 /* Split the offset into a sign extended 14-bit low part
752 and a complementary high part. */
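      /* For example, an offset of 0x6000 splits into lo = -0x2000 and
	 hi = 0x8000: hi has its low 14 bits clear and lo fits in the
	 signed 14-bit immediate of an add instruction.  */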
753 ofs = INTVAL (XEXP (XEXP (src, 0), 1));
      lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
      hi = ofs - lo;

757 ia64_expand_load_address (dest, plus_constant (sym, hi));
758 emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
764 tmp = gen_rtx_HIGH (Pmode, src);
765 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
766 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
768 tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
769 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
773 static GTY(()) rtx gen_tls_tga;
775 gen_tls_get_addr (void)
778 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
782 static GTY(()) rtx thread_pointer_rtx;
784 gen_thread_pointer (void)
786 if (!thread_pointer_rtx)
787 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
788 return thread_pointer_rtx;
792 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
794 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
799 case TLS_MODEL_GLOBAL_DYNAMIC:
802 tga_op1 = gen_reg_rtx (Pmode);
803 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
804 tga_op1 = gen_const_mem (Pmode, tga_op1);
806 tga_op2 = gen_reg_rtx (Pmode);
807 emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
808 tga_op2 = gen_const_mem (Pmode, tga_op2);
810 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
811 LCT_CONST, Pmode, 2, tga_op1,
812 Pmode, tga_op2, Pmode);
814 insns = get_insns ();
817 if (GET_MODE (op0) != Pmode)
819 emit_libcall_block (insns, op0, tga_ret, op1);
822 case TLS_MODEL_LOCAL_DYNAMIC:
    case TLS_MODEL_LOCAL_DYNAMIC:
      /* ??? This isn't the completely proper way to do local-dynamic.
824 If the call to __tls_get_addr is used only by a single symbol,
825 then we should (somehow) move the dtprel to the second arg
826 to avoid the extra add. */
829 tga_op1 = gen_reg_rtx (Pmode);
830 emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
831 tga_op1 = gen_const_mem (Pmode, tga_op1);
833 tga_op2 = const0_rtx;
835 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
836 LCT_CONST, Pmode, 2, tga_op1,
837 Pmode, tga_op2, Pmode);
839 insns = get_insns ();
842 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
844 tmp = gen_reg_rtx (Pmode);
845 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
847 if (!register_operand (op0, Pmode))
848 op0 = gen_reg_rtx (Pmode);
851 emit_insn (gen_load_dtprel (op0, op1));
852 emit_insn (gen_adddi3 (op0, tmp, op0));
855 emit_insn (gen_add_dtprel (op0, tmp, op1));
858 case TLS_MODEL_INITIAL_EXEC:
859 tmp = gen_reg_rtx (Pmode);
860 emit_insn (gen_load_ltoff_tprel (tmp, op1));
861 tmp = gen_const_mem (Pmode, tmp);
862 tmp = force_reg (Pmode, tmp);
864 if (!register_operand (op0, Pmode))
865 op0 = gen_reg_rtx (Pmode);
866 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
869 case TLS_MODEL_LOCAL_EXEC:
870 if (!register_operand (op0, Pmode))
871 op0 = gen_reg_rtx (Pmode);
874 emit_insn (gen_load_tprel (op0, op1));
875 emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
878 emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
  if (GET_MODE (orig_op0) == Pmode)
    return op0;
  return gen_lowpart (GET_MODE (orig_op0), op0);
893 ia64_expand_move (rtx op0, rtx op1)
895 enum machine_mode mode = GET_MODE (op0);
897 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
898 op1 = force_reg (mode, op1);
900 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
902 enum tls_model tls_kind;
903 if (GET_CODE (op1) == SYMBOL_REF
904 && (tls_kind = SYMBOL_REF_TLS_MODEL (op1)))
905 return ia64_expand_tls_address (tls_kind, op0, op1);
      if (!TARGET_NO_PIC && reload_completed)
	{
	  ia64_expand_load_address (op0, op1);
	  return NULL_RTX;
	}
917 /* Split a move from OP1 to OP0 conditional on COND. */
920 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
922 rtx insn, first = get_last_insn ();
924 emit_move_insn (op0, op1);
  for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
    if (INSN_P (insn))
      PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
					  PATTERN (insn));
}
932 /* Split a post-reload TImode or TFmode reference into two DImode
933 components. This is made extra difficult by the fact that we do
934 not get any scratch registers to work with, because reload cannot
935 be prevented from giving us a scratch that overlaps the register
936 pair involved. So instead, when addressing memory, we tweak the
937 pointer register up and back down with POST_INCs. Or up and not
938 back down when we can get away with it.
940 REVERSED is true when the loads must be done in reversed order
941 (high word first) for correctness. DEAD is true when the pointer
942 dies with the second insn we generate and therefore the second
943 address must not carry a postmodify.
945 May return an insn which is to be emitted after the moves. */
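   As a sketch of the usual case: a load from (mem:TI (reg)) becomes a load
   from [reg] with post-increment by 8 followed by a load from [reg] with
   post-decrement by 8, so the pointer register ends up unchanged unless
   DEAD says the second adjustment can be omitted.  */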
948 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
952 switch (GET_CODE (in))
955 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
956 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
961 /* Cannot occur reversed. */
962 if (reversed) abort ();
964 if (GET_MODE (in) != TFmode)
965 split_double (in, &out[0], &out[1]);
967 /* split_double does not understand how to split a TFmode
968 quantity into a pair of DImode constants. */
971 unsigned HOST_WIDE_INT p[2];
972 long l[4]; /* TFmode is 128 bits */
974 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
975 real_to_target (l, &r, TFmode);
	  if (FLOAT_WORDS_BIG_ENDIAN)
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
	      p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
	    }
	  else
	    {
	      p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
	      p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
	    }
987 out[0] = GEN_INT (p[0]);
988 out[1] = GEN_INT (p[1]);
994 rtx base = XEXP (in, 0);
997 switch (GET_CODE (base))
1002 out[0] = adjust_automodify_address
1003 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1004 out[1] = adjust_automodify_address
1005 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1009 /* Reversal requires a pre-increment, which can only
1010 be done as a separate insn. */
1011 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1012 out[0] = adjust_automodify_address
1013 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1014 out[1] = adjust_address (in, DImode, 0);
1019 if (reversed || dead) abort ();
1020 /* Just do the increment in two steps. */
1021 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1022 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1026 if (reversed || dead) abort ();
1027 /* Add 8, subtract 24. */
1028 base = XEXP (base, 0);
1029 out[0] = adjust_automodify_address
1030 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1031 out[1] = adjust_automodify_address
1033 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1038 if (reversed || dead) abort ();
1039 /* Extract and adjust the modification. This case is
1040 trickier than the others, because we might have an
1041 index register, or we might have a combined offset that
1042 doesn't fit a signed 9-bit displacement field. We can
1043 assume the incoming expression is already legitimate. */
1044 offset = XEXP (base, 1);
1045 base = XEXP (base, 0);
1047 out[0] = adjust_automodify_address
1048 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1050 if (GET_CODE (XEXP (offset, 1)) == REG)
1052 /* Can't adjust the postmodify to match. Emit the
1053 original, then a separate addition insn. */
1054 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1055 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1057 else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
1059 else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1061 /* Again the postmodify cannot be made to match, but
1062 in this case it's more efficient to get rid of the
1063 postmodify entirely and fix up with an add insn. */
1064 out[1] = adjust_automodify_address (in, DImode, base, 8);
1065 fixup = gen_adddi3 (base, base,
1066 GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1070 /* Combined offset still fits in the displacement field.
1071 (We cannot overflow it at the high end.) */
1072 out[1] = adjust_automodify_address
1074 gen_rtx_POST_MODIFY (Pmode, base,
1075 gen_rtx_PLUS (Pmode, base,
1076 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1094 /* Split a TImode or TFmode move instruction after reload.
1095 This is used by *movtf_internal and *movti_internal. */
1097 ia64_split_tmode_move (rtx operands[])
1099 rtx in[2], out[2], insn;
1102 bool reversed = false;
1104 /* It is possible for reload to decide to overwrite a pointer with
1105 the value it points to. In that case we have to do the loads in
1106 the appropriate order so that the pointer is not destroyed too
1107 early. Also we must not generate a postmodify for that second
1108 load, or rws_access_regno will abort. */
1109 if (GET_CODE (operands[1]) == MEM
1110 && reg_overlap_mentioned_p (operands[0], operands[1]))
1112 rtx base = XEXP (operands[1], 0);
1113 while (GET_CODE (base) != REG)
1114 base = XEXP (base, 0);
1116 if (REGNO (base) == REGNO (operands[0]))
1120 /* Another reason to do the moves in reversed order is if the first
1121 element of the target register pair is also the second element of
1122 the source register pair. */
1123 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1124 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1127 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1128 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1130 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1131 if (GET_CODE (EXP) == MEM \
1132 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1133 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1134 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1135 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1136 XEXP (XEXP (EXP, 0), 0), \
1139 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1140 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1141 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1143 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1144 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1145 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
  if (fixup[0])
    emit_insn (fixup[0]);
  if (fixup[1])
    emit_insn (fixup[1]);
1152 #undef MAYBE_ADD_REG_INC_NOTE
1155 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1156 through memory plus an extra GR scratch register. Except that you can
1157 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1158 SECONDARY_RELOAD_CLASS, but not both.
1160 We got into problems in the first place by allowing a construct like
1161 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1162 This solution attempts to prevent this situation from occurring. When
1163 we see something like the above, we spill the inner register to memory. */
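/* For instance, a union containing both a long double and a 128-bit integer
   member can yield (subreg:XF (reg:TI)) when the long double member is
   read; the code below forces such TImode registers to the stack and
   rereads them in XFmode.  */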
1166 spill_xfmode_operand (rtx in, int force)
1168 if (GET_CODE (in) == SUBREG
1169 && GET_MODE (SUBREG_REG (in)) == TImode
1170 && GET_CODE (SUBREG_REG (in)) == REG)
1172 rtx memt = assign_stack_temp (TImode, 16, 0);
1173 emit_move_insn (memt, SUBREG_REG (in));
1174 return adjust_address (memt, XFmode, 0);
  else if (force && GET_CODE (in) == REG)
    {
      rtx memx = assign_stack_temp (XFmode, 16, 0);
      emit_move_insn (memx, in);
      return memx;
    }
  else
    return in;
}
1186 /* Emit comparison instruction if necessary, returning the expression
1187 that holds the compare result in the proper mode. */
1189 static GTY(()) rtx cmptf_libfunc;
1192 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1194 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1197 /* If we have a BImode input, then we already have a compare result, and
1198 do not need to emit another comparison. */
1199 if (GET_MODE (op0) == BImode)
1201 if ((code == NE || code == EQ) && op1 == const0_rtx)
1206 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1207 magic number as its third argument, that indicates what to do.
1208 The return value is an integer to be compared against zero. */
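  /* For example, a LT comparison becomes a call roughly equivalent to
     _U_Qfcmp (op0, op1, QCMP_LT | QCMP_INV) != 0, using the QCMP_* magic
     values selected in the switch below.  */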
1209 else if (GET_MODE (op0) == TFmode)
1212 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1218 enum rtx_code ncode;
1220 if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
1224 /* 1 = equal, 0 = not equal. Equality operators do
1225 not raise FP_INVALID when given an SNaN operand. */
1226 case EQ: magic = QCMP_EQ; ncode = NE; break;
1227 case NE: magic = QCMP_EQ; ncode = EQ; break;
1228 /* isunordered() from C99. */
1229 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1230 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1231 /* Relational operators raise FP_INVALID when given
1233 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1234 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1235 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1236 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1237 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
	     Expanders for buneq etc. would have to be added to ia64.md
1239 for this to be useful. */
1245 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1246 op0, TFmode, op1, TFmode,
1247 GEN_INT (magic), DImode);
1248 cmp = gen_reg_rtx (BImode);
1249 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1250 gen_rtx_fmt_ee (ncode, BImode,
1253 insns = get_insns ();
1256 emit_libcall_block (insns, cmp, cmp,
1257 gen_rtx_fmt_ee (code, BImode, op0, op1));
1262 cmp = gen_reg_rtx (BImode);
1263 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1264 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1268 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1271 /* Generate an integral vector comparison. */
1274 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1275 rtx dest, rtx op0, rtx op1)
1277 bool negate = false;
1312 rtx w0h, w0l, w1h, w1l, ch, cl;
1313 enum machine_mode wmode;
1314 rtx (*unpack_l) (rtx, rtx, rtx);
1315 rtx (*unpack_h) (rtx, rtx, rtx);
1316 rtx (*pack) (rtx, rtx, rtx);
1318 /* We don't have native unsigned comparisons, but we can generate
1319 them better than generic code can. */
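      /* The idea: zero-extend both operands into vectors of twice the
	 element width, where every value is representable as a signed
	 number, do a signed comparison there, and pack the two result
	 halves back down with the saturating pack instructions.  */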
1321 if (mode == V2SImode)
1323 else if (mode == V8QImode)
1326 pack = gen_pack2_sss;
1327 unpack_l = gen_unpack1_l;
1328 unpack_h = gen_unpack1_h;
1330 else if (mode == V4HImode)
1333 pack = gen_pack4_sss;
1334 unpack_l = gen_unpack2_l;
1335 unpack_h = gen_unpack2_h;
1340 /* Unpack into wider vectors, zero extending the elements. */
1342 w0l = gen_reg_rtx (wmode);
1343 w0h = gen_reg_rtx (wmode);
1344 w1l = gen_reg_rtx (wmode);
1345 w1h = gen_reg_rtx (wmode);
1346 emit_insn (unpack_l (gen_lowpart (mode, w0l), op0, CONST0_RTX (mode)));
1347 emit_insn (unpack_h (gen_lowpart (mode, w0h), op0, CONST0_RTX (mode)));
1348 emit_insn (unpack_l (gen_lowpart (mode, w1l), op1, CONST0_RTX (mode)));
1349 emit_insn (unpack_h (gen_lowpart (mode, w1h), op1, CONST0_RTX (mode)));
1351 /* Compare in the wider mode. */
1353 cl = gen_reg_rtx (wmode);
1354 ch = gen_reg_rtx (wmode);
1355 code = signed_condition (code);
1356 ia64_expand_vecint_compare (code, wmode, cl, w0l, w1l);
1357 negate = ia64_expand_vecint_compare (code, wmode, ch, w0h, w1h);
1359 /* Repack into a single narrower vector. */
1361 emit_insn (pack (dest, cl, ch));
1369 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1370 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1376 ia64_expand_vcondu_v2si (enum rtx_code code, rtx operands[])
1378 rtx dl, dh, bl, bh, op1l, op1h, op2l, op2h, op4l, op4h, op5l, op5h, x;
1380 /* In this case, we extract the two SImode quantities and generate
1381 normal comparisons for each of them. */
1383 op1l = gen_lowpart (SImode, operands[1]);
1384 op2l = gen_lowpart (SImode, operands[2]);
1385 op4l = gen_lowpart (SImode, operands[4]);
1386 op5l = gen_lowpart (SImode, operands[5]);
1388 op1h = gen_reg_rtx (SImode);
1389 op2h = gen_reg_rtx (SImode);
1390 op4h = gen_reg_rtx (SImode);
1391 op5h = gen_reg_rtx (SImode);
1393 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op1h),
1394 gen_lowpart (DImode, operands[1]), GEN_INT (32)));
1395 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op2h),
1396 gen_lowpart (DImode, operands[2]), GEN_INT (32)));
1397 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op4h),
1398 gen_lowpart (DImode, operands[4]), GEN_INT (32)));
1399 emit_insn (gen_lshrdi3 (gen_lowpart (DImode, op5h),
1400 gen_lowpart (DImode, operands[5]), GEN_INT (32)));
1402 bl = gen_reg_rtx (BImode);
1403 x = gen_rtx_fmt_ee (code, BImode, op4l, op5l);
1404 emit_insn (gen_rtx_SET (VOIDmode, bl, x));
1406 bh = gen_reg_rtx (BImode);
1407 x = gen_rtx_fmt_ee (code, BImode, op4h, op5h);
1408 emit_insn (gen_rtx_SET (VOIDmode, bh, x));
1410 /* With the results of the comparisons, emit conditional moves. */
1412 dl = gen_reg_rtx (SImode);
1413 x = gen_rtx_IF_THEN_ELSE (SImode, bl, op1l, op2l);
1414 emit_insn (gen_rtx_SET (VOIDmode, dl, x));
1416 dh = gen_reg_rtx (SImode);
1417 x = gen_rtx_IF_THEN_ELSE (SImode, bh, op1h, op2h);
1418 emit_insn (gen_rtx_SET (VOIDmode, dh, x));
1420 /* Merge the two partial results back into a vector. */
1422 x = gen_rtx_VEC_CONCAT (V2SImode, dl, dh);
1423 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1426 /* Emit an integral vector conditional move. */
1429 ia64_expand_vecint_cmov (rtx operands[])
1431 enum machine_mode mode = GET_MODE (operands[0]);
1432 enum rtx_code code = GET_CODE (operands[3]);
1436 /* Since we don't have unsigned V2SImode comparisons, it's more efficient
1437 to special-case them entirely. */
1438 if (mode == V2SImode
1439 && (code == GTU || code == GEU || code == LEU || code == LTU))
1441 ia64_expand_vcondu_v2si (code, operands);
1445 cmp = gen_reg_rtx (mode);
1446 negate = ia64_expand_vecint_compare (code, mode, cmp,
1447 operands[4], operands[5]);
1449 ot = operands[1+negate];
1450 of = operands[2-negate];
1452 if (ot == CONST0_RTX (mode))
1454 if (of == CONST0_RTX (mode))
1456 emit_move_insn (operands[0], ot);
1460 x = gen_rtx_NOT (mode, cmp);
1461 x = gen_rtx_AND (mode, x, of);
1462 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1464 else if (of == CONST0_RTX (mode))
1466 x = gen_rtx_AND (mode, cmp, ot);
1467 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1473 t = gen_reg_rtx (mode);
1474 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1475 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1477 f = gen_reg_rtx (mode);
1478 x = gen_rtx_NOT (mode, cmp);
1479 x = gen_rtx_AND (mode, x, operands[2-negate]);
1480 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1482 x = gen_rtx_IOR (mode, t, f);
1483 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1487 /* Emit an integral vector min or max operation. Return true if all done. */
1490 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1495 /* These four combinations are supported directly. */
1496 if (mode == V8QImode && (code == UMIN || code == UMAX))
1498 if (mode == V4HImode && (code == SMIN || code == SMAX))
1501 /* Everything else implemented via vector comparisons. */
1502 xops[0] = operands[0];
1503 xops[4] = xops[1] = operands[1];
1504 xops[5] = xops[2] = operands[2];
1523 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1525 ia64_expand_vecint_cmov (xops);
1529 /* Emit the appropriate sequence for a call. */
1532 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1537 addr = XEXP (addr, 0);
1538 addr = convert_memory_address (DImode, addr);
1539 b0 = gen_rtx_REG (DImode, R_BR (0));
1541 /* ??? Should do this for functions known to bind local too. */
1542 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1545 insn = gen_sibcall_nogp (addr);
1547 insn = gen_call_nogp (addr, b0);
1549 insn = gen_call_value_nogp (retval, addr, b0);
1550 insn = emit_call_insn (insn);
1555 insn = gen_sibcall_gp (addr);
1557 insn = gen_call_gp (addr, b0);
1559 insn = gen_call_value_gp (retval, addr, b0);
1560 insn = emit_call_insn (insn);
1562 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1566 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1570 ia64_reload_gp (void)
1574 if (current_frame_info.reg_save_gp)
1575 tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
1578 HOST_WIDE_INT offset;
1580 offset = (current_frame_info.spill_cfa_off
1581 + current_frame_info.spill_size);
1582 if (frame_pointer_needed)
1584 tmp = hard_frame_pointer_rtx;
1589 tmp = stack_pointer_rtx;
1590 offset = current_frame_info.total_size - offset;
1593 if (CONST_OK_FOR_I (offset))
1594 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1595 tmp, GEN_INT (offset)));
1598 emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
1599 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1600 pic_offset_table_rtx, tmp));
1603 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1606 emit_move_insn (pic_offset_table_rtx, tmp);
1610 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1611 rtx scratch_b, int noreturn_p, int sibcall_p)
1614 bool is_desc = false;
1616 /* If we find we're calling through a register, then we're actually
1617 calling through a descriptor, so load up the values. */
1618 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1623 /* ??? We are currently constrained to *not* use peep2, because
1624 we can legitimately change the global lifetime of the GP
1625 (in the form of killing where previously live). This is
1626 because a call through a descriptor doesn't use the previous
1627 value of the GP, while a direct call does, and we do not
1628 commit to either form until the split here.
1630 That said, this means that we lack precise life info for
1631 whether ADDR is dead after this call. This is not terribly
1632 important, since we can fix things up essentially for free
1633 with the POST_DEC below, but it's nice to not use it when we
1634 can immediately tell it's not necessary. */
1635 addr_dead_p = ((noreturn_p || sibcall_p
1636 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1638 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1640 /* Load the code address into scratch_b. */
1641 tmp = gen_rtx_POST_INC (Pmode, addr);
1642 tmp = gen_rtx_MEM (Pmode, tmp);
1643 emit_move_insn (scratch_r, tmp);
1644 emit_move_insn (scratch_b, scratch_r);
1646 /* Load the GP address. If ADDR is not dead here, then we must
1647 revert the change made above via the POST_INCREMENT. */
1649 tmp = gen_rtx_POST_DEC (Pmode, addr);
1652 tmp = gen_rtx_MEM (Pmode, tmp);
1653 emit_move_insn (pic_offset_table_rtx, tmp);
1660 insn = gen_sibcall_nogp (addr);
1662 insn = gen_call_value_nogp (retval, addr, retaddr);
1664 insn = gen_call_nogp (addr, retaddr);
1665 emit_call_insn (insn);
1667 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
1671 /* Begin the assembly file. */
1674 ia64_file_start (void)
1676 default_file_start ();
1677 emit_safe_across_calls ();
1681 emit_safe_across_calls (void)
1683 unsigned int rs, re;
1690 while (rs < 64 && call_used_regs[PR_REG (rs)])
1694 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
1698 fputs ("\t.pred.safe_across_calls ", asm_out_file);
1702 fputc (',', asm_out_file);
1704 fprintf (asm_out_file, "p%u", rs);
1706 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
1710 fputc ('\n', asm_out_file);
1713 /* Helper function for ia64_compute_frame_size: find an appropriate general
1714 register to spill some special register to. SPECIAL_SPILL_MASK contains
1715 bits in GR0 to GR31 that have already been allocated by this routine.
1716 TRY_LOCALS is true if we should attempt to locate a local regnum. */
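/* The search order below is: an otherwise unused call-clobbered GR in a
   leaf function, then a freshly allocated stacked local register, and
   finally 0, which tells the caller to spill to the memory stack.  */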
1719 find_gr_spill (int try_locals)
1723 /* If this is a leaf function, first try an otherwise unused
1724 call-clobbered register. */
1725 if (current_function_is_leaf)
1727 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1728 if (! regs_ever_live[regno]
1729 && call_used_regs[regno]
1730 && ! fixed_regs[regno]
1731 && ! global_regs[regno]
1732 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1734 current_frame_info.gr_used_mask |= 1 << regno;
1741 regno = current_frame_info.n_local_regs;
1742 /* If there is a frame pointer, then we can't use loc79, because
1743 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
1744 reg_name switching code in ia64_expand_prologue. */
1745 if (regno < (80 - frame_pointer_needed))
1747 current_frame_info.n_local_regs = regno + 1;
1748 return LOC_REG (0) + regno;
1752 /* Failed to find a general register to spill to. Must use stack. */
1756 /* In order to make for nice schedules, we try to allocate every temporary
1757 to a different register. We must of course stay away from call-saved,
1758 fixed, and global registers. We must also stay away from registers
1759 allocated in current_frame_info.gr_used_mask, since those include regs
1760 used all through the prologue.
1762 Any register allocated here must be used immediately. The idea is to
1763 aid scheduling, not to solve data flow problems. */
1765 static int last_scratch_gr_reg;
1768 next_scratch_gr_reg (void)
1772 for (i = 0; i < 32; ++i)
1774 regno = (last_scratch_gr_reg + i + 1) & 31;
1775 if (call_used_regs[regno]
1776 && ! fixed_regs[regno]
1777 && ! global_regs[regno]
1778 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
1780 last_scratch_gr_reg = regno;
1785 /* There must be _something_ available. */
1789 /* Helper function for ia64_compute_frame_size, called through
1790 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
1793 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
1795 unsigned int regno = REGNO (reg);
1798 unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
1799 for (i = 0; i < n; ++i)
1800 current_frame_info.gr_used_mask |= 1 << (regno + i);
1804 /* Returns the number of bytes offset between the frame pointer and the stack
1805 pointer for the current function. SIZE is the number of bytes of space
1806 needed for local variables. */
1809 ia64_compute_frame_size (HOST_WIDE_INT size)
1811 HOST_WIDE_INT total_size;
1812 HOST_WIDE_INT spill_size = 0;
1813 HOST_WIDE_INT extra_spill_size = 0;
1814 HOST_WIDE_INT pretend_args_size;
1817 int spilled_gr_p = 0;
1818 int spilled_fr_p = 0;
1822 if (current_frame_info.initialized)
  memset (&current_frame_info, 0, sizeof current_frame_info);
1826 CLEAR_HARD_REG_SET (mask);
1828 /* Don't allocate scratches to the return register. */
1829 diddle_return_value (mark_reg_gr_used_mask, NULL);
1831 /* Don't allocate scratches to the EH scratch registers. */
1832 if (cfun->machine->ia64_eh_epilogue_sp)
1833 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
1834 if (cfun->machine->ia64_eh_epilogue_bsp)
1835 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
1837 /* Find the size of the register stack frame. We have only 80 local
1838 registers, because we reserve 8 for the inputs and 8 for the
1841 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
1842 since we'll be adjusting that down later. */
1843 regno = LOC_REG (78) + ! frame_pointer_needed;
1844 for (; regno >= LOC_REG (0); regno--)
1845 if (regs_ever_live[regno])
1847 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
1849 /* For functions marked with the syscall_linkage attribute, we must mark
1850 all eight input registers as in use, so that locals aren't visible to
1853 if (cfun->machine->n_varargs > 0
1854 || lookup_attribute ("syscall_linkage",
1855 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
1856 current_frame_info.n_input_regs = 8;
1859 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
1860 if (regs_ever_live[regno])
1862 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
1865 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
1866 if (regs_ever_live[regno])
1868 i = regno - OUT_REG (0) + 1;
1870 /* When -p profiling, we need one output register for the mcount argument.
1871 Likewise for -a profiling for the bb_init_func argument. For -ax
     profiling, we need two output registers for the two bb_init_trace_func
     arguments.  */
  if (current_function_profile)
    i = MAX (i, 1);

  current_frame_info.n_output_regs = i;
1878 /* ??? No rotating register support yet. */
1879 current_frame_info.n_rotate_regs = 0;
1881 /* Discover which registers need spilling, and how much room that
1882 will take. Begin with floating point and general registers,
1883 which will always wind up on the stack. */
1885 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
1886 if (regs_ever_live[regno] && ! call_used_regs[regno])
1888 SET_HARD_REG_BIT (mask, regno);
1894 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
1895 if (regs_ever_live[regno] && ! call_used_regs[regno])
1897 SET_HARD_REG_BIT (mask, regno);
1903 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
1904 if (regs_ever_live[regno] && ! call_used_regs[regno])
1906 SET_HARD_REG_BIT (mask, regno);
1911 /* Now come all special registers that might get saved in other
1912 general registers. */
1914 if (frame_pointer_needed)
1916 current_frame_info.reg_fp = find_gr_spill (1);
1917 /* If we did not get a register, then we take LOC79. This is guaranteed
1918 to be free, even if regs_ever_live is already set, because this is
1919 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
1920 as we don't count loc79 above. */
1921 if (current_frame_info.reg_fp == 0)
1923 current_frame_info.reg_fp = LOC_REG (79);
1924 current_frame_info.n_local_regs++;
1928 if (! current_function_is_leaf)
1930 /* Emit a save of BR0 if we call other functions. Do this even
1931 if this function doesn't return, as EH depends on this to be
1932 able to unwind the stack. */
1933 SET_HARD_REG_BIT (mask, BR_REG (0));
1935 current_frame_info.reg_save_b0 = find_gr_spill (1);
1936 if (current_frame_info.reg_save_b0 == 0)
1942 /* Similarly for ar.pfs. */
1943 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1944 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1945 if (current_frame_info.reg_save_ar_pfs == 0)
1947 extra_spill_size += 8;
1951 /* Similarly for gp. Note that if we're calling setjmp, the stacked
1952 registers are clobbered, so we fall back to the stack. */
1953 current_frame_info.reg_save_gp
1954 = (current_function_calls_setjmp ? 0 : find_gr_spill (1));
1955 if (current_frame_info.reg_save_gp == 0)
1957 SET_HARD_REG_BIT (mask, GR_REG (1));
1964 if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
1966 SET_HARD_REG_BIT (mask, BR_REG (0));
1971 if (regs_ever_live[AR_PFS_REGNUM])
1973 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
1974 current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
1975 if (current_frame_info.reg_save_ar_pfs == 0)
1977 extra_spill_size += 8;
1983 /* Unwind descriptor hackery: things are most efficient if we allocate
1984 consecutive GR save registers for RP, PFS, FP in that order. However,
1985 it is absolutely critical that FP get the only hard register that's
1986 guaranteed to be free, so we allocated it first. If all three did
1987 happen to be allocated hard regs, and are consecutive, rearrange them
1988 into the preferred order now. */
1989 if (current_frame_info.reg_fp != 0
1990 && current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
1991 && current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
1993 current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
1994 current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
1995 current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
1998 /* See if we need to store the predicate register block. */
1999 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2000 if (regs_ever_live[regno] && ! call_used_regs[regno])
2002 if (regno <= PR_REG (63))
2004 SET_HARD_REG_BIT (mask, PR_REG (0));
2005 current_frame_info.reg_save_pr = find_gr_spill (1);
2006 if (current_frame_info.reg_save_pr == 0)
2008 extra_spill_size += 8;
2012 /* ??? Mark them all as used so that register renaming and such
2013 are free to use them. */
2014 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2015 regs_ever_live[regno] = 1;
2018 /* If we're forced to use st8.spill, we're forced to save and restore
2019 ar.unat as well. The check for existing liveness allows inline asm
2020 to touch ar.unat. */
2021 if (spilled_gr_p || cfun->machine->n_varargs
2022 || regs_ever_live[AR_UNAT_REGNUM])
2024 regs_ever_live[AR_UNAT_REGNUM] = 1;
2025 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2026 current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
2027 if (current_frame_info.reg_save_ar_unat == 0)
2029 extra_spill_size += 8;
2034 if (regs_ever_live[AR_LC_REGNUM])
2036 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2037 current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
2038 if (current_frame_info.reg_save_ar_lc == 0)
2040 extra_spill_size += 8;
2045 /* If we have an odd number of words of pretend arguments written to
2046 the stack, then the FR save area will be unaligned. We round the
2047 size of this area up to keep things 16 byte aligned. */
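  /* For example, 24 bytes of pretend arguments (three 8-byte words) would
     round up to 32 so that the FR spill slots that follow stay 16-byte
     aligned.  */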
  if (spilled_fr_p)
    pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
  else
    pretend_args_size = current_function_pretend_args_size;
2053 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2054 + current_function_outgoing_args_size);
2055 total_size = IA64_STACK_ALIGN (total_size);
2057 /* We always use the 16-byte scratch area provided by the caller, but
2058 if we are a leaf function, there's no one to whom we need to provide a scratch area. */
2060 if (current_function_is_leaf)
2061 total_size = MAX (0, total_size - 16);
2063 current_frame_info.total_size = total_size;
2064 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2065 current_frame_info.spill_size = spill_size;
2066 current_frame_info.extra_spill_size = extra_spill_size;
2067 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2068 current_frame_info.n_spilled = n_spilled;
2069 current_frame_info.initialized = reload_completed;
2072 /* Compute the initial difference between the specified pair of registers. */
2075 ia64_initial_elimination_offset (int from, int to)
2077 HOST_WIDE_INT offset;
2079 ia64_compute_frame_size (get_frame_size ());
2082 case FRAME_POINTER_REGNUM:
2083 if (to == HARD_FRAME_POINTER_REGNUM)
2085 if (current_function_is_leaf)
2086 offset = -current_frame_info.total_size;
2088 offset = -(current_frame_info.total_size
2089 - current_function_outgoing_args_size - 16);
2091 else if (to == STACK_POINTER_REGNUM)
2093 if (current_function_is_leaf)
2096 offset = 16 + current_function_outgoing_args_size;
2102 case ARG_POINTER_REGNUM:
2103 /* Arguments start above the 16 byte save area, unless stdarg
2104 in which case we store through the 16 byte save area. */
2105 if (to == HARD_FRAME_POINTER_REGNUM)
2106 offset = 16 - current_function_pretend_args_size;
2107 else if (to == STACK_POINTER_REGNUM)
2108 offset = (current_frame_info.total_size
2109 + 16 - current_function_pretend_args_size);
2121 /* If there are more than a trivial number of register spills, we use
2122 two interleaved iterators so that we can get two memory references per insn group.
2125 In order to simplify things in the prologue and epilogue expanders,
2126 we use helper functions to fix up the memory references after the
2127 fact with the appropriate offsets to a POST_MODIFY memory mode.
2128 The following data structure tracks the state of the two iterators
2129 while insns are being emitted. */
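/* Illustrative sketch only (register numbers and displacements are
   hypothetical; the real ones depend on the frame layout and on which
   scratch GRs happen to be free): with two iterators the emitted spill
   code can alternate between two address registers, e.g.

       st8.spill [r2] = r32, 16
       st8.spill [r3] = r33, 16
       st8.spill [r2] = r34, 16
       st8.spill [r3] = r35, 16

   so consecutive saves never stall on the same post-incremented address
   register.  */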
2131 struct spill_fill_data
2133 rtx init_after; /* point at which to emit initializations */
2134 rtx init_reg[2]; /* initial base register */
2135 rtx iter_reg[2]; /* the iterator registers */
2136 rtx *prev_addr[2]; /* address of last memory use */
2137 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2138 HOST_WIDE_INT prev_off[2]; /* last offset */
2139 int n_iter; /* number of iterators in use */
2140 int next_iter; /* next iterator to use */
2141 unsigned int save_gr_used_mask;
2144 static struct spill_fill_data spill_fill_data;
2147 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2151 spill_fill_data.init_after = get_last_insn ();
2152 spill_fill_data.init_reg[0] = init_reg;
2153 spill_fill_data.init_reg[1] = init_reg;
2154 spill_fill_data.prev_addr[0] = NULL;
2155 spill_fill_data.prev_addr[1] = NULL;
2156 spill_fill_data.prev_insn[0] = NULL;
2157 spill_fill_data.prev_insn[1] = NULL;
2158 spill_fill_data.prev_off[0] = cfa_off;
2159 spill_fill_data.prev_off[1] = cfa_off;
2160 spill_fill_data.next_iter = 0;
2161 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
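/* Use a second spill/fill iterator only when more than two spills will be
   interleaved; one or two spills get by with a single iterator.  */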
2163 spill_fill_data.n_iter = 1 + (n_spills > 2);
2164 for (i = 0; i < spill_fill_data.n_iter; ++i)
2166 int regno = next_scratch_gr_reg ();
2167 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2168 current_frame_info.gr_used_mask |= 1 << regno;
2173 finish_spill_pointers (void)
2175 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2179 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2181 int iter = spill_fill_data.next_iter;
2182 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2183 rtx disp_rtx = GEN_INT (disp);
2186 if (spill_fill_data.prev_addr[iter])
2188 if (CONST_OK_FOR_N (disp))
2190 *spill_fill_data.prev_addr[iter]
2191 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2192 gen_rtx_PLUS (DImode,
2193 spill_fill_data.iter_reg[iter],
2195 REG_NOTES (spill_fill_data.prev_insn[iter])
2196 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2197 REG_NOTES (spill_fill_data.prev_insn[iter]));
2201 /* ??? Could use register post_modify for loads. */
2202 if (! CONST_OK_FOR_I (disp))
2204 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2205 emit_move_insn (tmp, disp_rtx);
2208 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2209 spill_fill_data.iter_reg[iter], disp_rtx));
2212 /* Micro-optimization: if we've created a frame pointer, it's at
2213 CFA 0, which may allow the real iterator to be initialized lower,
2214 slightly increasing parallelism. Also, if there are few saves
2215 it may eliminate the iterator entirely. */
2217 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2218 && frame_pointer_needed)
2220 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2221 set_mem_alias_set (mem, get_varargs_alias_set ());
2229 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2230 spill_fill_data.init_reg[iter]);
2235 if (! CONST_OK_FOR_I (disp))
2237 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2238 emit_move_insn (tmp, disp_rtx);
2242 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2243 spill_fill_data.init_reg[iter],
2250 /* Careful for being the first insn in a sequence. */
2251 if (spill_fill_data.init_after)
2252 insn = emit_insn_after (seq, spill_fill_data.init_after);
2255 rtx first = get_insns ();
2257 insn = emit_insn_before (seq, first);
2259 insn = emit_insn (seq);
2261 spill_fill_data.init_after = insn;
2263 /* If DISP is 0, we may or may not have a further adjustment
2264 afterward. If we do, then the load/store insn may be modified
2265 to be a post-modify. If we don't, then this copy may be
2266 eliminated by copyprop_hardreg_forward, which makes this
2267 insn garbage, which runs afoul of the sanity check in
2268 propagate_one_insn. So mark this insn as legal to delete. */
2270 REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
2274 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2276 /* ??? Not all of the spills are for varargs, but some of them are.
2277 The rest of the spills belong in an alias set of their own. But
2278 it doesn't actually hurt to include them here. */
2279 set_mem_alias_set (mem, get_varargs_alias_set ());
2281 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2282 spill_fill_data.prev_off[iter] = cfa_off;
2284 if (++iter >= spill_fill_data.n_iter)
2286 spill_fill_data.next_iter = iter;
2292 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2295 int iter = spill_fill_data.next_iter;
2298 mem = spill_restore_mem (reg, cfa_off);
2299 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2300 spill_fill_data.prev_insn[iter] = insn;
2307 RTX_FRAME_RELATED_P (insn) = 1;
2309 /* Don't even pretend that the unwind code can intuit its way
2310 through a pair of interleaved post_modify iterators. Just
2311 provide the correct answer. */
2313 if (frame_pointer_needed)
2315 base = hard_frame_pointer_rtx;
2320 base = stack_pointer_rtx;
2321 off = current_frame_info.total_size - cfa_off;
2325 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2326 gen_rtx_SET (VOIDmode,
2327 gen_rtx_MEM (GET_MODE (reg),
2328 plus_constant (base, off)),
2335 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2337 int iter = spill_fill_data.next_iter;
2340 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2341 GEN_INT (cfa_off)));
2342 spill_fill_data.prev_insn[iter] = insn;
2345 /* Wrapper functions that discard the CONST_INT spill offset. These
2346 exist so that we can give gr_spill/gr_fill the offset they need and
2347 use a consistent function interface. */
2350 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2352 return gen_movdi (dest, src);
2356 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2358 return gen_fr_spill (dest, src);
2362 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2364 return gen_fr_restore (dest, src);
2367 /* Called after register allocation to add any instructions needed for the
2368 prologue. Using a prologue insn is favored compared to putting all of the
2369 instructions in output_function_prologue(), since it allows the scheduler
2370 to intermix instructions with the saves of the caller saved registers. In
2371 some cases, it might be necessary to emit a barrier instruction as the last
2372 insn to prevent such scheduling.
2374 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2375 so that the debug info generation code can handle them properly.
2377 The register save area is laid out like so:
2379 [ varargs spill area ]
2380 [ fr register spill area ]
2381 [ br register spill area ]
2382 [ ar register spill area ]
2383 [ pr register spill area ]
2384 [ gr register spill area ] */
2386 /* ??? Get inefficient code when the frame size is larger than can fit in an
2387 adds instruction. */
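/* Illustrative example of the inefficiency noted above (assuming the 'I'
   constraint used below is the usual 14-bit signed add immediate): a small
   frame is allocated with a single

       adds r12 = -96, r12

   while a frame too large for that immediate must first materialize the
   size in a scratch register, roughly

       movl r2 = -65536 ;;
       add  r12 = r2, r12

   The same pattern applies to the matching deallocation in the epilogue.  */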
2390 ia64_expand_prologue (void)
2392 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2393 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2396 ia64_compute_frame_size (get_frame_size ());
2397 last_scratch_gr_reg = 15;
2399 /* If there is no epilogue, then we don't need some prologue insns.
2400 We need to avoid emitting the dead prologue insns, because flow
2401 will complain about them. */
2407 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2408 if ((e->flags & EDGE_FAKE) == 0
2409 && (e->flags & EDGE_FALLTHRU) != 0)
2411 epilogue_p = (e != NULL);
2416 /* Set the local, input, and output register names. We need to do this
2417 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2418 half. If we use in/loc/out register names, then we get assembler errors
2419 in crtn.S because there is no alloc insn or regstk directive in there. */
2420 if (! TARGET_REG_NAMES)
2422 int inputs = current_frame_info.n_input_regs;
2423 int locals = current_frame_info.n_local_regs;
2424 int outputs = current_frame_info.n_output_regs;
2426 for (i = 0; i < inputs; i++)
2427 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2428 for (i = 0; i < locals; i++)
2429 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2430 for (i = 0; i < outputs; i++)
2431 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2434 /* Set the frame pointer register name. The regnum is logically loc79,
2435 but of course we'll not have allocated that many locals. Rather than
2436 worrying about renumbering the existing rtxs, we adjust the name. */
2437 /* ??? This code means that we can never use one local register when
2438 there is a frame pointer. loc79 gets wasted in this case, as it is
2439 renamed to a register that will never be used. See also the try_locals
2440 code in find_gr_spill. */
2441 if (current_frame_info.reg_fp)
2443 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2444 reg_names[HARD_FRAME_POINTER_REGNUM]
2445 = reg_names[current_frame_info.reg_fp];
2446 reg_names[current_frame_info.reg_fp] = tmp;
2449 /* We don't need an alloc instruction if we've used no outputs or locals. */
2450 if (current_frame_info.n_local_regs == 0
2451 && current_frame_info.n_output_regs == 0
2452 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2453 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2455 /* If there is no alloc, but there are input registers used, then we
2456 need a .regstk directive. */
2457 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2458 ar_pfs_save_reg = NULL_RTX;
2462 current_frame_info.need_regstk = 0;
2464 if (current_frame_info.reg_save_ar_pfs)
2465 regno = current_frame_info.reg_save_ar_pfs;
2467 regno = next_scratch_gr_reg ();
2468 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2470 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2471 GEN_INT (current_frame_info.n_input_regs),
2472 GEN_INT (current_frame_info.n_local_regs),
2473 GEN_INT (current_frame_info.n_output_regs),
2474 GEN_INT (current_frame_info.n_rotate_regs)));
2475 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
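/* For example (illustrative numbers only): a function with two inputs,
   three locals, four outputs and no rotating registers gets an alloc along
   the lines of

       alloc r34 = ar.pfs, 2, 3, 4, 0

   where r34 is either the GR chosen to hold ar.pfs for the whole function
   or a scratch register when no save register was allocated.  */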
2478 /* Set up frame pointer, stack pointer, and spill iterators. */
2480 n_varargs = cfun->machine->n_varargs;
2481 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2482 stack_pointer_rtx, 0);
2484 if (frame_pointer_needed)
2486 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2487 RTX_FRAME_RELATED_P (insn) = 1;
2490 if (current_frame_info.total_size != 0)
2492 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2495 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2496 offset = frame_size_rtx;
2499 regno = next_scratch_gr_reg ();
2500 offset = gen_rtx_REG (DImode, regno);
2501 emit_move_insn (offset, frame_size_rtx);
2504 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2505 stack_pointer_rtx, offset));
2507 if (! frame_pointer_needed)
2509 RTX_FRAME_RELATED_P (insn) = 1;
2510 if (GET_CODE (offset) != CONST_INT)
2513 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2514 gen_rtx_SET (VOIDmode,
2516 gen_rtx_PLUS (DImode,
2523 /* ??? At this point we must generate a magic insn that appears to
2524 modify the stack pointer, the frame pointer, and all spill
2525 iterators. This would allow the most scheduling freedom. For
2526 now, just hard stop. */
2527 emit_insn (gen_blockage ());
2530 /* Must copy out ar.unat before doing any integer spills. */
2531 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2533 if (current_frame_info.reg_save_ar_unat)
2535 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2538 alt_regno = next_scratch_gr_reg ();
2539 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2540 current_frame_info.gr_used_mask |= 1 << alt_regno;
2543 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2544 insn = emit_move_insn (ar_unat_save_reg, reg);
2545 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2547 /* Even if we're not going to generate an epilogue, we still
2548 need to save the register so that EH works. */
2549 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2550 emit_insn (gen_prologue_use (ar_unat_save_reg));
2553 ar_unat_save_reg = NULL_RTX;
2555 /* Spill all varargs registers. Do this before spilling any GR registers,
2556 since we want the UNAT bits for the GR registers to override the UNAT
2557 bits from varargs, which we don't care about. */
2560 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2562 reg = gen_rtx_REG (DImode, regno);
2563 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2566 /* Locate the bottom of the register save area. */
2567 cfa_off = (current_frame_info.spill_cfa_off
2568 + current_frame_info.spill_size
2569 + current_frame_info.extra_spill_size);
2571 /* Save the predicate register block either in a register or in memory. */
2572 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2574 reg = gen_rtx_REG (DImode, PR_REG (0));
2575 if (current_frame_info.reg_save_pr != 0)
2577 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2578 insn = emit_move_insn (alt_reg, reg);
2580 /* ??? Denote pr spill/fill by a DImode move that modifies all
2581 64 hard registers. */
2582 RTX_FRAME_RELATED_P (insn) = 1;
2584 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2585 gen_rtx_SET (VOIDmode, alt_reg, reg),
2588 /* Even if we're not going to generate an epilogue, we still
2589 need to save the register so that EH works. */
2591 emit_insn (gen_prologue_use (alt_reg));
2595 alt_regno = next_scratch_gr_reg ();
2596 alt_reg = gen_rtx_REG (DImode, alt_regno);
2597 insn = emit_move_insn (alt_reg, reg);
2598 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2603 /* Handle AR regs in numerical order. All of them get special handling. */
2604 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2605 && current_frame_info.reg_save_ar_unat == 0)
2607 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2608 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2612 /* The alloc insn already copied ar.pfs into a general register. The
2613 only thing we have to do now is copy that register to a stack slot
2614 if we'd not allocated a local register for the job. */
2615 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2616 && current_frame_info.reg_save_ar_pfs == 0)
2618 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2619 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2623 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2625 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2626 if (current_frame_info.reg_save_ar_lc != 0)
2628 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2629 insn = emit_move_insn (alt_reg, reg);
2630 RTX_FRAME_RELATED_P (insn) = 1;
2632 /* Even if we're not going to generate an epilogue, we still
2633 need to save the register so that EH works. */
2635 emit_insn (gen_prologue_use (alt_reg));
2639 alt_regno = next_scratch_gr_reg ();
2640 alt_reg = gen_rtx_REG (DImode, alt_regno);
2641 emit_move_insn (alt_reg, reg);
2642 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2647 if (current_frame_info.reg_save_gp)
2649 insn = emit_move_insn (gen_rtx_REG (DImode,
2650 current_frame_info.reg_save_gp),
2651 pic_offset_table_rtx);
2652 /* We don't know for sure yet if this is actually needed, since
2653 we've not split the PIC call patterns. If all of the calls
2654 are indirect, and not followed by any uses of the gp, then
2655 this save is dead. Allow it to go away. */
2657 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2660 /* We should now be at the base of the gr/br/fr spill area. */
2661 if (cfa_off != (current_frame_info.spill_cfa_off
2662 + current_frame_info.spill_size))
2665 /* Spill all general registers. */
2666 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2667 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2669 reg = gen_rtx_REG (DImode, regno);
2670 do_spill (gen_gr_spill, reg, cfa_off, reg);
2674 /* Handle BR0 specially -- it may be getting stored permanently in
2675 some GR register. */
2676 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2678 reg = gen_rtx_REG (DImode, BR_REG (0));
2679 if (current_frame_info.reg_save_b0 != 0)
2681 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2682 insn = emit_move_insn (alt_reg, reg);
2683 RTX_FRAME_RELATED_P (insn) = 1;
2685 /* Even if we're not going to generate an epilogue, we still
2686 need to save the register so that EH works. */
2688 emit_insn (gen_prologue_use (alt_reg));
2692 alt_regno = next_scratch_gr_reg ();
2693 alt_reg = gen_rtx_REG (DImode, alt_regno);
2694 emit_move_insn (alt_reg, reg);
2695 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2700 /* Spill the rest of the BR registers. */
2701 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2702 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2704 alt_regno = next_scratch_gr_reg ();
2705 alt_reg = gen_rtx_REG (DImode, alt_regno);
2706 reg = gen_rtx_REG (DImode, regno);
2707 emit_move_insn (alt_reg, reg);
2708 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2712 /* Align the frame and spill all FR registers. */
2713 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2714 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2718 reg = gen_rtx_REG (XFmode, regno);
2719 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2723 if (cfa_off != current_frame_info.spill_cfa_off)
2726 finish_spill_pointers ();
2729 /* Called after register allocation to add any instructions needed for the
2730 epilogue. Using an epilogue insn is favored compared to putting all of the
2731 instructions in output_function_epilogue(), since it allows the scheduler
2732 to intermix instructions with the restores of the caller saved registers. In
2733 some cases, it might be necessary to emit a barrier instruction as the last
2734 insn to prevent such scheduling. */
2737 ia64_expand_epilogue (int sibcall_p)
2739 rtx insn, reg, alt_reg, ar_unat_save_reg;
2740 int regno, alt_regno, cfa_off;
2742 ia64_compute_frame_size (get_frame_size ());
2744 /* If there is a frame pointer, then we use it instead of the stack
2745 pointer, so that the stack pointer does not need to be valid when
2746 the epilogue starts. See EXIT_IGNORE_STACK. */
2747 if (frame_pointer_needed)
2748 setup_spill_pointers (current_frame_info.n_spilled,
2749 hard_frame_pointer_rtx, 0);
2751 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2752 current_frame_info.total_size);
2754 if (current_frame_info.total_size != 0)
2756 /* ??? At this point we must generate a magic insn that appears to
2757 modify the spill iterators and the frame pointer. This would
2758 allow the most scheduling freedom. For now, just hard stop. */
2759 emit_insn (gen_blockage ());
2762 /* Locate the bottom of the register save area. */
2763 cfa_off = (current_frame_info.spill_cfa_off
2764 + current_frame_info.spill_size
2765 + current_frame_info.extra_spill_size);
2767 /* Restore the predicate registers. */
2768 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2770 if (current_frame_info.reg_save_pr != 0)
2771 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2774 alt_regno = next_scratch_gr_reg ();
2775 alt_reg = gen_rtx_REG (DImode, alt_regno);
2776 do_restore (gen_movdi_x, alt_reg, cfa_off);
2779 reg = gen_rtx_REG (DImode, PR_REG (0));
2780 emit_move_insn (reg, alt_reg);
2783 /* Restore the application registers. */
2785 /* Load the saved unat from the stack, but do not restore it until
2786 after the GRs have been restored. */
2787 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2789 if (current_frame_info.reg_save_ar_unat != 0)
2791 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2794 alt_regno = next_scratch_gr_reg ();
2795 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2796 current_frame_info.gr_used_mask |= 1 << alt_regno;
2797 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2802 ar_unat_save_reg = NULL_RTX;
2804 if (current_frame_info.reg_save_ar_pfs != 0)
2806 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2807 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2808 emit_move_insn (reg, alt_reg);
2810 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2812 alt_regno = next_scratch_gr_reg ();
2813 alt_reg = gen_rtx_REG (DImode, alt_regno);
2814 do_restore (gen_movdi_x, alt_reg, cfa_off);
2816 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2817 emit_move_insn (reg, alt_reg);
2820 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2822 if (current_frame_info.reg_save_ar_lc != 0)
2823 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2826 alt_regno = next_scratch_gr_reg ();
2827 alt_reg = gen_rtx_REG (DImode, alt_regno);
2828 do_restore (gen_movdi_x, alt_reg, cfa_off);
2831 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2832 emit_move_insn (reg, alt_reg);
2835 /* We should now be at the base of the gr/br/fr spill area. */
2836 if (cfa_off != (current_frame_info.spill_cfa_off
2837 + current_frame_info.spill_size))
2840 /* The GP may be stored on the stack in the prologue, but it's
2841 never restored in the epilogue. Skip the stack slot. */
2842 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2845 /* Restore all general registers. */
2846 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2847 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2849 reg = gen_rtx_REG (DImode, regno);
2850 do_restore (gen_gr_restore, reg, cfa_off);
2854 /* Restore the branch registers. Handle B0 specially, as it may
2855 have gotten stored in some GR register. */
2856 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2858 if (current_frame_info.reg_save_b0 != 0)
2859 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2862 alt_regno = next_scratch_gr_reg ();
2863 alt_reg = gen_rtx_REG (DImode, alt_regno);
2864 do_restore (gen_movdi_x, alt_reg, cfa_off);
2867 reg = gen_rtx_REG (DImode, BR_REG (0));
2868 emit_move_insn (reg, alt_reg);
2871 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2872 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2874 alt_regno = next_scratch_gr_reg ();
2875 alt_reg = gen_rtx_REG (DImode, alt_regno);
2876 do_restore (gen_movdi_x, alt_reg, cfa_off);
2878 reg = gen_rtx_REG (DImode, regno);
2879 emit_move_insn (reg, alt_reg);
2882 /* Restore floating point registers. */
2883 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2884 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2888 reg = gen_rtx_REG (XFmode, regno);
2889 do_restore (gen_fr_restore_x, reg, cfa_off);
2893 /* Restore ar.unat for real. */
2894 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2896 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2897 emit_move_insn (reg, ar_unat_save_reg);
2900 if (cfa_off != current_frame_info.spill_cfa_off)
2903 finish_spill_pointers ();
2905 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2907 /* ??? At this point we must generate a magic insn that appears to
2908 modify the spill iterators, the stack pointer, and the frame
2909 pointer. This would allow the most scheduling freedom. For now,
2911 emit_insn (gen_blockage ());
2914 if (cfun->machine->ia64_eh_epilogue_sp)
2915 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2916 else if (frame_pointer_needed)
2918 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2919 RTX_FRAME_RELATED_P (insn) = 1;
2921 else if (current_frame_info.total_size)
2923 rtx offset, frame_size_rtx;
2925 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2926 if (CONST_OK_FOR_I (current_frame_info.total_size))
2927 offset = frame_size_rtx;
2930 regno = next_scratch_gr_reg ();
2931 offset = gen_rtx_REG (DImode, regno);
2932 emit_move_insn (offset, frame_size_rtx);
2935 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2938 RTX_FRAME_RELATED_P (insn) = 1;
2939 if (GET_CODE (offset) != CONST_INT)
2942 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2943 gen_rtx_SET (VOIDmode,
2945 gen_rtx_PLUS (DImode,
2952 if (cfun->machine->ia64_eh_epilogue_bsp)
2953 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2956 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2959 int fp = GR_REG (2);
2960 /* We need a throwaway register here; r0 and r1 are reserved, so r2 is the
2961 first available call-clobbered register. If there was a frame_pointer
2962 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2963 so we have to make sure we're using the string "r2" when emitting
2964 the register name for the assembler. */
2965 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2966 fp = HARD_FRAME_POINTER_REGNUM;
2968 /* We must emit an alloc to force the input registers to become output
2969 registers. Otherwise, if the callee tries to pass its parameters
2970 through to another call without an intervening alloc, then these values get lost. */
2972 /* ??? We don't need to preserve all input registers. We only need to
2973 preserve those input registers used as arguments to the sibling call.
2974 It is unclear how to compute that number here. */
2975 if (current_frame_info.n_input_regs != 0)
2977 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
2978 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2979 const0_rtx, const0_rtx,
2980 n_inputs, const0_rtx));
2981 RTX_FRAME_RELATED_P (insn) = 1;
2986 /* Return 1 if br.ret can do all the work required to return from a function. */
2990 ia64_direct_return (void)
2992 if (reload_completed && ! frame_pointer_needed)
2994 ia64_compute_frame_size (get_frame_size ());
2996 return (current_frame_info.total_size == 0
2997 && current_frame_info.n_spilled == 0
2998 && current_frame_info.reg_save_b0 == 0
2999 && current_frame_info.reg_save_pr == 0
3000 && current_frame_info.reg_save_ar_pfs == 0
3001 && current_frame_info.reg_save_ar_unat == 0
3002 && current_frame_info.reg_save_ar_lc == 0);
3007 /* Return the magic cookie that we use to hold the return address
3008 during early compilation. */
3011 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3015 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3018 /* Split this value after reload, now that we know where the return
3019 address is saved. */
3022 ia64_split_return_addr_rtx (rtx dest)
3026 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3028 if (current_frame_info.reg_save_b0 != 0)
3029 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
3035 /* Compute offset from CFA for BR0. */
3036 /* ??? Must be kept in sync with ia64_expand_prologue. */
3037 off = (current_frame_info.spill_cfa_off
3038 + current_frame_info.spill_size);
3039 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3040 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3043 /* Convert CFA offset to a register based offset. */
3044 if (frame_pointer_needed)
3045 src = hard_frame_pointer_rtx;
3048 src = stack_pointer_rtx;
3049 off += current_frame_info.total_size;
3052 /* Load address into scratch register. */
3053 if (CONST_OK_FOR_I (off))
3054 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
3057 emit_move_insn (dest, GEN_INT (off));
3058 emit_insn (gen_adddi3 (dest, src, dest));
3061 src = gen_rtx_MEM (Pmode, dest);
3065 src = gen_rtx_REG (DImode, BR_REG (0));
3067 emit_move_insn (dest, src);
3071 ia64_hard_regno_rename_ok (int from, int to)
3073 /* Don't clobber any of the registers we reserved for the prologue. */
3074 if (to == current_frame_info.reg_fp
3075 || to == current_frame_info.reg_save_b0
3076 || to == current_frame_info.reg_save_pr
3077 || to == current_frame_info.reg_save_ar_pfs
3078 || to == current_frame_info.reg_save_ar_unat
3079 || to == current_frame_info.reg_save_ar_lc)
3082 if (from == current_frame_info.reg_fp
3083 || from == current_frame_info.reg_save_b0
3084 || from == current_frame_info.reg_save_pr
3085 || from == current_frame_info.reg_save_ar_pfs
3086 || from == current_frame_info.reg_save_ar_unat
3087 || from == current_frame_info.reg_save_ar_lc)
3090 /* Don't use output registers outside the register frame. */
3091 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3094 /* Retain even/oddness on predicate register pairs. */
3095 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3096 return (from & 1) == (to & 1);
3101 /* Target hook for assembling integer objects. Handle word-sized
3102 aligned objects and detect the cases when @fptr is needed. */
3105 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3107 if (size == POINTER_SIZE / BITS_PER_UNIT
3108 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3109 && GET_CODE (x) == SYMBOL_REF
3110 && SYMBOL_REF_FUNCTION_P (x))
3112 static const char * const directive[2][2] = {
3113 /* 64-bit pointer */ /* 32-bit pointer */
3114 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3115 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3117 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3118 output_addr_const (asm_out_file, x);
3119 fputs (")\n", asm_out_file);
3122 return default_assemble_integer (x, size, aligned_p);
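/* For example (illustrative, ignoring any target-specific symbol
   decoration): an aligned 64-bit pointer to a function "foo" comes out as

       data8   @fptr(foo)

   while an unaligned 32-bit pointer would use "data4.ua" instead; ordinary
   data pointers simply fall back to default_assemble_integer.  */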
3125 /* Emit the function prologue. */
3128 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3130 int mask, grsave, grsave_prev;
3132 if (current_frame_info.need_regstk)
3133 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3134 current_frame_info.n_input_regs,
3135 current_frame_info.n_local_regs,
3136 current_frame_info.n_output_regs,
3137 current_frame_info.n_rotate_regs);
3139 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3142 /* Emit the .prologue directive. */
3145 grsave = grsave_prev = 0;
3146 if (current_frame_info.reg_save_b0 != 0)
3149 grsave = grsave_prev = current_frame_info.reg_save_b0;
3151 if (current_frame_info.reg_save_ar_pfs != 0
3152 && (grsave_prev == 0
3153 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
3156 if (grsave_prev == 0)
3157 grsave = current_frame_info.reg_save_ar_pfs;
3158 grsave_prev = current_frame_info.reg_save_ar_pfs;
3160 if (current_frame_info.reg_fp != 0
3161 && (grsave_prev == 0
3162 || current_frame_info.reg_fp == grsave_prev + 1))
3165 if (grsave_prev == 0)
3166 grsave = HARD_FRAME_POINTER_REGNUM;
3167 grsave_prev = current_frame_info.reg_fp;
3169 if (current_frame_info.reg_save_pr != 0
3170 && (grsave_prev == 0
3171 || current_frame_info.reg_save_pr == grsave_prev + 1))
3174 if (grsave_prev == 0)
3175 grsave = current_frame_info.reg_save_pr;
3178 if (mask && TARGET_GNU_AS)
3179 fprintf (file, "\t.prologue %d, %d\n", mask,
3180 ia64_dbx_register_number (grsave));
3182 fputs ("\t.prologue\n", file);
3184 /* Emit a .spill directive, if necessary, to relocate the base of
3185 the register spill area. */
3186 if (current_frame_info.spill_cfa_off != -16)
3187 fprintf (file, "\t.spill %ld\n",
3188 (long) (current_frame_info.spill_cfa_off
3189 + current_frame_info.spill_size));
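/* Illustrative output only (the mask and register values are examples, not
   computed here): a function whose b0 and ar.pfs were saved in consecutive
   general registers might emit

       .prologue 12, 40

   under GNU as, giving the mask of described saves and the first save GR;
   otherwise a bare ".prologue" is printed.  The ".spill" directive is only
   added when spill_cfa_off differs from its default of -16.  */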
3192 /* Emit the .body directive at the scheduled end of the prologue. */
3195 ia64_output_function_end_prologue (FILE *file)
3197 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3200 fputs ("\t.body\n", file);
3203 /* Emit the function epilogue. */
3206 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3207 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3211 if (current_frame_info.reg_fp)
3213 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3214 reg_names[HARD_FRAME_POINTER_REGNUM]
3215 = reg_names[current_frame_info.reg_fp];
3216 reg_names[current_frame_info.reg_fp] = tmp;
3218 if (! TARGET_REG_NAMES)
3220 for (i = 0; i < current_frame_info.n_input_regs; i++)
3221 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3222 for (i = 0; i < current_frame_info.n_local_regs; i++)
3223 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3224 for (i = 0; i < current_frame_info.n_output_regs; i++)
3225 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3228 current_frame_info.initialized = 0;
3232 ia64_dbx_register_number (int regno)
3234 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3235 from its home at loc79 to something inside the register frame. We
3236 must perform the same renumbering here for the debug info. */
3237 if (current_frame_info.reg_fp)
3239 if (regno == HARD_FRAME_POINTER_REGNUM)
3240 regno = current_frame_info.reg_fp;
3241 else if (regno == current_frame_info.reg_fp)
3242 regno = HARD_FRAME_POINTER_REGNUM;
3245 if (IN_REGNO_P (regno))
3246 return 32 + regno - IN_REG (0);
3247 else if (LOC_REGNO_P (regno))
3248 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3249 else if (OUT_REGNO_P (regno))
3250 return (32 + current_frame_info.n_input_regs
3251 + current_frame_info.n_local_regs + regno - OUT_REG (0));
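/* Worked example (hypothetical frame): with 2 input and 3 local registers,
   in0/in1 map to debug numbers 32/33, loc0..loc2 to 34..36, and out0 to 37,
   so the debug numbers form one contiguous block regardless of how the
   physical register frame was carved up.  */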
3257 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3259 rtx addr_reg, eight = GEN_INT (8);
3261 /* The Intel assembler requires that the global __ia64_trampoline symbol
3262 be declared explicitly */
3265 static bool declared_ia64_trampoline = false;
3267 if (!declared_ia64_trampoline)
3269 declared_ia64_trampoline = true;
3270 (*targetm.asm_out.globalize_label) (asm_out_file,
3271 "__ia64_trampoline");
3275 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3276 addr = convert_memory_address (Pmode, addr);
3277 fnaddr = convert_memory_address (Pmode, fnaddr);
3278 static_chain = convert_memory_address (Pmode, static_chain);
3280 /* Load up our iterator. */
3281 addr_reg = gen_reg_rtx (Pmode);
3282 emit_move_insn (addr_reg, addr);
3284 /* The first two words are the fake descriptor:
3285 __ia64_trampoline, ADDR+16. */
3286 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3287 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3288 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3290 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3291 copy_to_reg (plus_constant (addr, 16)));
3292 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3294 /* The third word is the target descriptor. */
3295 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3296 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3298 /* The fourth word is the static chain. */
3299 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
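/* Sketch of the trampoline laid down by the moves above (offsets are
   relative to ADDR):

       ADDR +  0:  __ia64_trampoline    \ fake function descriptor
       ADDR +  8:  ADDR + 16            /  (entry point and "gp" slot)
       ADDR + 16:  FNADDR                 target function descriptor
       ADDR + 24:  STATIC_CHAIN

   The libgcc __ia64_trampoline stub uses the bogus gp value (ADDR + 16) to
   find the real descriptor and static chain at run time.  */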
3302 /* Do any needed setup for a variadic function. CUM has not been updated
3303 for the last named argument which has type TYPE and mode MODE.
3305 We generate the actual spill instructions during prologue generation. */
3308 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3309 tree type, int * pretend_size,
3310 int second_time ATTRIBUTE_UNUSED)
3312 CUMULATIVE_ARGS next_cum = *cum;
3314 /* Skip the current argument. */
3315 ia64_function_arg_advance (&next_cum, mode, type, 1);
3317 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3319 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3320 *pretend_size = n * UNITS_PER_WORD;
3321 cfun->machine->n_varargs = n;
3325 /* Check whether TYPE is a homogeneous floating point aggregate. If
3326 it is, return the mode of the floating point type that appears
3327 in all leaves. If it is not, return VOIDmode.
3329 An aggregate is a homogeneous floating point aggregate if all
3330 fields/elements in it have the same floating point type (e.g.,
3331 SFmode). 128-bit quad-precision floats are excluded.
3333 Variable sized aggregates should never arrive here, since we should
3334 have already decided to pass them by reference. Top-level zero-sized
3335 aggregates are excluded because our parallels crash the middle-end. */
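/* For illustration (hypothetical C types, not taken from any source):

       struct vec3  { float x, y, z; };      -- HFA, element mode SFmode
       struct cplx  { double re, im; };      -- HFA, element mode DFmode
       struct mixed { float x; double y; };  -- not an HFA (mixed modes)

   and, per the comment above, an aggregate containing a 128-bit
   quad-precision (TFmode) field is likewise not treated as an HFA.  */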
3337 static enum machine_mode
3338 hfa_element_mode (tree type, bool nested)
3340 enum machine_mode element_mode = VOIDmode;
3341 enum machine_mode mode;
3342 enum tree_code code = TREE_CODE (type);
3343 int know_element_mode = 0;
3346 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3351 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3352 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
3353 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3354 case FILE_TYPE: case LANG_TYPE: case FUNCTION_TYPE:
3357 /* Fortran complex types are supposed to be HFAs, so we need to handle
3358 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex types, though. */
3361 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3362 && TYPE_MODE (type) != TCmode)
3363 return GET_MODE_INNER (TYPE_MODE (type));
3368 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3369 mode if this is contained within an aggregate. */
3370 if (nested && TYPE_MODE (type) != TFmode)
3371 return TYPE_MODE (type);
3376 return hfa_element_mode (TREE_TYPE (type), 1);
3380 case QUAL_UNION_TYPE:
3381 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3383 if (TREE_CODE (t) != FIELD_DECL)
3386 mode = hfa_element_mode (TREE_TYPE (t), 1);
3387 if (know_element_mode)
3389 if (mode != element_mode)
3392 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3396 know_element_mode = 1;
3397 element_mode = mode;
3400 return element_mode;
3403 /* If we reach here, we probably have some front-end specific type
3404 that the backend doesn't know about. This can happen via the
3405 aggregate_value_p call in init_function_start. All we can do is
3406 ignore unknown tree types. */
3413 /* Return the number of words required to hold a quantity of TYPE and MODE
3414 when passed as an argument. */
3416 ia64_function_arg_words (tree type, enum machine_mode mode)
3420 if (mode == BLKmode)
3421 words = int_size_in_bytes (type);
3423 words = GET_MODE_SIZE (mode);
3425 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
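/* Worked example (assuming 8-byte argument words): a DImode integer takes
   one word, a TFmode quad-precision float takes two, and a 12-byte BLKmode
   struct rounds up to (12 + 7) / 8 = 2 words.  */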
3428 /* Return the number of registers that should be skipped so the current
3429 argument (described by TYPE and WORDS) will be properly aligned.
3431 Integer and float arguments larger than 8 bytes start at the next
3432 even boundary. Aggregates larger than 8 bytes start at the next
3433 even boundary if the aggregate has 16 byte alignment. Note that
3434 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3435 but are still to be aligned in registers.
3437 ??? The ABI does not specify how to handle aggregates with
3438 alignment from 9 to 15 bytes, or greater than 16. We handle them
3439 all as if they had 16 byte alignment. Such aggregates can occur
3440 only if gcc extensions are used. */
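/* Worked example (hypothetical): if an odd number of argument slots is
   already in use and the next argument is a 16-byte-aligned aggregate, a
   single slot is skipped so the aggregate starts on an even slot; an
   8-byte-or-smaller integer or float never causes a skip.  */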
3442 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3444 if ((cum->words & 1) == 0)
3448 && TREE_CODE (type) != INTEGER_TYPE
3449 && TREE_CODE (type) != REAL_TYPE)
3450 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3455 /* Return rtx for register where argument is passed, or zero if it is passed on the stack. */
3457 /* ??? 128-bit quad-precision floats are always passed in general registers. */
3461 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3462 int named, int incoming)
3464 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3465 int words = ia64_function_arg_words (type, mode);
3466 int offset = ia64_function_arg_offset (cum, type, words);
3467 enum machine_mode hfa_mode = VOIDmode;
3469 /* If all argument slots are used, then it must go on the stack. */