/* Definitions of target machine for GNU compiler.
Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004
Free Software Foundation, Inc.
Contributed by James E. Wilson <wilson@cygnus.com> and
David Mosberger <davidm@hpl.hp.com>.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "sched-int.h"
#include "target-def.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "tree-gimple.h"
/* This is used for communication between ASM_OUTPUT_LABEL and
ASM_OUTPUT_LABELREF. */
int ia64_asm_output_label = 0;
/* Define the information needed to generate branch and scc insns. This is
stored from the compare operation. */
struct rtx_def * ia64_compare_op0;
struct rtx_def * ia64_compare_op1;
/* Register names for ia64_expand_prologue. */
static const char * const ia64_reg_numbers[96] =
{ "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
"r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
"r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
"r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
"r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
"r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
"r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
"r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
"r96", "r97", "r98", "r99", "r100","r101","r102","r103",
"r104","r105","r106","r107","r108","r109","r110","r111",
"r112","r113","r114","r115","r116","r117","r118","r119",
"r120","r121","r122","r123","r124","r125","r126","r127"};
/* ??? These strings could be shared with REGISTER_NAMES. */
static const char * const ia64_input_reg_names[8] =
{ "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
/* ??? These strings could be shared with REGISTER_NAMES. */
static const char * const ia64_local_reg_names[80] =
{ "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
"loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
"loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
"loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
"loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
"loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
"loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
"loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
"loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
"loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
/* ??? These strings could be shared with REGISTER_NAMES. */
static const char * const ia64_output_reg_names[8] =
{ "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
/* String used with the -mfixed-range= option. */
const char *ia64_fixed_range_string;
/* Determines whether we use adds, addl, or movl to generate our
TLS immediate offsets. */
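/* A size of 14 fits the immediate of an "adds", 22 fits an "addl", and
64 requires a "movl" (see the -mtls-size= option). */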
int ia64_tls_size = 22;
/* String used with the -mtls-size= option. */
const char *ia64_tls_size_string;
/* Which cpu are we scheduling for. */
enum processor_type ia64_tune;
/* String used with the -tune= option. */
const char *ia64_tune_string;
/* Determines whether we run our final scheduling pass or not. We always
avoid the normal second scheduling pass. */
static int ia64_flag_schedule_insns2;
/* Determines whether we run variable tracking in machine dependent
static int ia64_flag_var_tracking;
/* Variables which are this size or smaller are put in the sdata/sbss
unsigned int ia64_section_threshold;
/* The following variable is used by the DFA insn scheduler. The value is
TRUE if we do insn bundling instead of insn scheduling. */
/* Structure to be filled in by ia64_compute_frame_size with register
save masks and offsets for the current function. */
struct ia64_frame_info
HOST_WIDE_INT total_size; /* size of the stack frame, not including
the caller's scratch area. */
HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
HARD_REG_SET mask; /* mask of saved registers. */
unsigned int gr_used_mask; /* mask of registers in use as gr spill
registers or long-term scratches. */
int n_spilled; /* number of spilled registers. */
int reg_fp; /* register for fp. */
int reg_save_b0; /* save register for b0. */
int reg_save_pr; /* save register for prs. */
int reg_save_ar_pfs; /* save register for ar.pfs. */
int reg_save_ar_unat; /* save register for ar.unat. */
int reg_save_ar_lc; /* save register for ar.lc. */
int reg_save_gp; /* save register for gp. */
int n_input_regs; /* number of input registers used. */
int n_local_regs; /* number of local registers used. */
int n_output_regs; /* number of output registers used. */
int n_rotate_regs; /* number of rotating registers used. */
char need_regstk; /* true if a .regstk directive needed. */
char initialized; /* true if the data is finalized. */
/* Current frame information calculated by ia64_compute_frame_size. */
static struct ia64_frame_info current_frame_info;
static int ia64_first_cycle_multipass_dfa_lookahead (void);
static void ia64_dependencies_evaluation_hook (rtx, rtx);
static void ia64_init_dfa_pre_cycle_insn (void);
static rtx ia64_dfa_pre_cycle_insn (void);
static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
static rtx gen_tls_get_addr (void);
static rtx gen_thread_pointer (void);
static rtx ia64_expand_tls_address (enum tls_model, rtx, rtx);
static int find_gr_spill (int);
static int next_scratch_gr_reg (void);
static void mark_reg_gr_used_mask (rtx, void *);
static void ia64_compute_frame_size (HOST_WIDE_INT);
static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
static void finish_spill_pointers (void);
static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
static rtx gen_movdi_x (rtx, rtx, rtx);
static rtx gen_fr_spill_x (rtx, rtx, rtx);
static rtx gen_fr_restore_x (rtx, rtx, rtx);
static enum machine_mode hfa_element_mode (tree, int);
static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
static bool ia64_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
static bool ia64_function_ok_for_sibcall (tree, tree);
static bool ia64_return_in_memory (tree, tree);
static bool ia64_rtx_costs (rtx, int, int, int *);
static void fix_range (const char *);
static struct machine_function * ia64_init_machine_status (void);
static void emit_insn_group_barriers (FILE *);
static void emit_all_insn_group_barriers (FILE *);
static void final_emit_insn_group_barriers (FILE *);
static void emit_predicate_relation_info (void);
static void ia64_reorg (void);
static bool ia64_in_small_data_p (tree);
static void process_epilogue (void);
static int process_set (FILE *, rtx);
static rtx ia64_expand_fetch_and_op (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_op_and_fetch (optab, enum machine_mode, tree, rtx);
static rtx ia64_expand_compare_and_swap (enum machine_mode, enum machine_mode,
static rtx ia64_expand_lock_test_and_set (enum machine_mode, tree, rtx);
static rtx ia64_expand_lock_release (enum machine_mode, tree, rtx);
static bool ia64_assemble_integer (rtx, unsigned int, int);
static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void ia64_output_function_end_prologue (FILE *);
static int ia64_issue_rate (void);
static int ia64_adjust_cost (rtx, rtx, rtx, int);
static void ia64_sched_init (FILE *, int, int);
static void ia64_sched_finish (FILE *, int);
static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int ia64_variable_issue (FILE *, int, rtx, int);
static struct bundle_state *get_free_bundle_state (void);
static void free_bundle_state (struct bundle_state *);
static void initiate_bundle_states (void);
static void finish_bundle_states (void);
static unsigned bundle_state_hash (const void *);
static int bundle_state_eq_p (const void *, const void *);
static int insert_bundle_state (struct bundle_state *);
static void initiate_bundle_state_table (void);
static void finish_bundle_state_table (void);
static int try_issue_nops (struct bundle_state *, int);
static int try_issue_insn (struct bundle_state *, rtx);
static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
static int get_max_pos (state_t);
static int get_template (state_t, int);
static rtx get_next_important_insn (rtx, rtx);
static void bundling (FILE *, int, rtx, rtx);
static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
HOST_WIDE_INT, tree);
static void ia64_file_start (void);
static void ia64_select_rtx_section (enum machine_mode, rtx,
unsigned HOST_WIDE_INT);
static void ia64_rwreloc_select_section (tree, int, unsigned HOST_WIDE_INT)
static void ia64_rwreloc_unique_section (tree, int)
static void ia64_rwreloc_select_rtx_section (enum machine_mode, rtx,
unsigned HOST_WIDE_INT)
static unsigned int ia64_rwreloc_section_type_flags (tree, const char *, int)
static void ia64_hpux_add_extern_decl (tree decl)
static void ia64_hpux_file_end (void)
static void ia64_init_libfuncs (void)
static void ia64_hpux_init_libfuncs (void)
static void ia64_sysv4_init_libfuncs (void)
static void ia64_vms_init_libfuncs (void)
static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
static void ia64_encode_section_info (tree, rtx, int);
static rtx ia64_struct_value_rtx (tree, int);
static tree ia64_gimplify_va_arg (tree, tree, tree *, tree *);
static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
/* Table of valid machine attributes. */
static const struct attribute_spec ia64_attribute_table[] =
/* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
{ "syscall_linkage", 0, 0, false, true, true, NULL },
{ "model", 1, 1, true, false, false, ia64_handle_model_attribute },
{ NULL, 0, 0, false, false, false, NULL }
/* Initialize the GCC target structure. */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS ia64_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN ia64_expand_builtin
#undef TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tdata1\t"
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER ia64_assemble_integer
#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
#undef TARGET_ASM_FUNCTION_END_PROLOGUE
#define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT ia64_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH ia64_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER ia64_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 ia64_sched_reorder2
#undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
#define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
#undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
#undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
#define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
ia64_first_cycle_multipass_dfa_lookahead_guard
#undef TARGET_SCHED_DFA_NEW_CYCLE
#define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE ia64_pass_by_reference
#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
#undef TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START ia64_file_start
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS ia64_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0
#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
/* ??? ABI doesn't allow us to define this. */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
/* ??? ABI doesn't allow us to define this. */
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
/* ??? Investigate. */
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
#undef TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT process_for_unwind_directive
#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
struct gcc_target targetm = TARGET_INITIALIZER;
ADDR_AREA_NORMAL, /* normal address area */
ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
static GTY(()) tree small_ident1;
static GTY(()) tree small_ident2;
if (small_ident1 == 0)
small_ident1 = get_identifier ("small");
small_ident2 = get_identifier ("__small__");
/* Retrieve the address area that has been chosen for the given decl. */
static ia64_addr_area
ia64_get_addr_area (tree decl)
model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
id = TREE_VALUE (TREE_VALUE (model_attr));
if (id == small_ident1 || id == small_ident2)
return ADDR_AREA_SMALL;
return ADDR_AREA_NORMAL;
ia64_handle_model_attribute (tree *node, tree name, tree args, int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
ia64_addr_area addr_area = ADDR_AREA_NORMAL;
tree arg, decl = *node;
arg = TREE_VALUE (args);
if (arg == small_ident1 || arg == small_ident2)
addr_area = ADDR_AREA_SMALL;
warning ("invalid argument of `%s' attribute",
IDENTIFIER_POINTER (name));
*no_add_attrs = true;
switch (TREE_CODE (decl))
if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
&& !TREE_STATIC (decl))
error ("%Jan address area attribute cannot be specified for "
"local variables", decl, decl);
*no_add_attrs = true;
area = ia64_get_addr_area (decl);
if (area != ADDR_AREA_NORMAL && addr_area != area)
error ("%Jaddress area of '%s' conflicts with previous "
"declaration", decl, decl);
*no_add_attrs = true;
error ("%Jaddress area attribute cannot be specified for functions",
*no_add_attrs = true;
warning ("`%s' attribute ignored", IDENTIFIER_POINTER (name));
*no_add_attrs = true;
ia64_encode_addr_area (tree decl, rtx symbol)
flags = SYMBOL_REF_FLAGS (symbol);
switch (ia64_get_addr_area (decl))
case ADDR_AREA_NORMAL: break;
case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
SYMBOL_REF_FLAGS (symbol) = flags;
ia64_encode_section_info (tree decl, rtx rtl, int first)
default_encode_section_info (decl, rtl, first);
/* Careful not to prod global register variables. */
if (TREE_CODE (decl) == VAR_DECL
&& GET_CODE (DECL_RTL (decl)) == MEM
&& GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
&& (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
ia64_encode_addr_area (decl, XEXP (rtl, 0));
/* Return 1 if the operands of a move are ok. */
ia64_move_ok (rtx dst, rtx src)
/* If we're under init_recog_no_volatile, we'll not be able to use
memory_operand. So check the code directly and don't worry about
the validity of the underlying address, which should have been
checked elsewhere anyway. */
if (GET_CODE (dst) != MEM)
if (GET_CODE (src) == MEM)
if (register_operand (src, VOIDmode))
/* Otherwise, this must be a constant, and that either 0 or 0.0 or 1.0. */
if (INTEGRAL_MODE_P (GET_MODE (dst)))
return src == const0_rtx;
return GET_CODE (src) == CONST_DOUBLE && CONST_DOUBLE_OK_FOR_G (src);
addp4_optimize_ok (rtx op1, rtx op2)
return (basereg_operand (op1, GET_MODE(op1)) !=
basereg_operand (op2, GET_MODE(op2)));
/* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
Return the length of the field, or <= 0 on failure. */
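/* For example (illustrative): ROP = 0x3fc0 with RSHIFT = 6 shifts down to
0xff, a solid block of eight 1 bits, so the field length is 8. ROP = 0x5000
with RSHIFT = 12 leaves 0x5, which is not contiguous, so exact_log2 fails
and the result is <= 0. */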
ia64_depz_field_mask (rtx rop, rtx rshift)
unsigned HOST_WIDE_INT op = INTVAL (rop);
unsigned HOST_WIDE_INT shift = INTVAL (rshift);
/* Get rid of the zero bits we're shifting in. */
/* We must now have a solid block of 1's at bit 0. */
return exact_log2 (op + 1);
/* Expand a symbolic constant load. */
ia64_expand_load_address (rtx dest, rtx src)
if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (src))
if (GET_CODE (dest) != REG)
/* ILP32 mode still loads 64-bits of data from the GOT. This avoids
having to pointer-extend the value afterward. Other forms of address
computation below are also more natural to compute as 64-bit quantities.
If we've been given an SImode destination register, change it. */
if (GET_MODE (dest) != Pmode)
dest = gen_rtx_REG (Pmode, REGNO (dest));
if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_SMALL_ADDR_P (src))
emit_insn (gen_rtx_SET (VOIDmode, dest, src));
else if (TARGET_AUTO_PIC)
emit_insn (gen_load_gprel64 (dest, src));
else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
emit_insn (gen_load_fptr (dest, src));
else if (sdata_symbolic_operand (src, VOIDmode))
emit_insn (gen_load_gprel (dest, src));
if (GET_CODE (src) == CONST
&& GET_CODE (XEXP (src, 0)) == PLUS
&& GET_CODE (XEXP (XEXP (src, 0), 1)) == CONST_INT
&& (INTVAL (XEXP (XEXP (src, 0), 1)) & 0x3fff) != 0)
rtx sym = XEXP (XEXP (src, 0), 0);
HOST_WIDE_INT ofs, hi, lo;
/* Split the offset into a sign extended 14-bit low part
and a complementary high part. */
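/* Worked example (illustrative): for ofs = 0x12345 the low part is
lo = -0x1cbb and the high part is hi = 0x14000, so hi + lo == ofs,
hi is a multiple of 0x4000, and lo fits the 14-bit "adds" immediate. */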
ofs = INTVAL (XEXP (XEXP (src, 0), 1));
lo = ((ofs & 0x3fff) ^ 0x2000) - 0x2000;
ia64_expand_load_address (dest, plus_constant (sym, hi));
emit_insn (gen_adddi3 (dest, dest, GEN_INT (lo)));
tmp = gen_rtx_HIGH (Pmode, src);
tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
tmp = gen_rtx_LO_SUM (GET_MODE (dest), dest, src);
emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
static GTY(()) rtx gen_tls_tga;
gen_tls_get_addr (void)
gen_tls_tga = init_one_libfunc ("__tls_get_addr");
static GTY(()) rtx thread_pointer_rtx;
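/* The IA-64 thread pointer lives in general register r13 (tp). */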
gen_thread_pointer (void)
if (!thread_pointer_rtx)
thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
return thread_pointer_rtx;
ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1)
rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
case TLS_MODEL_GLOBAL_DYNAMIC:
tga_op1 = gen_reg_rtx (Pmode);
emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
tga_op1 = gen_const_mem (Pmode, tga_op1);
tga_op2 = gen_reg_rtx (Pmode);
emit_insn (gen_load_ltoff_dtprel (tga_op2, op1));
tga_op2 = gen_const_mem (Pmode, tga_op2);
tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
LCT_CONST, Pmode, 2, tga_op1,
Pmode, tga_op2, Pmode);
insns = get_insns ();
if (GET_MODE (op0) != Pmode)
emit_libcall_block (insns, op0, tga_ret, op1);
case TLS_MODEL_LOCAL_DYNAMIC:
/* ??? This isn't the completely proper way to do local-dynamic
If the call to __tls_get_addr is used only by a single symbol,
then we should (somehow) move the dtprel to the second arg
to avoid the extra add. */
tga_op1 = gen_reg_rtx (Pmode);
emit_insn (gen_load_ltoff_dtpmod (tga_op1, op1));
tga_op1 = gen_const_mem (Pmode, tga_op1);
tga_op2 = const0_rtx;
tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
LCT_CONST, Pmode, 2, tga_op1,
Pmode, tga_op2, Pmode);
insns = get_insns ();
tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
tmp = gen_reg_rtx (Pmode);
emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
if (!register_operand (op0, Pmode))
op0 = gen_reg_rtx (Pmode);
emit_insn (gen_load_dtprel (op0, op1));
emit_insn (gen_adddi3 (op0, tmp, op0));
emit_insn (gen_add_dtprel (op0, tmp, op1));
case TLS_MODEL_INITIAL_EXEC:
tmp = gen_reg_rtx (Pmode);
emit_insn (gen_load_ltoff_tprel (tmp, op1));
tmp = gen_const_mem (Pmode, tmp);
tmp = force_reg (Pmode, tmp);
if (!register_operand (op0, Pmode))
op0 = gen_reg_rtx (Pmode);
emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
case TLS_MODEL_LOCAL_EXEC:
if (!register_operand (op0, Pmode))
op0 = gen_reg_rtx (Pmode);
emit_insn (gen_load_tprel (op0, op1));
emit_insn (gen_adddi3 (op0, gen_thread_pointer (), op0));
emit_insn (gen_add_tprel (op0, gen_thread_pointer (), op1));
if (GET_MODE (orig_op0) == Pmode)
return gen_lowpart (GET_MODE (orig_op0), op0);
ia64_expand_move (rtx op0, rtx op1)
enum machine_mode mode = GET_MODE (op0);
if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
op1 = force_reg (mode, op1);
if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
enum tls_model tls_kind;
if (GET_CODE (op1) == SYMBOL_REF
&& (tls_kind = SYMBOL_REF_TLS_MODEL (op1)))
return ia64_expand_tls_address (tls_kind, op0, op1);
if (!TARGET_NO_PIC && reload_completed)
ia64_expand_load_address (op0, op1);
/* Split a move from OP1 to OP0 conditional on COND. */
ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
rtx insn, first = get_last_insn ();
emit_move_insn (op0, op1);
for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
/* Split a post-reload TImode or TFmode reference into two DImode
components. This is made extra difficult by the fact that we do
not get any scratch registers to work with, because reload cannot
be prevented from giving us a scratch that overlaps the register
pair involved. So instead, when addressing memory, we tweak the
pointer register up and back down with POST_INCs. Or up and not
back down when we can get away with it.
REVERSED is true when the loads must be done in reversed order
(high word first) for correctness. DEAD is true when the pointer
dies with the second insn we generate and therefore the second
address must not carry a postmodify.
May return an insn which is to be emitted after the moves. */
ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
switch (GET_CODE (in))
out[reversed] = gen_rtx_REG (DImode, REGNO (in));
out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
/* Cannot occur reversed. */
if (reversed) abort ();
if (GET_MODE (in) != TFmode)
split_double (in, &out[0], &out[1]);
/* split_double does not understand how to split a TFmode
quantity into a pair of DImode constants. */
unsigned HOST_WIDE_INT p[2];
long l[4]; /* TFmode is 128 bits */
REAL_VALUE_FROM_CONST_DOUBLE (r, in);
real_to_target (l, &r, TFmode);
if (FLOAT_WORDS_BIG_ENDIAN)
p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
p[0] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
p[1] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
out[0] = GEN_INT (p[0]);
out[1] = GEN_INT (p[1]);
rtx base = XEXP (in, 0);
switch (GET_CODE (base))
out[0] = adjust_automodify_address
(in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
out[1] = adjust_automodify_address
(in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
/* Reversal requires a pre-increment, which can only
be done as a separate insn. */
emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
out[0] = adjust_automodify_address
(in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
out[1] = adjust_address (in, DImode, 0);
if (reversed || dead) abort ();
/* Just do the increment in two steps. */
out[0] = adjust_automodify_address (in, DImode, 0, 0);
out[1] = adjust_automodify_address (in, DImode, 0, 8);
if (reversed || dead) abort ();
/* Add 8, subtract 24. */
base = XEXP (base, 0);
out[0] = adjust_automodify_address
(in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
out[1] = adjust_automodify_address
gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
if (reversed || dead) abort ();
/* Extract and adjust the modification. This case is
trickier than the others, because we might have an
index register, or we might have a combined offset that
doesn't fit a signed 9-bit displacement field. We can
assume the incoming expression is already legitimate. */
offset = XEXP (base, 1);
base = XEXP (base, 0);
out[0] = adjust_automodify_address
(in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
if (GET_CODE (XEXP (offset, 1)) == REG)
/* Can't adjust the postmodify to match. Emit the
original, then a separate addition insn. */
out[1] = adjust_automodify_address (in, DImode, 0, 8);
fixup = gen_adddi3 (base, base, GEN_INT (-8));
else if (GET_CODE (XEXP (offset, 1)) != CONST_INT)
else if (INTVAL (XEXP (offset, 1)) < -256 + 8)
/* Again the postmodify cannot be made to match, but
in this case it's more efficient to get rid of the
postmodify entirely and fix up with an add insn. */
out[1] = adjust_automodify_address (in, DImode, base, 8);
fixup = gen_adddi3 (base, base,
GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
/* Combined offset still fits in the displacement field.
(We cannot overflow it at the high end.) */
out[1] = adjust_automodify_address
gen_rtx_POST_MODIFY (Pmode, base,
gen_rtx_PLUS (Pmode, base,
GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
/* Split a TImode or TFmode move instruction after reload.
This is used by *movtf_internal and *movti_internal. */
ia64_split_tmode_move (rtx operands[])
rtx in[2], out[2], insn;
bool reversed = false;
/* It is possible for reload to decide to overwrite a pointer with
the value it points to. In that case we have to do the loads in
the appropriate order so that the pointer is not destroyed too
early. Also we must not generate a postmodify for that second
load, or rws_access_regno will abort. */
if (GET_CODE (operands[1]) == MEM
&& reg_overlap_mentioned_p (operands[0], operands[1]))
rtx base = XEXP (operands[1], 0);
while (GET_CODE (base) != REG)
base = XEXP (base, 0);
if (REGNO (base) == REGNO (operands[0]))
/* Another reason to do the moves in reversed order is if the first
element of the target register pair is also the second element of
the source register pair. */
if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
&& REGNO (operands[0]) == REGNO (operands[1]) + 1)
fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
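/* Attach a REG_INC note when the memory address uses an autoincrement, so
later passes know the base register is modified by the move. */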
#define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
if (GET_CODE (EXP) == MEM \
&& (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
|| GET_CODE (XEXP (EXP, 0)) == POST_INC \
|| GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
XEXP (XEXP (EXP, 0), 0), \
insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
emit_insn (fixup[0]);
emit_insn (fixup[1]);
#undef MAYBE_ADD_REG_INC_NOTE
/* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
through memory plus an extra GR scratch register. Except that you can
either get the first from SECONDARY_MEMORY_NEEDED or the second from
SECONDARY_RELOAD_CLASS, but not both.
We got into problems in the first place by allowing a construct like
(subreg:XF (reg:TI)), which we got from a union containing a long double.
This solution attempts to prevent this situation from occurring. When
we see something like the above, we spill the inner register to memory. */
spill_xfmode_operand (rtx in, int force)
if (GET_CODE (in) == SUBREG
&& GET_MODE (SUBREG_REG (in)) == TImode
&& GET_CODE (SUBREG_REG (in)) == REG)
rtx memt = assign_stack_temp (TImode, 16, 0);
emit_move_insn (memt, SUBREG_REG (in));
return adjust_address (memt, XFmode, 0);
else if (force && GET_CODE (in) == REG)
rtx memx = assign_stack_temp (XFmode, 16, 0);
emit_move_insn (memx, in);
/* Emit comparison instruction if necessary, returning the expression
that holds the compare result in the proper mode. */
static GTY(()) rtx cmptf_libfunc;
ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
/* If we have a BImode input, then we already have a compare result, and
do not need to emit another comparison. */
if (GET_MODE (op0) == BImode)
if ((code == NE || code == EQ) && op1 == const0_rtx)
/* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
magic number as its third argument, that indicates what to do.
The return value is an integer to be compared against zero. */
else if (GET_MODE (op0) == TFmode)
QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
enum rtx_code ncode;
if (!cmptf_libfunc || GET_MODE (op1) != TFmode)
/* 1 = equal, 0 = not equal. Equality operators do
not raise FP_INVALID when given an SNaN operand. */
case EQ: magic = QCMP_EQ; ncode = NE; break;
case NE: magic = QCMP_EQ; ncode = EQ; break;
/* isunordered() from C99. */
case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
/* Relational operators raise FP_INVALID when given
case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
/* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
Expanders for buneq etc. would have to be added to ia64.md
for this to be useful. */
ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
op0, TFmode, op1, TFmode,
GEN_INT (magic), DImode);
cmp = gen_reg_rtx (BImode);
emit_insn (gen_rtx_SET (VOIDmode, cmp,
gen_rtx_fmt_ee (ncode, BImode,
insns = get_insns ();
emit_libcall_block (insns, cmp, cmp,
gen_rtx_fmt_ee (code, BImode, op0, op1));
cmp = gen_reg_rtx (BImode);
emit_insn (gen_rtx_SET (VOIDmode, cmp,
gen_rtx_fmt_ee (code, BImode, op0, op1)));
return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
/* Emit the appropriate sequence for a call. */
ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
addr = XEXP (addr, 0);
addr = convert_memory_address (DImode, addr);
b0 = gen_rtx_REG (DImode, R_BR (0));
/* ??? Should do this for functions known to bind local too. */
if (TARGET_NO_PIC || TARGET_AUTO_PIC)
insn = gen_sibcall_nogp (addr);
insn = gen_call_nogp (addr, b0);
insn = gen_call_value_nogp (retval, addr, b0);
insn = emit_call_insn (insn);
insn = gen_sibcall_gp (addr);
insn = gen_call_gp (addr, b0);
insn = gen_call_value_gp (retval, addr, b0);
insn = emit_call_insn (insn);
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
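/* Restore the global pointer (GP) after a call, either from the general
register the prologue set aside for it or from its stack slot. */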
ia64_reload_gp (void)
if (current_frame_info.reg_save_gp)
tmp = gen_rtx_REG (DImode, current_frame_info.reg_save_gp);
HOST_WIDE_INT offset;
offset = (current_frame_info.spill_cfa_off
+ current_frame_info.spill_size);
if (frame_pointer_needed)
tmp = hard_frame_pointer_rtx;
tmp = stack_pointer_rtx;
offset = current_frame_info.total_size - offset;
if (CONST_OK_FOR_I (offset))
emit_insn (gen_adddi3 (pic_offset_table_rtx,
tmp, GEN_INT (offset)));
emit_move_insn (pic_offset_table_rtx, GEN_INT (offset));
emit_insn (gen_adddi3 (pic_offset_table_rtx,
pic_offset_table_rtx, tmp));
tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
emit_move_insn (pic_offset_table_rtx, tmp);
ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
rtx scratch_b, int noreturn_p, int sibcall_p)
bool is_desc = false;
/* If we find we're calling through a register, then we're actually
calling through a descriptor, so load up the values. */
if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
/* ??? We are currently constrained to *not* use peep2, because
we can legitimately change the global lifetime of the GP
(in the form of killing where previously live). This is
because a call through a descriptor doesn't use the previous
value of the GP, while a direct call does, and we do not
commit to either form until the split here.
That said, this means that we lack precise life info for
whether ADDR is dead after this call. This is not terribly
important, since we can fix things up essentially for free
with the POST_DEC below, but it's nice to not use it when we
can immediately tell it's not necessary. */
addr_dead_p = ((noreturn_p || sibcall_p
|| TEST_HARD_REG_BIT (regs_invalidated_by_call,
&& !FUNCTION_ARG_REGNO_P (REGNO (addr)));
/* Load the code address into scratch_b. */
tmp = gen_rtx_POST_INC (Pmode, addr);
tmp = gen_rtx_MEM (Pmode, tmp);
emit_move_insn (scratch_r, tmp);
emit_move_insn (scratch_b, scratch_r);
/* Load the GP address. If ADDR is not dead here, then we must
revert the change made above via the POST_INCREMENT. */
tmp = gen_rtx_POST_DEC (Pmode, addr);
tmp = gen_rtx_MEM (Pmode, tmp);
emit_move_insn (pic_offset_table_rtx, tmp);
insn = gen_sibcall_nogp (addr);
insn = gen_call_value_nogp (retval, addr, retaddr);
insn = gen_call_nogp (addr, retaddr);
emit_call_insn (insn);
if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
/* Begin the assembly file. */
ia64_file_start (void)
default_file_start ();
emit_safe_across_calls ();
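/* Emit the .pred.safe_across_calls directive, listing the ranges of
predicate registers that are preserved (not call-clobbered) across calls. */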
emit_safe_across_calls (void)
unsigned int rs, re;
while (rs < 64 && call_used_regs[PR_REG (rs)])
for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
fputs ("\t.pred.safe_across_calls ", asm_out_file);
fputc (',', asm_out_file);
fprintf (asm_out_file, "p%u", rs);
fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
fputc ('\n', asm_out_file);
/* Helper function for ia64_compute_frame_size: find an appropriate general
register to spill some special register to. SPECIAL_SPILL_MASK contains
bits in GR0 to GR31 that have already been allocated by this routine.
TRY_LOCALS is true if we should attempt to locate a local regnum. */
find_gr_spill (int try_locals)
/* If this is a leaf function, first try an otherwise unused
call-clobbered register. */
if (current_function_is_leaf)
for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
if (! regs_ever_live[regno]
&& call_used_regs[regno]
&& ! fixed_regs[regno]
&& ! global_regs[regno]
&& ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
current_frame_info.gr_used_mask |= 1 << regno;
regno = current_frame_info.n_local_regs;
/* If there is a frame pointer, then we can't use loc79, because
that is HARD_FRAME_POINTER_REGNUM. In particular, see the
reg_name switching code in ia64_expand_prologue. */
if (regno < (80 - frame_pointer_needed))
current_frame_info.n_local_regs = regno + 1;
return LOC_REG (0) + regno;
/* Failed to find a general register to spill to. Must use stack. */
/* In order to make for nice schedules, we try to allocate every temporary
to a different register. We must of course stay away from call-saved,
fixed, and global registers. We must also stay away from registers
allocated in current_frame_info.gr_used_mask, since those include regs
used all through the prologue.
Any register allocated here must be used immediately. The idea is to
aid scheduling, not to solve data flow problems. */
static int last_scratch_gr_reg;
next_scratch_gr_reg (void)
for (i = 0; i < 32; ++i)
regno = (last_scratch_gr_reg + i + 1) & 31;
if (call_used_regs[regno]
&& ! fixed_regs[regno]
&& ! global_regs[regno]
&& ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
last_scratch_gr_reg = regno;
/* There must be _something_ available. */
/* Helper function for ia64_compute_frame_size, called through
diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
unsigned int regno = REGNO (reg);
unsigned int i, n = HARD_REGNO_NREGS (regno, GET_MODE (reg));
for (i = 0; i < n; ++i)
current_frame_info.gr_used_mask |= 1 << (regno + i);
/* Returns the number of bytes offset between the frame pointer and the stack
pointer for the current function. SIZE is the number of bytes of space
needed for local variables. */
ia64_compute_frame_size (HOST_WIDE_INT size)
HOST_WIDE_INT total_size;
HOST_WIDE_INT spill_size = 0;
HOST_WIDE_INT extra_spill_size = 0;
HOST_WIDE_INT pretend_args_size;
int spilled_gr_p = 0;
int spilled_fr_p = 0;
if (current_frame_info.initialized)
memset (&current_frame_info, 0, sizeof current_frame_info);
CLEAR_HARD_REG_SET (mask);
/* Don't allocate scratches to the return register. */
diddle_return_value (mark_reg_gr_used_mask, NULL);
/* Don't allocate scratches to the EH scratch registers. */
if (cfun->machine->ia64_eh_epilogue_sp)
mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
if (cfun->machine->ia64_eh_epilogue_bsp)
mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
/* Find the size of the register stack frame. We have only 80 local
registers, because we reserve 8 for the inputs and 8 for the
/* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
since we'll be adjusting that down later. */
regno = LOC_REG (78) + ! frame_pointer_needed;
for (; regno >= LOC_REG (0); regno--)
if (regs_ever_live[regno])
current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
/* For functions marked with the syscall_linkage attribute, we must mark
all eight input registers as in use, so that locals aren't visible to
if (cfun->machine->n_varargs > 0
|| lookup_attribute ("syscall_linkage",
TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
current_frame_info.n_input_regs = 8;
for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
if (regs_ever_live[regno])
current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
if (regs_ever_live[regno])
i = regno - OUT_REG (0) + 1;
/* When -p profiling, we need one output register for the mcount argument.
Likewise for -a profiling for the bb_init_func argument. For -ax
profiling, we need two output registers for the two bb_init_trace_func
if (current_function_profile)
current_frame_info.n_output_regs = i;
/* ??? No rotating register support yet. */
current_frame_info.n_rotate_regs = 0;
/* Discover which registers need spilling, and how much room that
will take. Begin with floating point and general registers,
which will always wind up on the stack. */
for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
if (regs_ever_live[regno] && ! call_used_regs[regno])
SET_HARD_REG_BIT (mask, regno);
for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
if (regs_ever_live[regno] && ! call_used_regs[regno])
SET_HARD_REG_BIT (mask, regno);
for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
if (regs_ever_live[regno] && ! call_used_regs[regno])
SET_HARD_REG_BIT (mask, regno);
/* Now come all special registers that might get saved in other
general registers. */
if (frame_pointer_needed)
current_frame_info.reg_fp = find_gr_spill (1);
/* If we did not get a register, then we take LOC79. This is guaranteed
to be free, even if regs_ever_live is already set, because this is
HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
as we don't count loc79 above. */
if (current_frame_info.reg_fp == 0)
current_frame_info.reg_fp = LOC_REG (79);
current_frame_info.n_local_regs++;
if (! current_function_is_leaf)
/* Emit a save of BR0 if we call other functions. Do this even
if this function doesn't return, as EH depends on this to be
able to unwind the stack. */
SET_HARD_REG_BIT (mask, BR_REG (0));
current_frame_info.reg_save_b0 = find_gr_spill (1);
if (current_frame_info.reg_save_b0 == 0)
/* Similarly for ar.pfs. */
SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
if (current_frame_info.reg_save_ar_pfs == 0)
extra_spill_size += 8;
/* Similarly for gp. Note that if we're calling setjmp, the stacked
registers are clobbered, so we fall back to the stack. */
current_frame_info.reg_save_gp
= (current_function_calls_setjmp ? 0 : find_gr_spill (1));
if (current_frame_info.reg_save_gp == 0)
SET_HARD_REG_BIT (mask, GR_REG (1));
if (regs_ever_live[BR_REG (0)] && ! call_used_regs[BR_REG (0)])
SET_HARD_REG_BIT (mask, BR_REG (0));
if (regs_ever_live[AR_PFS_REGNUM])
SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
current_frame_info.reg_save_ar_pfs = find_gr_spill (1);
if (current_frame_info.reg_save_ar_pfs == 0)
extra_spill_size += 8;
/* Unwind descriptor hackery: things are most efficient if we allocate
consecutive GR save registers for RP, PFS, FP in that order. However,
it is absolutely critical that FP get the only hard register that's
guaranteed to be free, so we allocated it first. If all three did
happen to be allocated hard regs, and are consecutive, rearrange them
into the preferred order now. */
if (current_frame_info.reg_fp != 0
&& current_frame_info.reg_save_b0 == current_frame_info.reg_fp + 1
&& current_frame_info.reg_save_ar_pfs == current_frame_info.reg_fp + 2)
current_frame_info.reg_save_b0 = current_frame_info.reg_fp;
current_frame_info.reg_save_ar_pfs = current_frame_info.reg_fp + 1;
current_frame_info.reg_fp = current_frame_info.reg_fp + 2;
/* See if we need to store the predicate register block. */
for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
if (regs_ever_live[regno] && ! call_used_regs[regno])
if (regno <= PR_REG (63))
SET_HARD_REG_BIT (mask, PR_REG (0));
current_frame_info.reg_save_pr = find_gr_spill (1);
if (current_frame_info.reg_save_pr == 0)
extra_spill_size += 8;
/* ??? Mark them all as used so that register renaming and such
are free to use them. */
for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
regs_ever_live[regno] = 1;
/* If we're forced to use st8.spill, we're forced to save and restore
ar.unat as well. The check for existing liveness allows inline asm
to touch ar.unat. */
if (spilled_gr_p || cfun->machine->n_varargs
|| regs_ever_live[AR_UNAT_REGNUM])
regs_ever_live[AR_UNAT_REGNUM] = 1;
SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
current_frame_info.reg_save_ar_unat = find_gr_spill (spill_size == 0);
if (current_frame_info.reg_save_ar_unat == 0)
extra_spill_size += 8;
if (regs_ever_live[AR_LC_REGNUM])
SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
current_frame_info.reg_save_ar_lc = find_gr_spill (spill_size == 0);
if (current_frame_info.reg_save_ar_lc == 0)
extra_spill_size += 8;
/* If we have an odd number of words of pretend arguments written to
the stack, then the FR save area will be unaligned. We round the
size of this area up to keep things 16 byte aligned. */
pretend_args_size = IA64_STACK_ALIGN (current_function_pretend_args_size);
pretend_args_size = current_function_pretend_args_size;
total_size = (spill_size + extra_spill_size + size + pretend_args_size
+ current_function_outgoing_args_size);
total_size = IA64_STACK_ALIGN (total_size);
/* We always use the 16-byte scratch area provided by the caller, but
if we are a leaf function, there's no one to which we need to provide
if (current_function_is_leaf)
total_size = MAX (0, total_size - 16);
current_frame_info.total_size = total_size;
current_frame_info.spill_cfa_off = pretend_args_size - 16;
current_frame_info.spill_size = spill_size;
current_frame_info.extra_spill_size = extra_spill_size;
COPY_HARD_REG_SET (current_frame_info.mask, mask);
current_frame_info.n_spilled = n_spilled;
current_frame_info.initialized = reload_completed;
/* Compute the initial difference between the specified pair of registers. */
ia64_initial_elimination_offset (int from, int to)
HOST_WIDE_INT offset;
ia64_compute_frame_size (get_frame_size ());
case FRAME_POINTER_REGNUM:
if (to == HARD_FRAME_POINTER_REGNUM)
if (current_function_is_leaf)
offset = -current_frame_info.total_size;
offset = -(current_frame_info.total_size
- current_function_outgoing_args_size - 16);
else if (to == STACK_POINTER_REGNUM)
if (current_function_is_leaf)
offset = 16 + current_function_outgoing_args_size;
case ARG_POINTER_REGNUM:
/* Arguments start above the 16 byte save area, unless stdarg
in which case we store through the 16 byte save area. */
if (to == HARD_FRAME_POINTER_REGNUM)
offset = 16 - current_function_pretend_args_size;
else if (to == STACK_POINTER_REGNUM)
offset = (current_frame_info.total_size
+ 16 - current_function_pretend_args_size);
/* If there are more than a trivial number of register spills, we use
two interleaved iterators so that we can get two memory references
In order to simplify things in the prologue and epilogue expanders,
we use helper functions to fix up the memory references after the
fact with the appropriate offsets to a POST_MODIFY memory mode.
The following data structure tracks the state of the two iterators
while insns are being emitted. */
struct spill_fill_data
rtx init_after; /* point at which to emit initializations */
rtx init_reg[2]; /* initial base register */
rtx iter_reg[2]; /* the iterator registers */
rtx *prev_addr[2]; /* address of last memory use */
rtx prev_insn[2]; /* the insn corresponding to prev_addr */
HOST_WIDE_INT prev_off[2]; /* last offset */
int n_iter; /* number of iterators in use */
int next_iter; /* next iterator to use */
unsigned int save_gr_used_mask;
static struct spill_fill_data spill_fill_data;
setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
spill_fill_data.init_after = get_last_insn ();
spill_fill_data.init_reg[0] = init_reg;
spill_fill_data.init_reg[1] = init_reg;
spill_fill_data.prev_addr[0] = NULL;
spill_fill_data.prev_addr[1] = NULL;
spill_fill_data.prev_insn[0] = NULL;
spill_fill_data.prev_insn[1] = NULL;
spill_fill_data.prev_off[0] = cfa_off;
spill_fill_data.prev_off[1] = cfa_off;
spill_fill_data.next_iter = 0;
spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
spill_fill_data.n_iter = 1 + (n_spills > 2);
for (i = 0; i < spill_fill_data.n_iter; ++i)
int regno = next_scratch_gr_reg ();
spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
current_frame_info.gr_used_mask |= 1 << regno;
finish_spill_pointers (void)
current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
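/* Return a MEM addressing the spill slot for REG at offset CFA_OFF from the
CFA, using one of the iterator registers; when possible the previously
emitted reference is rewritten into POST_MODIFY form so the iterator steps
to this offset. */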
spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
int iter = spill_fill_data.next_iter;
HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
rtx disp_rtx = GEN_INT (disp);
if (spill_fill_data.prev_addr[iter])
if (CONST_OK_FOR_N (disp))
*spill_fill_data.prev_addr[iter]
= gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
gen_rtx_PLUS (DImode,
spill_fill_data.iter_reg[iter],
REG_NOTES (spill_fill_data.prev_insn[iter])
= gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
REG_NOTES (spill_fill_data.prev_insn[iter]));
/* ??? Could use register post_modify for loads. */
if (! CONST_OK_FOR_I (disp))
rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
emit_move_insn (tmp, disp_rtx);
emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
spill_fill_data.iter_reg[iter], disp_rtx));
/* Micro-optimization: if we've created a frame pointer, it's at
CFA 0, which may allow the real iterator to be initialized lower,
slightly increasing parallelism. Also, if there are few saves
it may eliminate the iterator entirely. */
&& spill_fill_data.init_reg[iter] == stack_pointer_rtx
&& frame_pointer_needed)
mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
set_mem_alias_set (mem, get_varargs_alias_set ());
seq = gen_movdi (spill_fill_data.iter_reg[iter],
spill_fill_data.init_reg[iter]);
if (! CONST_OK_FOR_I (disp))
rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
emit_move_insn (tmp, disp_rtx);
emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
spill_fill_data.init_reg[iter],
/* Careful for being the first insn in a sequence. */
if (spill_fill_data.init_after)
insn = emit_insn_after (seq, spill_fill_data.init_after);
rtx first = get_insns ();
insn = emit_insn_before (seq, first);
insn = emit_insn (seq);
spill_fill_data.init_after = insn;
/* If DISP is 0, we may or may not have a further adjustment
afterward. If we do, then the load/store insn may be modified
to be a post-modify. If we don't, then this copy may be
eliminated by copyprop_hardreg_forward, which makes this
insn garbage, which runs afoul of the sanity check in
propagate_one_insn. So mark this insn as legal to delete. */
REG_NOTES(insn) = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx,
mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
/* ??? Not all of the spills are for varargs, but some of them are.
The rest of the spills belong in an alias set of their own. But
it doesn't actually hurt to include them here. */
set_mem_alias_set (mem, get_varargs_alias_set ());
spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
spill_fill_data.prev_off[iter] = cfa_off;
if (++iter >= spill_fill_data.n_iter)
spill_fill_data.next_iter = iter;
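/* Spill REG to the slot at CFA_OFF via MOVE_FN, marking the insn frame
related and attaching an explicit REG_FRAME_RELATED_EXPR so the unwind
info sees the real save address rather than the post-modify iterator. */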
do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
int iter = spill_fill_data.next_iter;
mem = spill_restore_mem (reg, cfa_off);
insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
spill_fill_data.prev_insn[iter] = insn;
RTX_FRAME_RELATED_P (insn) = 1;
/* Don't even pretend that the unwind code can intuit its way
through a pair of interleaved post_modify iterators. Just
provide the correct answer. */
if (frame_pointer_needed)
base = hard_frame_pointer_rtx;
base = stack_pointer_rtx;
off = current_frame_info.total_size - cfa_off;
= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode,
gen_rtx_MEM (GET_MODE (reg),
plus_constant (base, off)),
do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
int iter = spill_fill_data.next_iter;
insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
GEN_INT (cfa_off)));
spill_fill_data.prev_insn[iter] = insn;
/* Wrapper functions that discard the CONST_INT spill offset. These
exist so that we can give gr_spill/gr_fill the offset they need and
use a consistent function interface. */
gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
return gen_movdi (dest, src);
gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
return gen_fr_spill (dest, src);
gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
return gen_fr_restore (dest, src);
/* Called after register allocation to add any instructions needed for the
prologue. Using a prologue insn is favored compared to putting all of the
instructions in output_function_prologue(), since it allows the scheduler
to intermix instructions with the saves of the caller saved registers. In
some cases, it might be necessary to emit a barrier instruction as the last
insn to prevent such scheduling.
Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
so that the debug info generation code can handle them properly.
2008 The register save area is laid out like so:
2010 [ varargs spill area ]
2011 [ fr register spill area ]
2012 [ br register spill area ]
2013 [ ar register spill area ]
2014 [ pr register spill area ]
2015 [ gr register spill area ] */
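/* As a rough illustration only (the real contents depend entirely on
   current_frame_info as computed by ia64_compute_frame_size): a function
   that spills pr and two callee-saved GRs, with no varargs and no
   fr/br/ar spills, ends up with just a pr slot followed by the two gr
   slots at the bottom of this area, and the other areas are empty.  */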
2017 /* ??? Get inefficient code when the frame size is larger than can fit in an
2018 adds instruction. */
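/* The "I" constraint checked below via CONST_OK_FOR_I corresponds to the
   14-bit signed immediate accepted by adds, i.e. roughly +/-8K.  Frame
   adjustments outside that range must first be loaded into a scratch
   register and applied with a register-register add.  */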
2021 ia64_expand_prologue (void)
2023 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2024 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2027 ia64_compute_frame_size (get_frame_size ());
2028 last_scratch_gr_reg = 15;
2030 /* If there is no epilogue, then we don't need some prologue insns.
2031 We need to avoid emitting the dead prologue insns, because flow
2032 will complain about them. */
2038 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2039 if ((e->flags & EDGE_FAKE) == 0
2040 && (e->flags & EDGE_FALLTHRU) != 0)
2042 epilogue_p = (e != NULL);
2047 /* Set the local, input, and output register names. We need to do this
2048 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2049 half. If we use in/loc/out register names, then we get assembler errors
2050 in crtn.S because there is no alloc insn or regstk directive in there. */
2051 if (! TARGET_REG_NAMES)
2053 int inputs = current_frame_info.n_input_regs;
2054 int locals = current_frame_info.n_local_regs;
2055 int outputs = current_frame_info.n_output_regs;
2057 for (i = 0; i < inputs; i++)
2058 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2059 for (i = 0; i < locals; i++)
2060 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2061 for (i = 0; i < outputs; i++)
2062 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2065 /* Set the frame pointer register name. The regnum is logically loc79,
2066 but of course we'll not have allocated that many locals. Rather than
2067 worrying about renumbering the existing rtxs, we adjust the name. */
2068 /* ??? This code means that we can never use one local register when
2069 there is a frame pointer. loc79 gets wasted in this case, as it is
2070 renamed to a register that will never be used. See also the try_locals
2071 code in find_gr_spill. */
2072 if (current_frame_info.reg_fp)
2074 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2075 reg_names[HARD_FRAME_POINTER_REGNUM]
2076 = reg_names[current_frame_info.reg_fp];
2077 reg_names[current_frame_info.reg_fp] = tmp;
2080 /* We don't need an alloc instruction if we've used no outputs or locals. */
2081 if (current_frame_info.n_local_regs == 0
2082 && current_frame_info.n_output_regs == 0
2083 && current_frame_info.n_input_regs <= current_function_args_info.int_regs
2084 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2086 /* If there is no alloc, but there are input registers used, then we
2087 need a .regstk directive. */
2088 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
2089 ar_pfs_save_reg = NULL_RTX;
2093 current_frame_info.need_regstk = 0;
2095 if (current_frame_info.reg_save_ar_pfs)
2096 regno = current_frame_info.reg_save_ar_pfs;
2098 regno = next_scratch_gr_reg ();
2099 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
2101 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
2102 GEN_INT (current_frame_info.n_input_regs),
2103 GEN_INT (current_frame_info.n_local_regs),
2104 GEN_INT (current_frame_info.n_output_regs),
2105 GEN_INT (current_frame_info.n_rotate_regs)));
2106 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_pfs != 0);
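/* The insn emitted above becomes an "alloc rN = ar.pfs, i, l, o, r"
   instruction, e.g. "alloc r34 = ar.pfs, 2, 3, 2, 0" for a function with
   2 inputs, 3 locals, 2 outputs and no rotating registers (the register
   number here is purely illustrative).  */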
2109 /* Set up frame pointer, stack pointer, and spill iterators. */
2111 n_varargs = cfun->machine->n_varargs;
2112 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
2113 stack_pointer_rtx, 0);
2115 if (frame_pointer_needed)
2117 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
2118 RTX_FRAME_RELATED_P (insn) = 1;
2121 if (current_frame_info.total_size != 0)
2123 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
2126 if (CONST_OK_FOR_I (- current_frame_info.total_size))
2127 offset = frame_size_rtx;
2130 regno = next_scratch_gr_reg ();
2131 offset = gen_rtx_REG (DImode, regno);
2132 emit_move_insn (offset, frame_size_rtx);
2135 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
2136 stack_pointer_rtx, offset));
2138 if (! frame_pointer_needed)
2140 RTX_FRAME_RELATED_P (insn) = 1;
2141 if (GET_CODE (offset) != CONST_INT)
2144 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2145 gen_rtx_SET (VOIDmode,
2147 gen_rtx_PLUS (DImode,
2154 /* ??? At this point we must generate a magic insn that appears to
2155 modify the stack pointer, the frame pointer, and all spill
2156 iterators. This would allow the most scheduling freedom. For
2157 now, just hard stop. */
2158 emit_insn (gen_blockage ());
2161 /* Must copy out ar.unat before doing any integer spills. */
2162 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2164 if (current_frame_info.reg_save_ar_unat)
2166 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2169 alt_regno = next_scratch_gr_reg ();
2170 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2171 current_frame_info.gr_used_mask |= 1 << alt_regno;
2174 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2175 insn = emit_move_insn (ar_unat_save_reg, reg);
2176 RTX_FRAME_RELATED_P (insn) = (current_frame_info.reg_save_ar_unat != 0);
2178 /* Even if we're not going to generate an epilogue, we still
2179 need to save the register so that EH works. */
2180 if (! epilogue_p && current_frame_info.reg_save_ar_unat)
2181 emit_insn (gen_prologue_use (ar_unat_save_reg));
2184 ar_unat_save_reg = NULL_RTX;
2186 /* Spill all varargs registers. Do this before spilling any GR registers,
2187 since we want the UNAT bits for the GR registers to override the UNAT
2188 bits from varargs, which we don't care about. */
2191 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
2193 reg = gen_rtx_REG (DImode, regno);
2194 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
2197 /* Locate the bottom of the register save area. */
2198 cfa_off = (current_frame_info.spill_cfa_off
2199 + current_frame_info.spill_size
2200 + current_frame_info.extra_spill_size);
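/* From here cfa_off counts down through the save areas listed in the
   layout comment above ia64_expand_prologue: each do_spill call below
   stores at the current cfa_off, and the offset shrinks toward
   spill_cfa_off, which is sanity-checked at the end of the function.  */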
2202 /* Save the predicate register block either in a register or in memory. */
2203 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2205 reg = gen_rtx_REG (DImode, PR_REG (0));
2206 if (current_frame_info.reg_save_pr != 0)
2208 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2209 insn = emit_move_insn (alt_reg, reg);
2211 /* ??? Denote pr spill/fill by a DImode move that modifies all
2212 64 hard registers. */
2213 RTX_FRAME_RELATED_P (insn) = 1;
2215 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2216 gen_rtx_SET (VOIDmode, alt_reg, reg),
2219 /* Even if we're not going to generate an epilogue, we still
2220 need to save the register so that EH works. */
2222 emit_insn (gen_prologue_use (alt_reg));
2226 alt_regno = next_scratch_gr_reg ();
2227 alt_reg = gen_rtx_REG (DImode, alt_regno);
2228 insn = emit_move_insn (alt_reg, reg);
2229 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2234 /* Handle AR regs in numerical order. All of them get special handling. */
2235 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
2236 && current_frame_info.reg_save_ar_unat == 0)
2238 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2239 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
2243 /* The alloc insn already copied ar.pfs into a general register. The
2244 only thing we have to do now is copy that register to a stack slot
2245 if we'd not allocated a local register for the job. */
2246 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
2247 && current_frame_info.reg_save_ar_pfs == 0)
2249 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2250 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
2254 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2256 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2257 if (current_frame_info.reg_save_ar_lc != 0)
2259 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2260 insn = emit_move_insn (alt_reg, reg);
2261 RTX_FRAME_RELATED_P (insn) = 1;
2263 /* Even if we're not going to generate an epilogue, we still
2264 need to save the register so that EH works. */
2266 emit_insn (gen_prologue_use (alt_reg));
2270 alt_regno = next_scratch_gr_reg ();
2271 alt_reg = gen_rtx_REG (DImode, alt_regno);
2272 emit_move_insn (alt_reg, reg);
2273 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2278 if (current_frame_info.reg_save_gp)
2280 insn = emit_move_insn (gen_rtx_REG (DImode,
2281 current_frame_info.reg_save_gp),
2282 pic_offset_table_rtx);
2283 /* We don't know for sure yet if this is actually needed, since
2284 we've not split the PIC call patterns. If all of the calls
2285 are indirect, and not followed by any uses of the gp, then
2286 this save is dead. Allow it to go away. */
2288 = gen_rtx_EXPR_LIST (REG_MAYBE_DEAD, const0_rtx, REG_NOTES (insn));
2291 /* We should now be at the base of the gr/br/fr spill area. */
2292 if (cfa_off != (current_frame_info.spill_cfa_off
2293 + current_frame_info.spill_size))
2296 /* Spill all general registers. */
2297 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2298 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2300 reg = gen_rtx_REG (DImode, regno);
2301 do_spill (gen_gr_spill, reg, cfa_off, reg);
2305 /* Handle BR0 specially -- it may be getting stored permanently in
2306 some GR register. */
2307 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2309 reg = gen_rtx_REG (DImode, BR_REG (0));
2310 if (current_frame_info.reg_save_b0 != 0)
2312 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2313 insn = emit_move_insn (alt_reg, reg);
2314 RTX_FRAME_RELATED_P (insn) = 1;
2316 /* Even if we're not going to generate an epilogue, we still
2317 need to save the register so that EH works. */
2319 emit_insn (gen_prologue_use (alt_reg));
2323 alt_regno = next_scratch_gr_reg ();
2324 alt_reg = gen_rtx_REG (DImode, alt_regno);
2325 emit_move_insn (alt_reg, reg);
2326 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2331 /* Spill the rest of the BR registers. */
2332 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2333 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2335 alt_regno = next_scratch_gr_reg ();
2336 alt_reg = gen_rtx_REG (DImode, alt_regno);
2337 reg = gen_rtx_REG (DImode, regno);
2338 emit_move_insn (alt_reg, reg);
2339 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
2343 /* Align the frame and spill all FR registers. */
2344 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2345 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2349 reg = gen_rtx_REG (XFmode, regno);
2350 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
2354 if (cfa_off != current_frame_info.spill_cfa_off)
2357 finish_spill_pointers ();
2360 /* Called after register allocation to add any instructions needed for the
2361 epilogue. Using an epilogue insn is favored compared to putting all of the
2362 instructions in output_function_epilogue(), since it allows the scheduler
2363 to intermix instructions with the restores of the caller saved registers. In
2364 some cases, it might be necessary to emit a barrier instruction as the last
2365 insn to prevent such scheduling. */
2368 ia64_expand_epilogue (int sibcall_p)
2370 rtx insn, reg, alt_reg, ar_unat_save_reg;
2371 int regno, alt_regno, cfa_off;
2373 ia64_compute_frame_size (get_frame_size ());
2375 /* If there is a frame pointer, then we use it instead of the stack
2376 pointer, so that the stack pointer does not need to be valid when
2377 the epilogue starts. See EXIT_IGNORE_STACK. */
2378 if (frame_pointer_needed)
2379 setup_spill_pointers (current_frame_info.n_spilled,
2380 hard_frame_pointer_rtx, 0);
2382 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
2383 current_frame_info.total_size);
2385 if (current_frame_info.total_size != 0)
2387 /* ??? At this point we must generate a magic insn that appears to
2388 modify the spill iterators and the frame pointer. This would
2389 allow the most scheduling freedom. For now, just hard stop. */
2390 emit_insn (gen_blockage ());
2393 /* Locate the bottom of the register save area. */
2394 cfa_off = (current_frame_info.spill_cfa_off
2395 + current_frame_info.spill_size
2396 + current_frame_info.extra_spill_size);
2398 /* Restore the predicate registers. */
2399 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
2401 if (current_frame_info.reg_save_pr != 0)
2402 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_pr);
2405 alt_regno = next_scratch_gr_reg ();
2406 alt_reg = gen_rtx_REG (DImode, alt_regno);
2407 do_restore (gen_movdi_x, alt_reg, cfa_off);
2410 reg = gen_rtx_REG (DImode, PR_REG (0));
2411 emit_move_insn (reg, alt_reg);
2414 /* Restore the application registers. */
2416 /* Load the saved unat from the stack, but do not restore it until
2417 after the GRs have been restored. */
2418 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2420 if (current_frame_info.reg_save_ar_unat != 0)
2422 = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_unat);
2425 alt_regno = next_scratch_gr_reg ();
2426 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
2427 current_frame_info.gr_used_mask |= 1 << alt_regno;
2428 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
2433 ar_unat_save_reg = NULL_RTX;
2435 if (current_frame_info.reg_save_ar_pfs != 0)
2437 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_pfs);
2438 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2439 emit_move_insn (reg, alt_reg);
2441 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
2443 alt_regno = next_scratch_gr_reg ();
2444 alt_reg = gen_rtx_REG (DImode, alt_regno);
2445 do_restore (gen_movdi_x, alt_reg, cfa_off);
2447 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
2448 emit_move_insn (reg, alt_reg);
2451 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
2453 if (current_frame_info.reg_save_ar_lc != 0)
2454 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_ar_lc);
2457 alt_regno = next_scratch_gr_reg ();
2458 alt_reg = gen_rtx_REG (DImode, alt_regno);
2459 do_restore (gen_movdi_x, alt_reg, cfa_off);
2462 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
2463 emit_move_insn (reg, alt_reg);
2466 /* We should now be at the base of the gr/br/fr spill area. */
2467 if (cfa_off != (current_frame_info.spill_cfa_off
2468 + current_frame_info.spill_size))
2471 /* The GP may be stored on the stack in the prologue, but it's
2472 never restored in the epilogue. Skip the stack slot. */
2473 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
2476 /* Restore all general registers. */
2477 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
2478 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2480 reg = gen_rtx_REG (DImode, regno);
2481 do_restore (gen_gr_restore, reg, cfa_off);
2485 /* Restore the branch registers. Handle B0 specially, as it may
2486 have gotten stored in some GR register. */
2487 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2489 if (current_frame_info.reg_save_b0 != 0)
2490 alt_reg = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2493 alt_regno = next_scratch_gr_reg ();
2494 alt_reg = gen_rtx_REG (DImode, alt_regno);
2495 do_restore (gen_movdi_x, alt_reg, cfa_off);
2498 reg = gen_rtx_REG (DImode, BR_REG (0));
2499 emit_move_insn (reg, alt_reg);
2502 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
2503 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2505 alt_regno = next_scratch_gr_reg ();
2506 alt_reg = gen_rtx_REG (DImode, alt_regno);
2507 do_restore (gen_movdi_x, alt_reg, cfa_off);
2509 reg = gen_rtx_REG (DImode, regno);
2510 emit_move_insn (reg, alt_reg);
2513 /* Restore floating point registers. */
2514 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
2515 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2519 reg = gen_rtx_REG (XFmode, regno);
2520 do_restore (gen_fr_restore_x, reg, cfa_off);
2524 /* Restore ar.unat for real. */
2525 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
2527 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
2528 emit_move_insn (reg, ar_unat_save_reg);
2531 if (cfa_off != current_frame_info.spill_cfa_off)
2534 finish_spill_pointers ();
2536 if (current_frame_info.total_size || cfun->machine->ia64_eh_epilogue_sp)
2538 /* ??? At this point we must generate a magic insn that appears to
2539 modify the spill iterators, the stack pointer, and the frame
2540 pointer. This would allow the most scheduling freedom. For now,
2542 emit_insn (gen_blockage ());
2545 if (cfun->machine->ia64_eh_epilogue_sp)
2546 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
2547 else if (frame_pointer_needed)
2549 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
2550 RTX_FRAME_RELATED_P (insn) = 1;
2552 else if (current_frame_info.total_size)
2554 rtx offset, frame_size_rtx;
2556 frame_size_rtx = GEN_INT (current_frame_info.total_size);
2557 if (CONST_OK_FOR_I (current_frame_info.total_size))
2558 offset = frame_size_rtx;
2561 regno = next_scratch_gr_reg ();
2562 offset = gen_rtx_REG (DImode, regno);
2563 emit_move_insn (offset, frame_size_rtx);
2566 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
2569 RTX_FRAME_RELATED_P (insn) = 1;
2570 if (GET_CODE (offset) != CONST_INT)
2573 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2574 gen_rtx_SET (VOIDmode,
2576 gen_rtx_PLUS (DImode,
2583 if (cfun->machine->ia64_eh_epilogue_bsp)
2584 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
2587 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
2590 int fp = GR_REG (2);
2591 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
2592 first available call clobbered register. If there was a frame_pointer
2593 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
2594 so we have to make sure we're using the string "r2" when emitting
2595 the register name for the assembler. */
2596 if (current_frame_info.reg_fp && current_frame_info.reg_fp == GR_REG (2))
2597 fp = HARD_FRAME_POINTER_REGNUM;
2599 /* We must emit an alloc to force the input registers to become output
2600 registers. Otherwise, if the callee tries to pass its parameters
2601 through to another call without an intervening alloc, then these values may be clobbered. */
2603 /* ??? We don't need to preserve all input registers. We only need to
2604 preserve those input registers used as arguments to the sibling call.
2605 It is unclear how to compute that number here. */
2606 if (current_frame_info.n_input_regs != 0)
2607 emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
2608 const0_rtx, const0_rtx,
2609 GEN_INT (current_frame_info.n_input_regs),
2614 /* Return 1 if br.ret can do all the work required to return from a
2618 ia64_direct_return (void)
2620 if (reload_completed && ! frame_pointer_needed)
2622 ia64_compute_frame_size (get_frame_size ());
2624 return (current_frame_info.total_size == 0
2625 && current_frame_info.n_spilled == 0
2626 && current_frame_info.reg_save_b0 == 0
2627 && current_frame_info.reg_save_pr == 0
2628 && current_frame_info.reg_save_ar_pfs == 0
2629 && current_frame_info.reg_save_ar_unat == 0
2630 && current_frame_info.reg_save_ar_lc == 0);
2635 /* Return the magic cookie that we use to hold the return address
2636 during early compilation. */
2639 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
2643 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
2646 /* Split this value after reload, now that we know where the return
2647 address is saved. */
2650 ia64_split_return_addr_rtx (rtx dest)
2654 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
2656 if (current_frame_info.reg_save_b0 != 0)
2657 src = gen_rtx_REG (DImode, current_frame_info.reg_save_b0);
2663 /* Compute offset from CFA for BR0. */
2664 /* ??? Must be kept in sync with ia64_expand_prologue. */
2665 off = (current_frame_info.spill_cfa_off
2666 + current_frame_info.spill_size);
2667 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
2668 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
2671 /* Convert CFA offset to a register based offset. */
2672 if (frame_pointer_needed)
2673 src = hard_frame_pointer_rtx;
2676 src = stack_pointer_rtx;
2677 off += current_frame_info.total_size;
2680 /* Load address into scratch register. */
2681 if (CONST_OK_FOR_I (off))
2682 emit_insn (gen_adddi3 (dest, src, GEN_INT (off)));
2685 emit_move_insn (dest, GEN_INT (off));
2686 emit_insn (gen_adddi3 (dest, src, dest));
2689 src = gen_rtx_MEM (Pmode, dest);
2693 src = gen_rtx_REG (DImode, BR_REG (0));
2695 emit_move_insn (dest, src);
2699 ia64_hard_regno_rename_ok (int from, int to)
2701 /* Don't clobber any of the registers we reserved for the prologue. */
2702 if (to == current_frame_info.reg_fp
2703 || to == current_frame_info.reg_save_b0
2704 || to == current_frame_info.reg_save_pr
2705 || to == current_frame_info.reg_save_ar_pfs
2706 || to == current_frame_info.reg_save_ar_unat
2707 || to == current_frame_info.reg_save_ar_lc)
2710 if (from == current_frame_info.reg_fp
2711 || from == current_frame_info.reg_save_b0
2712 || from == current_frame_info.reg_save_pr
2713 || from == current_frame_info.reg_save_ar_pfs
2714 || from == current_frame_info.reg_save_ar_unat
2715 || from == current_frame_info.reg_save_ar_lc)
2718 /* Don't use output registers outside the register frame. */
2719 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
2722 /* Retain even/oddness on predicate register pairs. */
2723 if (PR_REGNO_P (from) && PR_REGNO_P (to))
2724 return (from & 1) == (to & 1);
2729 /* Target hook for assembling integer objects. Handle word-sized
2730 aligned objects and detect the cases when @fptr is needed. */
2733 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
2735 if (size == POINTER_SIZE / BITS_PER_UNIT
2737 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
2738 && GET_CODE (x) == SYMBOL_REF
2739 && SYMBOL_REF_FUNCTION_P (x))
2741 if (POINTER_SIZE == 32)
2742 fputs ("\tdata4\t@fptr(", asm_out_file);
2744 fputs ("\tdata8\t@fptr(", asm_out_file);
2745 output_addr_const (asm_out_file, x);
2746 fputs (")\n", asm_out_file);
2749 return default_assemble_integer (x, size, aligned_p);
2752 /* Emit the function prologue. */
2755 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2757 int mask, grsave, grsave_prev;
2759 if (current_frame_info.need_regstk)
2760 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
2761 current_frame_info.n_input_regs,
2762 current_frame_info.n_local_regs,
2763 current_frame_info.n_output_regs,
2764 current_frame_info.n_rotate_regs);
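/* For example, the fprintf above produces "\t.regstk 2, 3, 2, 0" for a
   function using 2 input, 3 local, 2 output and no rotating registers
   (numbers purely illustrative).  */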
2766 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2769 /* Emit the .prologue directive. */
2772 grsave = grsave_prev = 0;
2773 if (current_frame_info.reg_save_b0 != 0)
2776 grsave = grsave_prev = current_frame_info.reg_save_b0;
2778 if (current_frame_info.reg_save_ar_pfs != 0
2779 && (grsave_prev == 0
2780 || current_frame_info.reg_save_ar_pfs == grsave_prev + 1))
2783 if (grsave_prev == 0)
2784 grsave = current_frame_info.reg_save_ar_pfs;
2785 grsave_prev = current_frame_info.reg_save_ar_pfs;
2787 if (current_frame_info.reg_fp != 0
2788 && (grsave_prev == 0
2789 || current_frame_info.reg_fp == grsave_prev + 1))
2792 if (grsave_prev == 0)
2793 grsave = HARD_FRAME_POINTER_REGNUM;
2794 grsave_prev = current_frame_info.reg_fp;
2796 if (current_frame_info.reg_save_pr != 0
2797 && (grsave_prev == 0
2798 || current_frame_info.reg_save_pr == grsave_prev + 1))
2801 if (grsave_prev == 0)
2802 grsave = current_frame_info.reg_save_pr;
2805 if (mask && TARGET_GNU_AS)
2806 fprintf (file, "\t.prologue %d, %d\n", mask,
2807 ia64_dbx_register_number (grsave));
2809 fputs ("\t.prologue\n", file);
2811 /* Emit a .spill directive, if necessary, to relocate the base of
2812 the register spill area. */
2813 if (current_frame_info.spill_cfa_off != -16)
2814 fprintf (file, "\t.spill %ld\n",
2815 (long) (current_frame_info.spill_cfa_off
2816 + current_frame_info.spill_size));
2819 /* Emit the .body directive at the scheduled end of the prologue. */
2822 ia64_output_function_end_prologue (FILE *file)
2824 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
2827 fputs ("\t.body\n", file);
2830 /* Emit the function epilogue. */
2833 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
2834 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2838 if (current_frame_info.reg_fp)
2840 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2841 reg_names[HARD_FRAME_POINTER_REGNUM]
2842 = reg_names[current_frame_info.reg_fp];
2843 reg_names[current_frame_info.reg_fp] = tmp;
2845 if (! TARGET_REG_NAMES)
2847 for (i = 0; i < current_frame_info.n_input_regs; i++)
2848 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
2849 for (i = 0; i < current_frame_info.n_local_regs; i++)
2850 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
2851 for (i = 0; i < current_frame_info.n_output_regs; i++)
2852 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
2855 current_frame_info.initialized = 0;
2859 ia64_dbx_register_number (int regno)
2861 /* In ia64_expand_prologue we quite literally renamed the frame pointer
2862 from its home at loc79 to something inside the register frame. We
2863 must perform the same renumbering here for the debug info. */
2864 if (current_frame_info.reg_fp)
2866 if (regno == HARD_FRAME_POINTER_REGNUM)
2867 regno = current_frame_info.reg_fp;
2868 else if (regno == current_frame_info.reg_fp)
2869 regno = HARD_FRAME_POINTER_REGNUM;
2872 if (IN_REGNO_P (regno))
2873 return 32 + regno - IN_REG (0);
2874 else if (LOC_REGNO_P (regno))
2875 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
2876 else if (OUT_REGNO_P (regno))
2877 return (32 + current_frame_info.n_input_regs
2878 + current_frame_info.n_local_regs + regno - OUT_REG (0));
2884 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
2886 rtx addr_reg, eight = GEN_INT (8);
2888 /* The Intel assembler requires that the global __ia64_trampoline symbol
2889 be declared explicitly */
2892 static bool declared_ia64_trampoline = false;
2894 if (!declared_ia64_trampoline)
2896 declared_ia64_trampoline = true;
2897 (*targetm.asm_out.globalize_label) (asm_out_file,
2898 "__ia64_trampoline");
2902 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
2903 addr = convert_memory_address (Pmode, addr);
2904 fnaddr = convert_memory_address (Pmode, fnaddr);
2905 static_chain = convert_memory_address (Pmode, static_chain);
2907 /* Load up our iterator. */
2908 addr_reg = gen_reg_rtx (Pmode);
2909 emit_move_insn (addr_reg, addr);
2911 /* The first two words are the fake descriptor:
2912 __ia64_trampoline, ADDR+16. */
2913 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2914 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
2915 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2917 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
2918 copy_to_reg (plus_constant (addr, 16)));
2919 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2921 /* The third word is the target descriptor. */
2922 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
2923 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
2925 /* The fourth word is the static chain. */
2926 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
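/* The resulting 32-byte trampoline block is therefore laid out as:
	addr +  0:  address of __ia64_trampoline  (fake descriptor, word 1)
	addr +  8:  addr + 16                     (fake descriptor, word 2)
	addr + 16:  the target's function address (fnaddr)
	addr + 24:  the static chain
   which simply restates the four stores emitted above.  */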
2929 /* Do any needed setup for a variadic function. CUM has not been updated
2930 for the last named argument which has type TYPE and mode MODE.
2932 We generate the actual spill instructions during prologue generation. */
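/* As a sketch of the arithmetic below: for "int f (int a, ...)" the named
   argument uses one of the MAX_ARGUMENT_SLOTS (8) slots, so next_cum.words
   is 1, n = 7, and *pretend_size = 7 * UNITS_PER_WORD = 56 bytes of
   register save area are reserved for the unnamed arguments.  */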
2935 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
2936 tree type, int * pretend_size,
2937 int second_time ATTRIBUTE_UNUSED)
2939 CUMULATIVE_ARGS next_cum = *cum;
2941 /* Skip the current argument. */
2942 ia64_function_arg_advance (&next_cum, mode, type, 1);
2944 if (next_cum.words < MAX_ARGUMENT_SLOTS)
2946 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
2947 *pretend_size = n * UNITS_PER_WORD;
2948 cfun->machine->n_varargs = n;
2952 /* Check whether TYPE is a homogeneous floating point aggregate. If
2953 it is, return the mode of the floating point type that appears
2954 in all leafs. If it is not, return VOIDmode.
2956 An aggregate is a homogeneous floating point aggregate if all
2957 fields/elements in it have the same floating point type (e.g.,
2958 SFmode). 128-bit quad-precision floats are excluded. */
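/* For example, "struct { float x, y, z; }" is an HFA with SFmode elements,
   while "struct { float x; double y; }" is not an HFA because its fields
   do not share a single floating point mode.  */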
2960 static enum machine_mode
2961 hfa_element_mode (tree type, int nested)
2963 enum machine_mode element_mode = VOIDmode;
2964 enum machine_mode mode;
2965 enum tree_code code = TREE_CODE (type);
2966 int know_element_mode = 0;
2971 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
2972 case BOOLEAN_TYPE: case CHAR_TYPE: case POINTER_TYPE:
2973 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
2974 case FILE_TYPE: case SET_TYPE: case LANG_TYPE:
2978 /* Fortran complex types are supposed to be HFAs, so we need to handle
2979 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
2982 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
2983 && TYPE_MODE (type) != TCmode)
2984 return GET_MODE_INNER (TYPE_MODE (type));
2989 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
2990 mode if this is contained within an aggregate. */
2991 if (nested && TYPE_MODE (type) != TFmode)
2992 return TYPE_MODE (type);
2997 return hfa_element_mode (TREE_TYPE (type), 1);
3001 case QUAL_UNION_TYPE:
3002 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3004 if (TREE_CODE (t) != FIELD_DECL)
3007 mode = hfa_element_mode (TREE_TYPE (t), 1);
3008 if (know_element_mode)
3010 if (mode != element_mode)
3013 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3017 know_element_mode = 1;
3018 element_mode = mode;
3021 return element_mode;
3024 /* If we reach here, we probably have some front-end specific type
3025 that the backend doesn't know about. This can happen via the
3026 aggregate_value_p call in init_function_start. All we can do is
3027 ignore unknown tree types. */
3034 /* Return the number of words required to hold a quantity of TYPE and MODE
3035 when passed as an argument. */
3037 ia64_function_arg_words (tree type, enum machine_mode mode)
3041 if (mode == BLKmode)
3042 words = int_size_in_bytes (type);
3044 words = GET_MODE_SIZE (mode);
3046 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
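/* E.g. a 12-byte BLKmode aggregate needs (12 + 8 - 1) / 8 = 2 argument
   slots, and a DImode scalar needs exactly 1.  */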
3049 /* Return the number of registers that should be skipped so the current
3050 argument (described by TYPE and WORDS) will be properly aligned.
3052 Integer and float arguments larger than 8 bytes start at the next
3053 even boundary. Aggregates larger than 8 bytes start at the next
3054 even boundary if the aggregate has 16 byte alignment. Note that
3055 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3056 but are still to be aligned in registers.
3058 ??? The ABI does not specify how to handle aggregates with
3059 alignment from 9 to 15 bytes, or greater than 16. We handle them
3060 all as if they had 16 byte alignment. Such aggregates can occur
3061 only if gcc extensions are used. */
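/* For example, if three 8-byte arguments have already been passed
   (cum->words == 3, so the next free slot is odd-numbered) and the next
   argument is a 16-byte aligned aggregate, one slot is skipped so that it
   starts in slot 4 (counting from zero), leaving slot 3 unused.  */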
3063 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
3065 if ((cum->words & 1) == 0)
3069 && TREE_CODE (type) != INTEGER_TYPE
3070 && TREE_CODE (type) != REAL_TYPE)
3071 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
3076 /* Return rtx for register where argument is passed, or zero if it is passed
3078 /* ??? 128-bit quad-precision floats are always passed in general
3082 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
3083 int named, int incoming)
3085 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
3086 int words = ia64_function_arg_words (type, mode);
3087 int offset = ia64_function_arg_offset (cum, type, words);
3088 enum machine_mode hfa_mode = VOIDmode;
3090 /* If all argument slots are used, then it must go on the stack. */
3091 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3094 /* Check for and handle homogeneous FP aggregates. */
3096 hfa_mode = hfa_element_mode (type, 0);
3098 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3099 and unprototyped hfas are passed specially. */
3100 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3104 int fp_regs = cum->fp_regs;
3105 int int_regs = cum->words + offset;
3106 int hfa_size = GET_MODE_SIZE (hfa_mode);
3110 /* If prototyped, pass it in FR regs then GR regs.
3111 If not prototyped, pass it in both FR and GR regs.
3113 If this is an SFmode aggregate, then it is possible to run out of
3114 FR regs while GR regs are still left. In that case, we pass the
3115 remaining part in the GR regs. */
3117 /* Fill the FP regs. We do this always. We stop if we reach the end
3118 of the argument, the last FP register, or the last argument slot. */
3120 byte_size = ((mode == BLKmode)
3121 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3122 args_byte_size = int_regs * UNITS_PER_WORD;
3124 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3125 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
3127 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3128 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
3132 args_byte_size += hfa_size;
3136 /* If no prototype, then the whole thing must go in GR regs. */
3137 if (! cum->prototype)
3139 /* If this is an SFmode aggregate, then we might have some left over
3140 that needs to go in GR regs. */
3141 else if (byte_size != offset)
3142 int_regs += offset / UNITS_PER_WORD;
3144 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
3146 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
3148 enum machine_mode gr_mode = DImode;
3149 unsigned int gr_size;
3151 /* If we have an odd 4 byte hunk because we ran out of FR regs,
3152 then this goes in a GR reg left adjusted/little endian, right
3153 adjusted/big endian. */
3154 /* ??? Currently this is handled wrong, because 4-byte hunks are
3155 always right adjusted/little endian. */
3158 /* If we have an even 4 byte hunk because the aggregate is a
3159 multiple of 4 bytes in size, then this goes in a GR reg right
3160 adjusted/little endian. */
3161 else if (byte_size - offset == 4)
3164 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3165 gen_rtx_REG (gr_mode, (basereg
3169 gr_size = GET_MODE_SIZE (gr_mode);
3171 if (gr_size == UNITS_PER_WORD
3172 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
3174 else if (gr_size > UNITS_PER_WORD)
3175 int_regs += gr_size / UNITS_PER_WORD;
3177 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3180 /* Integral and aggregates go in general registers. If we have run out of
3181 FR registers, then FP values must also go in general registers. This can
3182 happen when we have a SFmode HFA. */
3183 else if (mode == TFmode || mode == TCmode
3184 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3186 int byte_size = ((mode == BLKmode)
3187 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3188 if (BYTES_BIG_ENDIAN
3189 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3190 && byte_size < UNITS_PER_WORD
3193 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3194 gen_rtx_REG (DImode,
3195 (basereg + cum->words
3198 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3201 return gen_rtx_REG (mode, basereg + cum->words + offset);
3205 /* If there is a prototype, then FP values go in a FR register when
3206 named, and in a GR register when unnamed. */
3207 else if (cum->prototype)
3210 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
3211 /* In big-endian mode, an anonymous SFmode value must be represented
3212 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
3213 the value into the high half of the general register. */
3214 else if (BYTES_BIG_ENDIAN && mode == SFmode)
3215 return gen_rtx_PARALLEL (mode,
3217 gen_rtx_EXPR_LIST (VOIDmode,
3218 gen_rtx_REG (DImode, basereg + cum->words + offset),
3221 return gen_rtx_REG (mode, basereg + cum->words + offset);
3223 /* If there is no prototype, then FP values go in both FR and GR
3227 /* See comment above. */
3228 enum machine_mode inner_mode =
3229 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
3231 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
3232 gen_rtx_REG (mode, (FR_ARG_FIRST
3235 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
3236 gen_rtx_REG (inner_mode,
3237 (basereg + cum->words
3241 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
3245 /* Return number of words, at the beginning of the argument, that must be
3246 put in registers. 0 if the argument is entirely in registers or entirely
3250 ia64_function_arg_partial_nregs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3251 tree type, int named ATTRIBUTE_UNUSED)
3253 int words = ia64_function_arg_words (type, mode);
3254 int offset = ia64_function_arg_offset (cum, type, words);
3256 /* If all argument slots are used, then it must go on the stack. */
3257 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
3260 /* It doesn't matter whether the argument goes in FR or GR regs. If
3261 it fits within the 8 argument slots, then it goes entirely in
3262 registers. If it extends past the last argument slot, then the rest
3263 goes on the stack. */
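/* For example, with 6 slots already used and no alignment padding, a
   4-word argument has 8 - 6 - 0 = 2 words passed in registers and the
   remaining 2 words passed on the stack.  */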
3265 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
3268 return MAX_ARGUMENT_SLOTS - cum->words - offset;
3271 /* Update CUM to point after this argument. This is patterned after
3272 ia64_function_arg. */
3275 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3276 tree type, int named)
3278 int words = ia64_function_arg_words (type, mode);
3279 int offset = ia64_function_arg_offset (cum, type, words);
3280 enum machine_mode hfa_mode = VOIDmode;
3282 /* If all arg slots are already full, then there is nothing to do. */
3283 if (cum->words >= MAX_ARGUMENT_SLOTS)
3286 cum->words += words + offset;
3288 /* Check for and handle homogeneous FP aggregates. */
3290 hfa_mode = hfa_element_mode (type, 0);
3292 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
3293 and unprototyped hfas are passed specially. */
3294 if (hfa_mode != VOIDmode && (! cum->prototype || named))
3296 int fp_regs = cum->fp_regs;
3297 /* This is the original value of cum->words + offset. */
3298 int int_regs = cum->words - words;
3299 int hfa_size = GET_MODE_SIZE (hfa_mode);
3303 /* If prototyped, pass it in FR regs then GR regs.
3304 If not prototyped, pass it in both FR and GR regs.
3306 If this is an SFmode aggregate, then it is possible to run out of
3307 FR regs while GR regs are still left. In that case, we pass the
3308 remaining part in the GR regs. */
3310 /* Fill the FP regs. We do this always. We stop if we reach the end
3311 of the argument, the last FP register, or the last argument slot. */
3313 byte_size = ((mode == BLKmode)
3314 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3315 args_byte_size = int_regs * UNITS_PER_WORD;
3317 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
3318 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
3321 args_byte_size += hfa_size;
3325 cum->fp_regs = fp_regs;
3328 /* Integral and aggregates go in general registers. So do TFmode FP values.
3329 If we have run out of FR registers, then other FP values must also go in
3330 general registers. This can happen when we have a SFmode HFA. */
3331 else if (mode == TFmode || mode == TCmode
3332 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
3333 cum->int_regs = cum->words;
3335 /* If there is a prototype, then FP values go in a FR register when
3336 named, and in a GR register when unnamed. */
3337 else if (cum->prototype)
3340 cum->int_regs = cum->words;
3342 /* ??? Complex types should not reach here. */
3343 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3345 /* If there is no prototype, then FP values go in both FR and GR
3349 /* ??? Complex types should not reach here. */
3350 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
3351 cum->int_regs = cum->words;
3355 /* Arguments with alignment larger than 8 bytes start at the next even
3356 boundary. On ILP32 HPUX, TFmode arguments start on next even boundary
3357 even though their normal alignment is 8 bytes. See ia64_function_arg. */
3360 ia64_function_arg_boundary (enum machine_mode mode, tree type)
3363 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
3364 return PARM_BOUNDARY * 2;
3368 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
3369 return PARM_BOUNDARY * 2;
3371 return PARM_BOUNDARY;
3374 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
3375 return PARM_BOUNDARY * 2;
3377 return PARM_BOUNDARY;
3380 /* Variable sized types are passed by reference. */
3381 /* ??? At present this is a GCC extension to the IA-64 ABI. */
3384 ia64_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
3385 enum machine_mode mode ATTRIBUTE_UNUSED,
3386 tree type, bool named ATTRIBUTE_UNUSED)
3388 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
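/* In practice this triggers only for types whose size is not a
   compile-time constant, for example a GNU C structure containing a
   variable-length array member; everything else is passed by value.  */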
3391 /* True if it is OK to do sibling call optimization for the specified
3392 call expression EXP. DECL will be the called function, or NULL if
3393 this is an indirect call. */
3395 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3397 /* We can't perform a sibcall if the current function has the syscall_linkage attribute. */
3399 if (lookup_attribute ("syscall_linkage",
3400 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
3403 /* We must always return with our current GP. This means we can
3404 only sibcall to functions defined in the current module. */
3405 return decl && (*targetm.binds_local_p) (decl);
3409 /* Implement va_arg. */
3412 ia64_gimplify_va_arg (tree valist, tree type, tree *pre_p, tree *post_p)
3414 /* Variable sized types are passed by reference. */
3415 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
3417 tree ptrtype = build_pointer_type (type);
3418 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
3419 return build_va_arg_indirect_ref (addr);
3422 /* Aggregate arguments with alignment larger than 8 bytes start at
3423 the next even boundary. Integer and floating point arguments
3424 do so if they are larger than 8 bytes, whether or not they are
3425 also aligned larger than 8 bytes. */
3426 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
3427 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
3429 tree t = build (PLUS_EXPR, TREE_TYPE (valist), valist,
3430 build_int_cst (NULL_TREE, 2 * UNITS_PER_WORD - 1));
3431 t = build (BIT_AND_EXPR, TREE_TYPE (t), t,
3432 build_int_cst (NULL_TREE, -2 * UNITS_PER_WORD));
3433 t = build (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
3434 gimplify_and_add (t, pre_p);
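/* With UNITS_PER_WORD == 8, the expressions built above compute
   valist = (valist + 15) & -16, i.e. they round the argument pointer up
   to the next 16-byte boundary before the standard va_arg handling runs.  */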
3437 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3440 /* Return 1 if function return value returned in memory. Return 0 if it is
3444 ia64_return_in_memory (tree valtype, tree fntype ATTRIBUTE_UNUSED)
3446 enum machine_mode mode;
3447 enum machine_mode hfa_mode;
3448 HOST_WIDE_INT byte_size;
3450 mode = TYPE_MODE (valtype);
3451 byte_size = GET_MODE_SIZE (mode);
3452 if (mode == BLKmode)
3454 byte_size = int_size_in_bytes (valtype);
3459 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
3461 hfa_mode = hfa_element_mode (valtype, 0);
3462 if (hfa_mode != VOIDmode)
3464 int hfa_size = GET_MODE_SIZE (hfa_mode);
3466 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
3471 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
3477 /* Return rtx for register that holds the function return value. */
3480 ia64_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
3482 enum machine_mode mode;
3483 enum machine_mode hfa_mode;
3485 mode = TYPE_MODE (valtype);
3486 hfa_mode = hfa_element_mode (valtype, 0);
3488 if (hfa_mode != VOIDmode)
3496 hfa_size = GET_MODE_SIZE (hfa_mode);
3497 byte_size = ((mode == BLKmode)
3498 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
3500 for (i = 0; offset < byte_size; i++)
3502 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3503 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
3507 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3509 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
3510 return gen_rtx_REG (mode, FR_ARG_FIRST);
3513 if (BYTES_BIG_ENDIAN
3514 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
3522 bytesize = int_size_in_bytes (valtype);
3523 for (i = 0; offset < bytesize; i++)
3525 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
3526 gen_rtx_REG (DImode,
3529 offset += UNITS_PER_WORD;
3531 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
3534 return gen_rtx_REG (mode, GR_RET_FIRST);
3538 /* This is called from dwarf2out.c via ASM_OUTPUT_DWARF_DTPREL.
3539 We need to emit DTP-relative relocations. */
3542 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
3546 fputs ("\tdata8.ua\t@dtprel(", file);
3547 output_addr_const (file, x);
3551 /* Print a memory address as an operand to reference that memory location. */
3553 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
3554 also call this from ia64_print_operand for memory addresses. */
3557 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
3558 rtx address ATTRIBUTE_UNUSED)
3562 /* Print an operand to an assembler instruction.
3563 C Swap and print a comparison operator.
3564 D Print an FP comparison operator.
3565 E Print 32 - constant, for SImode shifts as extract.
3566 e Print 64 - constant, for DImode rotates.
3567 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
3568 a floating point register emitted normally.
3569 I Invert a predicate register by adding 1.
3570 J Select the proper predicate register for a condition.
3571 j Select the inverse predicate register for a condition.
3572 O Append .acq for volatile load.
3573 P Postincrement of a MEM.
3574 Q Append .rel for volatile store.
3575 S Shift amount for shladd instruction.
3576 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
3577 for Intel assembler.
3578 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
3579 for Intel assembler.
3580 r Print register name, or constant 0 as r0. HP compatibility for
3583 ia64_print_operand (FILE * file, rtx x, int code)
3590 /* Handled below. */
3595 enum rtx_code c = swap_condition (GET_CODE (x));
3596 fputs (GET_RTX_NAME (c), file);
3601 switch (GET_CODE (x))
3613 str = GET_RTX_NAME (GET_CODE (x));
3620 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
3624 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
3628 if (x == CONST0_RTX (GET_MODE (x)))
3629 str = reg_names [FR_REG (0)];
3630 else if (x == CONST1_RTX (GET_MODE (x)))
3631 str = reg_names [FR_REG (1)];
3632 else if (GET_CODE (x) == REG)
3633 str = reg_names [REGNO (x)];
3640 fputs (reg_names [REGNO (x) + 1], file);
3646 unsigned int regno = REGNO (XEXP (x, 0));
3647 if (GET_CODE (x) == EQ)
3651 fputs (reg_names [regno], file);
3656 if (MEM_VOLATILE_P (x))
3657 fputs(".acq", file);
3662 HOST_WIDE_INT value;
3664 switch (GET_CODE (XEXP (x, 0)))
3670 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
3671 if (GET_CODE (x) == CONST_INT)
3673 else if (GET_CODE (x) == REG)
3675 fprintf (file, ", %s", reg_names[REGNO (x)]);
3683 value = GET_MODE_SIZE (GET_MODE (x));
3687 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
3691 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
3696 if (MEM_VOLATILE_P (x))
3697 fputs(".rel", file);
3701 fprintf (file, "%d", exact_log2 (INTVAL (x)));
3705 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3707 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
3713 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
3715 const char *prefix = "0x";
3716 if (INTVAL (x) & 0x80000000)
3718 fprintf (file, "0xffffffff");
3721 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
3727 /* If this operand is the constant zero, write it as register zero.
3728 Any register, zero, or CONST_INT value is OK here. */
3729 if (GET_CODE (x) == REG)
3730 fputs (reg_names[REGNO (x)], file);
3731 else if (x == CONST0_RTX (GET_MODE (x)))
3733 else if (GET_CODE (x) == CONST_INT)
3734 output_addr_const (file, x);
3736 output_operand_lossage ("invalid %%r value");
3743 /* For conditional branches, returns or calls, substitute
3744 sptk, dptk, dpnt, or spnt for %s. */
3745 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
3748 int pred_val = INTVAL (XEXP (x, 0));
3750 /* Guess top and bottom 10% statically predicted. */
3751 if (pred_val < REG_BR_PROB_BASE / 50)
3753 else if (pred_val < REG_BR_PROB_BASE / 2)
3755 else if (pred_val < REG_BR_PROB_BASE / 100 * 98)
3760 else if (GET_CODE (current_output_insn) == CALL_INSN)
3765 fputs (which, file);
3770 x = current_insn_predicate;
3773 unsigned int regno = REGNO (XEXP (x, 0));
3774 if (GET_CODE (x) == EQ)
3776 fprintf (file, "(%s) ", reg_names [regno]);
3781 output_operand_lossage ("ia64_print_operand: unknown code");
3785 switch (GET_CODE (x))
3787 /* This happens for the spill/restore instructions. */
3792 /* ... fall through ... */
3795 fputs (reg_names [REGNO (x)], file);
3800 rtx addr = XEXP (x, 0);
3801 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
3802 addr = XEXP (addr, 0);
3803 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
3808 output_addr_const (file, x);
3815 /* Compute a (partial) cost for rtx X. Return true if the complete
3816 cost has been computed, and false if subexpressions should be
3817 scanned. In either case, *TOTAL contains the cost result. */
3818 /* ??? This is incomplete. */
3821 ia64_rtx_costs (rtx x, int code, int outer_code, int *total)
3829 *total = CONST_OK_FOR_J (INTVAL (x)) ? 0 : COSTS_N_INSNS (1);
3832 if (CONST_OK_FOR_I (INTVAL (x)))
3834 else if (CONST_OK_FOR_J (INTVAL (x)))
3837 *total = COSTS_N_INSNS (1);
3840 if (CONST_OK_FOR_K (INTVAL (x)) || CONST_OK_FOR_L (INTVAL (x)))
3843 *total = COSTS_N_INSNS (1);
3848 *total = COSTS_N_INSNS (1);
3854 *total = COSTS_N_INSNS (3);
3858 /* For multiplies wider than HImode, we have to go to the FPU,
3859 which normally involves copies. Plus there's the latency
3860 of the multiply itself, and the latency of the instructions to
3861 transfer integer regs to FP regs. */
3862 /* ??? Check for FP mode. */
3863 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
3864 *total = COSTS_N_INSNS (10);
3866 *total = COSTS_N_INSNS (2);
3874 *total = COSTS_N_INSNS (1);
3881 /* We make divide expensive, so that divide-by-constant will be
3882 optimized to a multiply. */
3883 *total = COSTS_N_INSNS (60);
3891 /* Calculate the cost of moving data from a register in class FROM to
3892 one in class TO, using MODE. */
3895 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
3898 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
3899 if (to == ADDL_REGS)
3901 if (from == ADDL_REGS)
3904 /* All costs are symmetric, so reduce cases by putting the
3905 lower number class as the destination. */
3908 enum reg_class tmp = to;
3909 to = from, from = tmp;
3912 /* Moving from FR<->GR in XFmode must be more expensive than 2,
3913 so that we get secondary memory reloads. Between FR_REGS,
3914 we have to make this at least as expensive as MEMORY_MOVE_COST
3915 to avoid spectacularly poor register class preferencing. */
3918 if (to != GR_REGS || from != GR_REGS)
3919 return MEMORY_MOVE_COST (mode, to, 0);
3927 /* Moving between PR registers takes two insns. */
3928 if (from == PR_REGS)
3930 /* Moving between PR and anything but GR is impossible. */
3931 if (from != GR_REGS)
3932 return MEMORY_MOVE_COST (mode, to, 0);
3936 /* Moving between BR and anything but GR is impossible. */
3937 if (from != GR_REGS && from != GR_AND_BR_REGS)
3938 return MEMORY_MOVE_COST (mode, to, 0);
3943 /* Moving between AR and anything but GR is impossible. */
3944 if (from != GR_REGS)
3945 return MEMORY_MOVE_COST (mode, to, 0);
3950 case GR_AND_FR_REGS:
3951 case GR_AND_BR_REGS:
3962 /* This function returns the register class required for a secondary
3963 register when copying between one of the registers in CLASS, and X,
3964 using MODE. A return value of NO_REGS means that no secondary register is required. */
3968 ia64_secondary_reload_class (enum reg_class class,
3969 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3973 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
3974 regno = true_regnum (x);
3981 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
3982 interaction. We end up with two pseudos with overlapping lifetimes
3983 both of which are equiv to the same constant, and both which need
3984 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
3985 changes depending on the path length, which means the qty_first_reg
3986 check in make_regs_eqv can give different answers at different times.
3987 At some point I'll probably need a reload_indi pattern to handle this.
3990 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
3991 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
3992 non-general registers for good measure. */
3993 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
3996 /* This is needed if a pseudo used as a call_operand gets spilled to a stack slot. */
3998 if (GET_CODE (x) == MEM)
4003 /* Need to go through general registers to get to other class regs. */
4004 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
4007 /* This can happen when a paradoxical subreg is an operand to the
4009 /* ??? This shouldn't be necessary after instruction scheduling is
4010 enabled, because paradoxical subregs are not accepted by
4011 register_operand when INSN_SCHEDULING is defined. Or alternatively,
4012 stop the paradoxical subreg stupidity in the *_operand functions
4014 if (GET_CODE (x) == MEM
4015 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
4016 || GET_MODE (x) == QImode))
4019 /* This can happen because of the ior/and/etc patterns that accept FP
4020 registers as operands. If the third operand is a constant, then it
4021 needs to be reloaded into a FP register. */
4022 if (GET_CODE (x) == CONST_INT)
4025 /* This can happen because of register elimination in a muldi3 insn.
4026 E.g. `26107 * (unsigned long)&u'. */
4027 if (GET_CODE (x) == PLUS)