1 /* Definitions of target machine for GNU compiler.
2 Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008
3 Free Software Foundation, Inc.
4 Contributed by James E. Wilson <wilson@cygnus.com> and
5 David Mosberger <davidm@hpl.hp.com>.
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
35 #include "insn-attr.h"
43 #include "basic-block.h"
45 #include "sched-int.h"
48 #include "target-def.h"
51 #include "langhooks.h"
52 #include "cfglayout.h"
59 #include "tm-constrs.h"
61 /* This is used for communication between ASM_OUTPUT_LABEL and
62 ASM_OUTPUT_LABELREF. */
63 int ia64_asm_output_label = 0;
65 /* Define the information needed to generate branch and scc insns. This is
66 stored from the compare operation. */
67 struct rtx_def * ia64_compare_op0;
68 struct rtx_def * ia64_compare_op1;
70 /* Register names for ia64_expand_prologue. */
71 static const char * const ia64_reg_numbers[96] =
72 { "r32", "r33", "r34", "r35", "r36", "r37", "r38", "r39",
73 "r40", "r41", "r42", "r43", "r44", "r45", "r46", "r47",
74 "r48", "r49", "r50", "r51", "r52", "r53", "r54", "r55",
75 "r56", "r57", "r58", "r59", "r60", "r61", "r62", "r63",
76 "r64", "r65", "r66", "r67", "r68", "r69", "r70", "r71",
77 "r72", "r73", "r74", "r75", "r76", "r77", "r78", "r79",
78 "r80", "r81", "r82", "r83", "r84", "r85", "r86", "r87",
79 "r88", "r89", "r90", "r91", "r92", "r93", "r94", "r95",
80 "r96", "r97", "r98", "r99", "r100","r101","r102","r103",
81 "r104","r105","r106","r107","r108","r109","r110","r111",
82 "r112","r113","r114","r115","r116","r117","r118","r119",
83 "r120","r121","r122","r123","r124","r125","r126","r127"};
85 /* ??? These strings could be shared with REGISTER_NAMES. */
86 static const char * const ia64_input_reg_names[8] =
87 { "in0", "in1", "in2", "in3", "in4", "in5", "in6", "in7" };
89 /* ??? These strings could be shared with REGISTER_NAMES. */
90 static const char * const ia64_local_reg_names[80] =
91 { "loc0", "loc1", "loc2", "loc3", "loc4", "loc5", "loc6", "loc7",
92 "loc8", "loc9", "loc10","loc11","loc12","loc13","loc14","loc15",
93 "loc16","loc17","loc18","loc19","loc20","loc21","loc22","loc23",
94 "loc24","loc25","loc26","loc27","loc28","loc29","loc30","loc31",
95 "loc32","loc33","loc34","loc35","loc36","loc37","loc38","loc39",
96 "loc40","loc41","loc42","loc43","loc44","loc45","loc46","loc47",
97 "loc48","loc49","loc50","loc51","loc52","loc53","loc54","loc55",
98 "loc56","loc57","loc58","loc59","loc60","loc61","loc62","loc63",
99 "loc64","loc65","loc66","loc67","loc68","loc69","loc70","loc71",
100 "loc72","loc73","loc74","loc75","loc76","loc77","loc78","loc79" };
102 /* ??? These strings could be shared with REGISTER_NAMES. */
103 static const char * const ia64_output_reg_names[8] =
104 { "out0", "out1", "out2", "out3", "out4", "out5", "out6", "out7" };
106 /* Which cpu are we scheduling for. */
107 enum processor_type ia64_tune = PROCESSOR_ITANIUM2;
109 /* Determines whether we run our final scheduling pass or not. We always
110 avoid the normal second scheduling pass. */
111 static int ia64_flag_schedule_insns2;
113 /* Determines whether we run variable tracking in machine dependent reorganization. */
115 static int ia64_flag_var_tracking;
117 /* Variables which are this size or smaller are put in the sdata/sbss sections. */
120 unsigned int ia64_section_threshold;
122 /* The following variable is used by the DFA insn scheduler. The value is
123 TRUE if we do insn bundling instead of insn scheduling. */
135 number_of_ia64_frame_regs
138 /* Structure to be filled in by ia64_compute_frame_size with register
139 save masks and offsets for the current function. */
141 struct ia64_frame_info
143 HOST_WIDE_INT total_size; /* size of the stack frame, not including
144 the caller's scratch area. */
145 HOST_WIDE_INT spill_cfa_off; /* top of the reg spill area from the cfa. */
146 HOST_WIDE_INT spill_size; /* size of the gr/br/fr spill area. */
147 HOST_WIDE_INT extra_spill_size; /* size of spill area for others. */
148 HARD_REG_SET mask; /* mask of saved registers. */
149 unsigned int gr_used_mask; /* mask of registers in use as gr spill
150 registers or long-term scratches. */
151 int n_spilled; /* number of spilled registers. */
152 int r[number_of_ia64_frame_regs]; /* Frame related registers. */
153 int n_input_regs; /* number of input registers used. */
154 int n_local_regs; /* number of local registers used. */
155 int n_output_regs; /* number of output registers used. */
156 int n_rotate_regs; /* number of rotating registers used. */
158 char need_regstk; /* true if a .regstk directive needed. */
159 char initialized; /* true if the data is finalized. */
162 /* Current frame information calculated by ia64_compute_frame_size. */
163 static struct ia64_frame_info current_frame_info;
164 /* The actual registers that are emitted. */
165 static int emitted_frame_related_regs[number_of_ia64_frame_regs];
167 static int ia64_first_cycle_multipass_dfa_lookahead (void);
168 static void ia64_dependencies_evaluation_hook (rtx, rtx);
169 static void ia64_init_dfa_pre_cycle_insn (void);
170 static rtx ia64_dfa_pre_cycle_insn (void);
171 static int ia64_first_cycle_multipass_dfa_lookahead_guard (rtx);
172 static bool ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx);
173 static int ia64_dfa_new_cycle (FILE *, int, rtx, int, int, int *);
174 static void ia64_h_i_d_extended (void);
175 static int ia64_mode_to_int (enum machine_mode);
176 static void ia64_set_sched_flags (spec_info_t);
177 static int ia64_speculate_insn (rtx, ds_t, rtx *);
178 static rtx ia64_gen_spec_insn (rtx, ds_t, int, bool, bool);
179 static bool ia64_needs_block_p (const_rtx);
180 static rtx ia64_gen_check (rtx, rtx, bool);
181 static int ia64_spec_check_p (rtx);
182 static int ia64_spec_check_src_p (rtx);
183 static rtx gen_tls_get_addr (void);
184 static rtx gen_thread_pointer (void);
185 static int find_gr_spill (enum ia64_frame_regs, int);
186 static int next_scratch_gr_reg (void);
187 static void mark_reg_gr_used_mask (rtx, void *);
188 static void ia64_compute_frame_size (HOST_WIDE_INT);
189 static void setup_spill_pointers (int, rtx, HOST_WIDE_INT);
190 static void finish_spill_pointers (void);
191 static rtx spill_restore_mem (rtx, HOST_WIDE_INT);
192 static void do_spill (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT, rtx);
193 static void do_restore (rtx (*)(rtx, rtx, rtx), rtx, HOST_WIDE_INT);
194 static rtx gen_movdi_x (rtx, rtx, rtx);
195 static rtx gen_fr_spill_x (rtx, rtx, rtx);
196 static rtx gen_fr_restore_x (rtx, rtx, rtx);
198 static enum machine_mode hfa_element_mode (const_tree, bool);
199 static void ia64_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
201 static int ia64_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
203 static bool ia64_function_ok_for_sibcall (tree, tree);
204 static bool ia64_return_in_memory (const_tree, const_tree);
205 static bool ia64_rtx_costs (rtx, int, int, int *, bool);
206 static int ia64_unspec_may_trap_p (const_rtx, unsigned);
207 static void fix_range (const char *);
208 static bool ia64_handle_option (size_t, const char *, int);
209 static struct machine_function * ia64_init_machine_status (void);
210 static void emit_insn_group_barriers (FILE *);
211 static void emit_all_insn_group_barriers (FILE *);
212 static void final_emit_insn_group_barriers (FILE *);
213 static void emit_predicate_relation_info (void);
214 static void ia64_reorg (void);
215 static bool ia64_in_small_data_p (const_tree);
216 static void process_epilogue (FILE *, rtx, bool, bool);
217 static int process_set (FILE *, rtx, rtx, bool, bool);
219 static bool ia64_assemble_integer (rtx, unsigned int, int);
220 static void ia64_output_function_prologue (FILE *, HOST_WIDE_INT);
221 static void ia64_output_function_epilogue (FILE *, HOST_WIDE_INT);
222 static void ia64_output_function_end_prologue (FILE *);
224 static int ia64_issue_rate (void);
225 static int ia64_adjust_cost (rtx, rtx, rtx, int);
226 static void ia64_sched_init (FILE *, int, int);
227 static void ia64_sched_init_global (FILE *, int, int);
228 static void ia64_sched_finish_global (FILE *, int);
229 static void ia64_sched_finish (FILE *, int);
230 static int ia64_dfa_sched_reorder (FILE *, int, rtx *, int *, int, int);
231 static int ia64_sched_reorder (FILE *, int, rtx *, int *, int);
232 static int ia64_sched_reorder2 (FILE *, int, rtx *, int *, int);
233 static int ia64_variable_issue (FILE *, int, rtx, int);
235 static struct bundle_state *get_free_bundle_state (void);
236 static void free_bundle_state (struct bundle_state *);
237 static void initiate_bundle_states (void);
238 static void finish_bundle_states (void);
239 static unsigned bundle_state_hash (const void *);
240 static int bundle_state_eq_p (const void *, const void *);
241 static int insert_bundle_state (struct bundle_state *);
242 static void initiate_bundle_state_table (void);
243 static void finish_bundle_state_table (void);
244 static int try_issue_nops (struct bundle_state *, int);
245 static int try_issue_insn (struct bundle_state *, rtx);
246 static void issue_nops_and_insn (struct bundle_state *, int, rtx, int, int);
247 static int get_max_pos (state_t);
248 static int get_template (state_t, int);
250 static rtx get_next_important_insn (rtx, rtx);
251 static void bundling (FILE *, int, rtx, rtx);
253 static void ia64_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
254 HOST_WIDE_INT, tree);
255 static void ia64_file_start (void);
256 static void ia64_globalize_decl_name (FILE *, tree);
258 static int ia64_hpux_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
259 static int ia64_reloc_rw_mask (void) ATTRIBUTE_UNUSED;
260 static section *ia64_select_rtx_section (enum machine_mode, rtx,
261 unsigned HOST_WIDE_INT);
262 static void ia64_output_dwarf_dtprel (FILE *, int, rtx)
264 static unsigned int ia64_section_type_flags (tree, const char *, int);
265 static void ia64_init_libfuncs (void)
267 static void ia64_hpux_init_libfuncs (void)
269 static void ia64_sysv4_init_libfuncs (void)
271 static void ia64_vms_init_libfuncs (void)
274 static tree ia64_handle_model_attribute (tree *, tree, tree, int, bool *);
275 static tree ia64_handle_version_id_attribute (tree *, tree, tree, int, bool *);
276 static void ia64_encode_section_info (tree, rtx, int);
277 static rtx ia64_struct_value_rtx (tree, int);
278 static tree ia64_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
279 static bool ia64_scalar_mode_supported_p (enum machine_mode mode);
280 static bool ia64_vector_mode_supported_p (enum machine_mode mode);
281 static bool ia64_cannot_force_const_mem (rtx);
282 static const char *ia64_mangle_type (const_tree);
283 static const char *ia64_invalid_conversion (const_tree, const_tree);
284 static const char *ia64_invalid_unary_op (int, const_tree);
285 static const char *ia64_invalid_binary_op (int, const_tree, const_tree);
286 static enum machine_mode ia64_c_mode_for_suffix (char);
288 /* Table of valid machine attributes. */
289 static const struct attribute_spec ia64_attribute_table[] =
291 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
292 { "syscall_linkage", 0, 0, false, true, true, NULL },
293 { "model", 1, 1, true, false, false, ia64_handle_model_attribute },
294 { "version_id", 1, 1, true, false, false,
295 ia64_handle_version_id_attribute },
296 { NULL, 0, 0, false, false, false, NULL }
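/* As an illustrative sketch (the declarations below are hypothetical), these
   attributes appear in user code as

     extern int counter __attribute__ ((model ("small")));
     extern int foo (void) __attribute__ ((version_id ("20040821")));

   "model" selects the address area used to reference the object, and
   "version_id" (HP-UX) attaches a version string to the symbol via the
   .alias directive emitted in ia64_globalize_decl_name below.  */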
299 /* Initialize the GCC target structure. */
300 #undef TARGET_ATTRIBUTE_TABLE
301 #define TARGET_ATTRIBUTE_TABLE ia64_attribute_table
303 #undef TARGET_INIT_BUILTINS
304 #define TARGET_INIT_BUILTINS ia64_init_builtins
306 #undef TARGET_EXPAND_BUILTIN
307 #define TARGET_EXPAND_BUILTIN ia64_expand_builtin
309 #undef TARGET_ASM_BYTE_OP
310 #define TARGET_ASM_BYTE_OP "\tdata1\t"
311 #undef TARGET_ASM_ALIGNED_HI_OP
312 #define TARGET_ASM_ALIGNED_HI_OP "\tdata2\t"
313 #undef TARGET_ASM_ALIGNED_SI_OP
314 #define TARGET_ASM_ALIGNED_SI_OP "\tdata4\t"
315 #undef TARGET_ASM_ALIGNED_DI_OP
316 #define TARGET_ASM_ALIGNED_DI_OP "\tdata8\t"
317 #undef TARGET_ASM_UNALIGNED_HI_OP
318 #define TARGET_ASM_UNALIGNED_HI_OP "\tdata2.ua\t"
319 #undef TARGET_ASM_UNALIGNED_SI_OP
320 #define TARGET_ASM_UNALIGNED_SI_OP "\tdata4.ua\t"
321 #undef TARGET_ASM_UNALIGNED_DI_OP
322 #define TARGET_ASM_UNALIGNED_DI_OP "\tdata8.ua\t"
323 #undef TARGET_ASM_INTEGER
324 #define TARGET_ASM_INTEGER ia64_assemble_integer
326 #undef TARGET_ASM_FUNCTION_PROLOGUE
327 #define TARGET_ASM_FUNCTION_PROLOGUE ia64_output_function_prologue
328 #undef TARGET_ASM_FUNCTION_END_PROLOGUE
329 #define TARGET_ASM_FUNCTION_END_PROLOGUE ia64_output_function_end_prologue
330 #undef TARGET_ASM_FUNCTION_EPILOGUE
331 #define TARGET_ASM_FUNCTION_EPILOGUE ia64_output_function_epilogue
333 #undef TARGET_IN_SMALL_DATA_P
334 #define TARGET_IN_SMALL_DATA_P ia64_in_small_data_p
336 #undef TARGET_SCHED_ADJUST_COST
337 #define TARGET_SCHED_ADJUST_COST ia64_adjust_cost
338 #undef TARGET_SCHED_ISSUE_RATE
339 #define TARGET_SCHED_ISSUE_RATE ia64_issue_rate
340 #undef TARGET_SCHED_VARIABLE_ISSUE
341 #define TARGET_SCHED_VARIABLE_ISSUE ia64_variable_issue
342 #undef TARGET_SCHED_INIT
343 #define TARGET_SCHED_INIT ia64_sched_init
344 #undef TARGET_SCHED_FINISH
345 #define TARGET_SCHED_FINISH ia64_sched_finish
346 #undef TARGET_SCHED_INIT_GLOBAL
347 #define TARGET_SCHED_INIT_GLOBAL ia64_sched_init_global
348 #undef TARGET_SCHED_FINISH_GLOBAL
349 #define TARGET_SCHED_FINISH_GLOBAL ia64_sched_finish_global
350 #undef TARGET_SCHED_REORDER
351 #define TARGET_SCHED_REORDER ia64_sched_reorder
352 #undef TARGET_SCHED_REORDER2
353 #define TARGET_SCHED_REORDER2 ia64_sched_reorder2
355 #undef TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK
356 #define TARGET_SCHED_DEPENDENCIES_EVALUATION_HOOK ia64_dependencies_evaluation_hook
358 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
359 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ia64_first_cycle_multipass_dfa_lookahead
361 #undef TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN
362 #define TARGET_SCHED_INIT_DFA_PRE_CYCLE_INSN ia64_init_dfa_pre_cycle_insn
363 #undef TARGET_SCHED_DFA_PRE_CYCLE_INSN
364 #define TARGET_SCHED_DFA_PRE_CYCLE_INSN ia64_dfa_pre_cycle_insn
366 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
367 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD\
368 ia64_first_cycle_multipass_dfa_lookahead_guard
370 #undef TARGET_SCHED_DFA_NEW_CYCLE
371 #define TARGET_SCHED_DFA_NEW_CYCLE ia64_dfa_new_cycle
373 #undef TARGET_SCHED_H_I_D_EXTENDED
374 #define TARGET_SCHED_H_I_D_EXTENDED ia64_h_i_d_extended
376 #undef TARGET_SCHED_SET_SCHED_FLAGS
377 #define TARGET_SCHED_SET_SCHED_FLAGS ia64_set_sched_flags
379 #undef TARGET_SCHED_SPECULATE_INSN
380 #define TARGET_SCHED_SPECULATE_INSN ia64_speculate_insn
382 #undef TARGET_SCHED_NEEDS_BLOCK_P
383 #define TARGET_SCHED_NEEDS_BLOCK_P ia64_needs_block_p
385 #undef TARGET_SCHED_GEN_SPEC_CHECK
386 #define TARGET_SCHED_GEN_SPEC_CHECK ia64_gen_check
388 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC
389 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD_SPEC\
390 ia64_first_cycle_multipass_dfa_lookahead_guard_spec
392 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
393 #define TARGET_FUNCTION_OK_FOR_SIBCALL ia64_function_ok_for_sibcall
394 #undef TARGET_ARG_PARTIAL_BYTES
395 #define TARGET_ARG_PARTIAL_BYTES ia64_arg_partial_bytes
397 #undef TARGET_ASM_OUTPUT_MI_THUNK
398 #define TARGET_ASM_OUTPUT_MI_THUNK ia64_output_mi_thunk
399 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
400 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
402 #undef TARGET_ASM_FILE_START
403 #define TARGET_ASM_FILE_START ia64_file_start
405 #undef TARGET_ASM_GLOBALIZE_DECL_NAME
406 #define TARGET_ASM_GLOBALIZE_DECL_NAME ia64_globalize_decl_name
408 #undef TARGET_RTX_COSTS
409 #define TARGET_RTX_COSTS ia64_rtx_costs
410 #undef TARGET_ADDRESS_COST
411 #define TARGET_ADDRESS_COST hook_int_rtx_0
413 #undef TARGET_UNSPEC_MAY_TRAP_P
414 #define TARGET_UNSPEC_MAY_TRAP_P ia64_unspec_may_trap_p
416 #undef TARGET_MACHINE_DEPENDENT_REORG
417 #define TARGET_MACHINE_DEPENDENT_REORG ia64_reorg
419 #undef TARGET_ENCODE_SECTION_INFO
420 #define TARGET_ENCODE_SECTION_INFO ia64_encode_section_info
422 #undef TARGET_SECTION_TYPE_FLAGS
423 #define TARGET_SECTION_TYPE_FLAGS ia64_section_type_flags
426 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
427 #define TARGET_ASM_OUTPUT_DWARF_DTPREL ia64_output_dwarf_dtprel
430 /* ??? ABI doesn't allow us to define this. */
432 #undef TARGET_PROMOTE_FUNCTION_ARGS
433 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
436 /* ??? ABI doesn't allow us to define this. */
438 #undef TARGET_PROMOTE_FUNCTION_RETURN
439 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
442 /* ??? Investigate. */
444 #undef TARGET_PROMOTE_PROTOTYPES
445 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
448 #undef TARGET_STRUCT_VALUE_RTX
449 #define TARGET_STRUCT_VALUE_RTX ia64_struct_value_rtx
450 #undef TARGET_RETURN_IN_MEMORY
451 #define TARGET_RETURN_IN_MEMORY ia64_return_in_memory
452 #undef TARGET_SETUP_INCOMING_VARARGS
453 #define TARGET_SETUP_INCOMING_VARARGS ia64_setup_incoming_varargs
454 #undef TARGET_STRICT_ARGUMENT_NAMING
455 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
456 #undef TARGET_MUST_PASS_IN_STACK
457 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
459 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
460 #define TARGET_GIMPLIFY_VA_ARG_EXPR ia64_gimplify_va_arg
462 #undef TARGET_UNWIND_EMIT
463 #define TARGET_UNWIND_EMIT process_for_unwind_directive
465 #undef TARGET_SCALAR_MODE_SUPPORTED_P
466 #define TARGET_SCALAR_MODE_SUPPORTED_P ia64_scalar_mode_supported_p
467 #undef TARGET_VECTOR_MODE_SUPPORTED_P
468 #define TARGET_VECTOR_MODE_SUPPORTED_P ia64_vector_mode_supported_p
470 /* ia64 architecture manual 4.4.7: ... reads, writes, and flushes may occur
471 in an order different from the specified program order. */
472 #undef TARGET_RELAXED_ORDERING
473 #define TARGET_RELAXED_ORDERING true
475 #undef TARGET_DEFAULT_TARGET_FLAGS
476 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
477 #undef TARGET_HANDLE_OPTION
478 #define TARGET_HANDLE_OPTION ia64_handle_option
480 #undef TARGET_CANNOT_FORCE_CONST_MEM
481 #define TARGET_CANNOT_FORCE_CONST_MEM ia64_cannot_force_const_mem
483 #undef TARGET_MANGLE_TYPE
484 #define TARGET_MANGLE_TYPE ia64_mangle_type
486 #undef TARGET_INVALID_CONVERSION
487 #define TARGET_INVALID_CONVERSION ia64_invalid_conversion
488 #undef TARGET_INVALID_UNARY_OP
489 #define TARGET_INVALID_UNARY_OP ia64_invalid_unary_op
490 #undef TARGET_INVALID_BINARY_OP
491 #define TARGET_INVALID_BINARY_OP ia64_invalid_binary_op
493 #undef TARGET_C_MODE_FOR_SUFFIX
494 #define TARGET_C_MODE_FOR_SUFFIX ia64_c_mode_for_suffix
496 struct gcc_target targetm = TARGET_INITIALIZER;
500 ADDR_AREA_NORMAL, /* normal address area */
501 ADDR_AREA_SMALL /* addressable by "addl" (-2MB < addr < 2MB) */
505 static GTY(()) tree small_ident1;
506 static GTY(()) tree small_ident2;
511 if (small_ident1 == 0)
513 small_ident1 = get_identifier ("small");
514 small_ident2 = get_identifier ("__small__");
518 /* Retrieve the address area that has been chosen for the given decl. */
520 static ia64_addr_area
521 ia64_get_addr_area (tree decl)
525 model_attr = lookup_attribute ("model", DECL_ATTRIBUTES (decl));
531 id = TREE_VALUE (TREE_VALUE (model_attr));
532 if (id == small_ident1 || id == small_ident2)
533 return ADDR_AREA_SMALL;
535 return ADDR_AREA_NORMAL;
539 ia64_handle_model_attribute (tree *node, tree name, tree args,
540 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
542 ia64_addr_area addr_area = ADDR_AREA_NORMAL;
544 tree arg, decl = *node;
547 arg = TREE_VALUE (args);
548 if (arg == small_ident1 || arg == small_ident2)
550 addr_area = ADDR_AREA_SMALL;
554 warning (OPT_Wattributes, "invalid argument of %qs attribute",
555 IDENTIFIER_POINTER (name));
556 *no_add_attrs = true;
559 switch (TREE_CODE (decl))
562 if ((DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl))
564 && !TREE_STATIC (decl))
566 error ("%Jan address area attribute cannot be specified for "
567 "local variables", decl);
568 *no_add_attrs = true;
570 area = ia64_get_addr_area (decl);
571 if (area != ADDR_AREA_NORMAL && addr_area != area)
573 error ("address area of %q+D conflicts with previous "
574 "declaration", decl);
575 *no_add_attrs = true;
580 error ("%Jaddress area attribute cannot be specified for functions",
582 *no_add_attrs = true;
586 warning (OPT_Wattributes, "%qs attribute ignored",
587 IDENTIFIER_POINTER (name));
588 *no_add_attrs = true;
596 ia64_encode_addr_area (tree decl, rtx symbol)
600 flags = SYMBOL_REF_FLAGS (symbol);
601 switch (ia64_get_addr_area (decl))
603 case ADDR_AREA_NORMAL: break;
604 case ADDR_AREA_SMALL: flags |= SYMBOL_FLAG_SMALL_ADDR; break;
605 default: gcc_unreachable ();
607 SYMBOL_REF_FLAGS (symbol) = flags;
611 ia64_encode_section_info (tree decl, rtx rtl, int first)
613 default_encode_section_info (decl, rtl, first);
615 /* Careful not to prod global register variables. */
616 if (TREE_CODE (decl) == VAR_DECL
617 && GET_CODE (DECL_RTL (decl)) == MEM
618 && GET_CODE (XEXP (DECL_RTL (decl), 0)) == SYMBOL_REF
619 && (TREE_STATIC (decl) || DECL_EXTERNAL (decl)))
620 ia64_encode_addr_area (decl, XEXP (rtl, 0));
623 /* Return 1 if the operands of a move are ok. */
626 ia64_move_ok (rtx dst, rtx src)
628 /* If we're under init_recog_no_volatile, we'll not be able to use
629 memory_operand. So check the code directly and don't worry about
630 the validity of the underlying address, which should have been
631 checked elsewhere anyway. */
632 if (GET_CODE (dst) != MEM)
634 if (GET_CODE (src) == MEM)
636 if (register_operand (src, VOIDmode))
639 /* Otherwise, this must be a constant: either 0, 0.0, or 1.0. */
640 if (INTEGRAL_MODE_P (GET_MODE (dst)))
641 return src == const0_rtx;
643 return satisfies_constraint_G (src);
646 /* Return 1 if the operands are ok for a floating point load pair. */
649 ia64_load_pair_ok (rtx dst, rtx src)
651 if (GET_CODE (dst) != REG || !FP_REGNO_P (REGNO (dst)))
653 if (GET_CODE (src) != MEM || MEM_VOLATILE_P (src))
655 switch (GET_CODE (XEXP (src, 0)))
664 rtx adjust = XEXP (XEXP (XEXP (src, 0), 1), 1);
666 if (GET_CODE (adjust) != CONST_INT
667 || INTVAL (adjust) != GET_MODE_SIZE (GET_MODE (src)))
678 addp4_optimize_ok (rtx op1, rtx op2)
680 return (basereg_operand (op1, GET_MODE(op1)) !=
681 basereg_operand (op2, GET_MODE(op2)));
684 /* Check if OP is a mask suitable for use with SHIFT in a dep.z instruction.
685 Return the length of the field, or <= 0 on failure. */
688 ia64_depz_field_mask (rtx rop, rtx rshift)
690 unsigned HOST_WIDE_INT op = INTVAL (rop);
691 unsigned HOST_WIDE_INT shift = INTVAL (rshift);
693 /* Get rid of the zero bits we're shifting in. */
696 /* We must now have a solid block of 1's at bit 0. */
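/* For example (a sketch): mask 0x0ff0 with shift 4 leaves op = 0xff, and
   exact_log2 (0x100) yields a field length of 8; a mask like 0x0f30 is
   rejected because 0xf3 + 1 is not a power of two.  */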
697 return exact_log2 (op + 1);
700 /* Return the TLS model to use for ADDR. */
702 static enum tls_model
703 tls_symbolic_operand_type (rtx addr)
705 enum tls_model tls_kind = 0;
707 if (GET_CODE (addr) == CONST)
709 if (GET_CODE (XEXP (addr, 0)) == PLUS
710 && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF)
711 tls_kind = SYMBOL_REF_TLS_MODEL (XEXP (XEXP (addr, 0), 0));
713 else if (GET_CODE (addr) == SYMBOL_REF)
714 tls_kind = SYMBOL_REF_TLS_MODEL (addr);
719 /* Return true if X is a constant that is valid for some immediate
720 field in an instruction. */
723 ia64_legitimate_constant_p (rtx x)
725 switch (GET_CODE (x))
732 if (GET_MODE (x) == VOIDmode)
734 return satisfies_constraint_G (x);
738 /* ??? Short term workaround for PR 28490. We must make the code here
739 match the code in ia64_expand_move and move_operand, even though they
740 are both technically wrong. */
741 if (tls_symbolic_operand_type (x) == 0)
743 HOST_WIDE_INT addend = 0;
746 if (GET_CODE (op) == CONST
747 && GET_CODE (XEXP (op, 0)) == PLUS
748 && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT)
750 addend = INTVAL (XEXP (XEXP (op, 0), 1));
751 op = XEXP (XEXP (op, 0), 0);
754 if (any_offset_symbol_operand (op, GET_MODE (op))
755 || function_operand (op, GET_MODE (op)))
757 if (aligned_offset_symbol_operand (op, GET_MODE (op)))
758 return (addend & 0x3fff) == 0;
765 enum machine_mode mode = GET_MODE (x);
767 if (mode == V2SFmode)
768 return satisfies_constraint_Y (x);
770 return (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
771 && GET_MODE_SIZE (mode) <= 8);
779 /* Don't allow TLS addresses to get spilled to memory. */
782 ia64_cannot_force_const_mem (rtx x)
784 if (GET_MODE (x) == RFmode)
786 return tls_symbolic_operand_type (x) != 0;
789 /* Expand a symbolic constant load. */
792 ia64_expand_load_address (rtx dest, rtx src)
794 gcc_assert (GET_CODE (dest) == REG);
796 /* ILP32 mode still loads 64-bits of data from the GOT. This avoids
797 having to pointer-extend the value afterward. Other forms of address
798 computation below are also more natural to compute as 64-bit quantities.
799 If we've been given an SImode destination register, change it. */
800 if (GET_MODE (dest) != Pmode)
801 dest = gen_rtx_REG_offset (dest, Pmode, REGNO (dest),
802 byte_lowpart_offset (Pmode, GET_MODE (dest)));
806 if (small_addr_symbolic_operand (src, VOIDmode))
810 emit_insn (gen_load_gprel64 (dest, src));
811 else if (GET_CODE (src) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (src))
812 emit_insn (gen_load_fptr (dest, src));
813 else if (sdata_symbolic_operand (src, VOIDmode))
814 emit_insn (gen_load_gprel (dest, src));
817 HOST_WIDE_INT addend = 0;
820 /* We did split constant offsets in ia64_expand_move, and we did try
821 to keep them split in move_operand, but we also allowed reload to
822 rematerialize arbitrary constants rather than spill the value to
823 the stack and reload it. So we have to be prepared here to split
825 if (GET_CODE (src) == CONST)
827 HOST_WIDE_INT hi, lo;
829 hi = INTVAL (XEXP (XEXP (src, 0), 1));
830 lo = ((hi & 0x3fff) ^ 0x2000) - 0x2000;
836 src = plus_constant (XEXP (XEXP (src, 0), 0), hi);
840 tmp = gen_rtx_HIGH (Pmode, src);
841 tmp = gen_rtx_PLUS (Pmode, tmp, pic_offset_table_rtx);
842 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
844 tmp = gen_rtx_LO_SUM (Pmode, dest, src);
845 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
849 tmp = gen_rtx_PLUS (Pmode, dest, GEN_INT (addend));
850 emit_insn (gen_rtx_SET (VOIDmode, dest, tmp));
857 static GTY(()) rtx gen_tls_tga;
859 gen_tls_get_addr (void)
862 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
866 static GTY(()) rtx thread_pointer_rtx;
868 gen_thread_pointer (void)
870 if (!thread_pointer_rtx)
871 thread_pointer_rtx = gen_rtx_REG (Pmode, 13);
872 return thread_pointer_rtx;
876 ia64_expand_tls_address (enum tls_model tls_kind, rtx op0, rtx op1,
877 rtx orig_op1, HOST_WIDE_INT addend)
879 rtx tga_op1, tga_op2, tga_ret, tga_eqv, tmp, insns;
881 HOST_WIDE_INT addend_lo, addend_hi;
885 case TLS_MODEL_GLOBAL_DYNAMIC:
888 tga_op1 = gen_reg_rtx (Pmode);
889 emit_insn (gen_load_dtpmod (tga_op1, op1));
891 tga_op2 = gen_reg_rtx (Pmode);
892 emit_insn (gen_load_dtprel (tga_op2, op1));
894 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
895 LCT_CONST, Pmode, 2, tga_op1,
896 Pmode, tga_op2, Pmode);
898 insns = get_insns ();
901 if (GET_MODE (op0) != Pmode)
903 emit_libcall_block (insns, op0, tga_ret, op1);
906 case TLS_MODEL_LOCAL_DYNAMIC:
907 /* ??? This isn't the completely proper way to do local-dynamic.
908 If the call to __tls_get_addr is used only by a single symbol,
909 then we should (somehow) move the dtprel to the second arg
910 to avoid the extra add. */
913 tga_op1 = gen_reg_rtx (Pmode);
914 emit_insn (gen_load_dtpmod (tga_op1, op1));
916 tga_op2 = const0_rtx;
918 tga_ret = emit_library_call_value (gen_tls_get_addr (), NULL_RTX,
919 LCT_CONST, Pmode, 2, tga_op1,
920 Pmode, tga_op2, Pmode);
922 insns = get_insns ();
925 tga_eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
927 tmp = gen_reg_rtx (Pmode);
928 emit_libcall_block (insns, tmp, tga_ret, tga_eqv);
930 if (!register_operand (op0, Pmode))
931 op0 = gen_reg_rtx (Pmode);
934 emit_insn (gen_load_dtprel (op0, op1));
935 emit_insn (gen_adddi3 (op0, tmp, op0));
938 emit_insn (gen_add_dtprel (op0, op1, tmp));
941 case TLS_MODEL_INITIAL_EXEC:
942 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
943 addend_hi = addend - addend_lo;
945 op1 = plus_constant (op1, addend_hi);
948 tmp = gen_reg_rtx (Pmode);
949 emit_insn (gen_load_tprel (tmp, op1));
951 if (!register_operand (op0, Pmode))
952 op0 = gen_reg_rtx (Pmode);
953 emit_insn (gen_adddi3 (op0, tmp, gen_thread_pointer ()));
956 case TLS_MODEL_LOCAL_EXEC:
957 if (!register_operand (op0, Pmode))
958 op0 = gen_reg_rtx (Pmode);
964 emit_insn (gen_load_tprel (op0, op1));
965 emit_insn (gen_adddi3 (op0, op0, gen_thread_pointer ()));
968 emit_insn (gen_add_tprel (op0, op1, gen_thread_pointer ()));
976 op0 = expand_simple_binop (Pmode, PLUS, op0, GEN_INT (addend),
977 orig_op0, 1, OPTAB_DIRECT);
980 if (GET_MODE (orig_op0) == Pmode)
982 return gen_lowpart (GET_MODE (orig_op0), op0);
986 ia64_expand_move (rtx op0, rtx op1)
988 enum machine_mode mode = GET_MODE (op0);
990 if (!reload_in_progress && !reload_completed && !ia64_move_ok (op0, op1))
991 op1 = force_reg (mode, op1);
993 if ((mode == Pmode || mode == ptr_mode) && symbolic_operand (op1, VOIDmode))
995 HOST_WIDE_INT addend = 0;
996 enum tls_model tls_kind;
999 if (GET_CODE (op1) == CONST
1000 && GET_CODE (XEXP (op1, 0)) == PLUS
1001 && GET_CODE (XEXP (XEXP (op1, 0), 1)) == CONST_INT)
1003 addend = INTVAL (XEXP (XEXP (op1, 0), 1));
1004 sym = XEXP (XEXP (op1, 0), 0);
1007 tls_kind = tls_symbolic_operand_type (sym);
1009 return ia64_expand_tls_address (tls_kind, op0, sym, op1, addend);
1011 if (any_offset_symbol_operand (sym, mode))
1013 else if (aligned_offset_symbol_operand (sym, mode))
1015 HOST_WIDE_INT addend_lo, addend_hi;
1017 addend_lo = ((addend & 0x3fff) ^ 0x2000) - 0x2000;
1018 addend_hi = addend - addend_lo;
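/* The ^ 0x2000 / - 0x2000 dance sign-extends the low 14 bits of the
   addend; e.g. (a sketch) addend 0x6000 splits into addend_lo = -0x2000
   and addend_hi = 0x8000.  The high part is folded into the symbol just
   below, and the low part is applied with a separate add afterwards.  */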
1022 op1 = plus_constant (sym, addend_hi);
1031 if (reload_completed)
1033 /* We really should have taken care of this offset earlier. */
1034 gcc_assert (addend == 0);
1035 if (ia64_expand_load_address (op0, op1))
1041 rtx subtarget = !can_create_pseudo_p () ? op0 : gen_reg_rtx (mode);
1043 emit_insn (gen_rtx_SET (VOIDmode, subtarget, op1));
1045 op1 = expand_simple_binop (mode, PLUS, subtarget,
1046 GEN_INT (addend), op0, 1, OPTAB_DIRECT);
1055 /* Split a move from OP1 to OP0 conditional on COND. */
1058 ia64_emit_cond_move (rtx op0, rtx op1, rtx cond)
1060 rtx insn, first = get_last_insn ();
1062 emit_move_insn (op0, op1);
1064 for (insn = get_last_insn (); insn != first; insn = PREV_INSN (insn))
1066 PATTERN (insn) = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond),
1070 /* Split a post-reload TImode or TFmode reference into two DImode
1071 components. This is made extra difficult by the fact that we do
1072 not get any scratch registers to work with, because reload cannot
1073 be prevented from giving us a scratch that overlaps the register
1074 pair involved. So instead, when addressing memory, we tweak the
1075 pointer register up and back down with POST_INCs. Or up and not
1076 back down when we can get away with it.
1078 REVERSED is true when the loads must be done in reversed order
1079 (high word first) for correctness. DEAD is true when the pointer
1080 dies with the second insn we generate and therefore the second
1081 address must not carry a postmodify.
1083 May return an insn which is to be emitted after the moves. */
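/* For instance (a sketch), a TImode load from (mem (reg BASE)) becomes two
   DImode loads, the first through (post_inc BASE) and the second through
   (post_dec BASE) so that BASE ends up unchanged, unless DEAD lets us omit
   the restoring post-decrement.  */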
1086 ia64_split_tmode (rtx out[2], rtx in, bool reversed, bool dead)
1090 switch (GET_CODE (in))
1093 out[reversed] = gen_rtx_REG (DImode, REGNO (in));
1094 out[!reversed] = gen_rtx_REG (DImode, REGNO (in) + 1);
1099 /* Cannot occur reversed. */
1100 gcc_assert (!reversed);
1102 if (GET_MODE (in) != TFmode)
1103 split_double (in, &out[0], &out[1]);
1105 /* split_double does not understand how to split a TFmode
1106 quantity into a pair of DImode constants. */
1109 unsigned HOST_WIDE_INT p[2];
1110 long l[4]; /* TFmode is 128 bits */
1112 REAL_VALUE_FROM_CONST_DOUBLE (r, in);
1113 real_to_target (l, &r, TFmode);
1115 if (FLOAT_WORDS_BIG_ENDIAN)
1117 p[0] = (((unsigned HOST_WIDE_INT) l[0]) << 32) + l[1];
1118 p[1] = (((unsigned HOST_WIDE_INT) l[2]) << 32) + l[3];
1122 p[0] = (((unsigned HOST_WIDE_INT) l[1]) << 32) + l[0];
1123 p[1] = (((unsigned HOST_WIDE_INT) l[3]) << 32) + l[2];
1125 out[0] = GEN_INT (p[0]);
1126 out[1] = GEN_INT (p[1]);
1132 rtx base = XEXP (in, 0);
1135 switch (GET_CODE (base))
1140 out[0] = adjust_automodify_address
1141 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1142 out[1] = adjust_automodify_address
1143 (in, DImode, dead ? 0 : gen_rtx_POST_DEC (Pmode, base), 8);
1147 /* Reversal requires a pre-increment, which can only
1148 be done as a separate insn. */
1149 emit_insn (gen_adddi3 (base, base, GEN_INT (8)));
1150 out[0] = adjust_automodify_address
1151 (in, DImode, gen_rtx_POST_DEC (Pmode, base), 8);
1152 out[1] = adjust_address (in, DImode, 0);
1157 gcc_assert (!reversed && !dead);
1159 /* Just do the increment in two steps. */
1160 out[0] = adjust_automodify_address (in, DImode, 0, 0);
1161 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1165 gcc_assert (!reversed && !dead);
1167 /* Add 8, subtract 24. */
1168 base = XEXP (base, 0);
1169 out[0] = adjust_automodify_address
1170 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1171 out[1] = adjust_automodify_address
1173 gen_rtx_POST_MODIFY (Pmode, base, plus_constant (base, -24)),
1178 gcc_assert (!reversed && !dead);
1180 /* Extract and adjust the modification. This case is
1181 trickier than the others, because we might have an
1182 index register, or we might have a combined offset that
1183 doesn't fit a signed 9-bit displacement field. We can
1184 assume the incoming expression is already legitimate. */
1185 offset = XEXP (base, 1);
1186 base = XEXP (base, 0);
1188 out[0] = adjust_automodify_address
1189 (in, DImode, gen_rtx_POST_INC (Pmode, base), 0);
1191 if (GET_CODE (XEXP (offset, 1)) == REG)
1193 /* Can't adjust the postmodify to match. Emit the
1194 original, then a separate addition insn. */
1195 out[1] = adjust_automodify_address (in, DImode, 0, 8);
1196 fixup = gen_adddi3 (base, base, GEN_INT (-8));
1200 gcc_assert (GET_CODE (XEXP (offset, 1)) == CONST_INT);
1201 if (INTVAL (XEXP (offset, 1)) < -256 + 8)
1203 /* Again the postmodify cannot be made to match,
1204 but in this case it's more efficient to get rid
1205 of the postmodify entirely and fix up with an
1207 out[1] = adjust_automodify_address (in, DImode, base, 8);
1209 (base, base, GEN_INT (INTVAL (XEXP (offset, 1)) - 8));
1213 /* Combined offset still fits in the displacement field.
1214 (We cannot overflow it at the high end.) */
1215 out[1] = adjust_automodify_address
1216 (in, DImode, gen_rtx_POST_MODIFY
1217 (Pmode, base, gen_rtx_PLUS
1219 GEN_INT (INTVAL (XEXP (offset, 1)) - 8))),
1238 /* Split a TImode or TFmode move instruction after reload.
1239 This is used by *movtf_internal and *movti_internal. */
1241 ia64_split_tmode_move (rtx operands[])
1243 rtx in[2], out[2], insn;
1246 bool reversed = false;
1248 /* It is possible for reload to decide to overwrite a pointer with
1249 the value it points to. In that case we have to do the loads in
1250 the appropriate order so that the pointer is not destroyed too
1251 early. Also we must not generate a postmodify for that second
1252 load, or rws_access_regno will die. */
1253 if (GET_CODE (operands[1]) == MEM
1254 && reg_overlap_mentioned_p (operands[0], operands[1]))
1256 rtx base = XEXP (operands[1], 0);
1257 while (GET_CODE (base) != REG)
1258 base = XEXP (base, 0);
1260 if (REGNO (base) == REGNO (operands[0]))
1264 /* Another reason to do the moves in reversed order is if the first
1265 element of the target register pair is also the second element of
1266 the source register pair. */
1267 if (GET_CODE (operands[0]) == REG && GET_CODE (operands[1]) == REG
1268 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
1271 fixup[0] = ia64_split_tmode (in, operands[1], reversed, dead);
1272 fixup[1] = ia64_split_tmode (out, operands[0], reversed, dead);
1274 #define MAYBE_ADD_REG_INC_NOTE(INSN, EXP) \
1275 if (GET_CODE (EXP) == MEM \
1276 && (GET_CODE (XEXP (EXP, 0)) == POST_MODIFY \
1277 || GET_CODE (XEXP (EXP, 0)) == POST_INC \
1278 || GET_CODE (XEXP (EXP, 0)) == POST_DEC)) \
1279 REG_NOTES (INSN) = gen_rtx_EXPR_LIST (REG_INC, \
1280 XEXP (XEXP (EXP, 0), 0), \
1283 insn = emit_insn (gen_rtx_SET (VOIDmode, out[0], in[0]));
1284 MAYBE_ADD_REG_INC_NOTE (insn, in[0]);
1285 MAYBE_ADD_REG_INC_NOTE (insn, out[0]);
1287 insn = emit_insn (gen_rtx_SET (VOIDmode, out[1], in[1]));
1288 MAYBE_ADD_REG_INC_NOTE (insn, in[1]);
1289 MAYBE_ADD_REG_INC_NOTE (insn, out[1]);
1292 emit_insn (fixup[0]);
1294 emit_insn (fixup[1]);
1296 #undef MAYBE_ADD_REG_INC_NOTE
1299 /* ??? Fixing GR->FR XFmode moves during reload is hard. You need to go
1300 through memory plus an extra GR scratch register. Except that you can
1301 either get the first from SECONDARY_MEMORY_NEEDED or the second from
1302 SECONDARY_RELOAD_CLASS, but not both.
1304 We got into problems in the first place by allowing a construct like
1305 (subreg:XF (reg:TI)), which we got from a union containing a long double.
1306 This solution attempts to prevent this situation from occurring. When
1307 we see something like the above, we spill the inner register to memory. */
1310 spill_xfmode_rfmode_operand (rtx in, int force, enum machine_mode mode)
1312 if (GET_CODE (in) == SUBREG
1313 && GET_MODE (SUBREG_REG (in)) == TImode
1314 && GET_CODE (SUBREG_REG (in)) == REG)
1316 rtx memt = assign_stack_temp (TImode, 16, 0);
1317 emit_move_insn (memt, SUBREG_REG (in));
1318 return adjust_address (memt, mode, 0);
1320 else if (force && GET_CODE (in) == REG)
1322 rtx memx = assign_stack_temp (mode, 16, 0);
1323 emit_move_insn (memx, in);
1330 /* Expand the movxf or movrf pattern (MODE says which) with the given
1331 OPERANDS, returning true if the pattern should then invoke
1335 ia64_expand_movxf_movrf (enum machine_mode mode, rtx operands[])
1337 rtx op0 = operands[0];
1339 if (GET_CODE (op0) == SUBREG)
1340 op0 = SUBREG_REG (op0);
1342 /* We must support XFmode loads into general registers for stdarg/vararg,
1343 unprototyped calls, and a rare case where a long double is passed as
1344 an argument after a float HFA fills the FP registers. We split them into
1345 DImode loads for convenience. We also need to support XFmode stores
1346 for the last case. This case does not happen for stdarg/vararg routines,
1347 because we do a block store to memory of unnamed arguments. */
1349 if (GET_CODE (op0) == REG && GR_REGNO_P (REGNO (op0)))
1353 /* We're hoping to transform everything that deals with XFmode
1354 quantities and GR registers early in the compiler. */
1355 gcc_assert (can_create_pseudo_p ());
1357 /* Struct to register can just use TImode instead. */
1358 if ((GET_CODE (operands[1]) == SUBREG
1359 && GET_MODE (SUBREG_REG (operands[1])) == TImode)
1360 || (GET_CODE (operands[1]) == REG
1361 && GR_REGNO_P (REGNO (operands[1]))))
1363 rtx op1 = operands[1];
1365 if (GET_CODE (op1) == SUBREG)
1366 op1 = SUBREG_REG (op1);
1368 op1 = gen_rtx_REG (TImode, REGNO (op1));
1370 emit_move_insn (gen_rtx_REG (TImode, REGNO (op0)), op1);
1374 if (GET_CODE (operands[1]) == CONST_DOUBLE)
1376 /* Don't word-swap when reading in the constant. */
1377 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0)),
1378 operand_subword (operands[1], WORDS_BIG_ENDIAN,
1380 emit_move_insn (gen_rtx_REG (DImode, REGNO (op0) + 1),
1381 operand_subword (operands[1], !WORDS_BIG_ENDIAN,
1386 /* If the quantity is in a register not known to be GR, spill it. */
1387 if (register_operand (operands[1], mode))
1388 operands[1] = spill_xfmode_rfmode_operand (operands[1], 1, mode);
1390 gcc_assert (GET_CODE (operands[1]) == MEM);
1392 /* Don't word-swap when reading in the value. */
1393 out[0] = gen_rtx_REG (DImode, REGNO (op0));
1394 out[1] = gen_rtx_REG (DImode, REGNO (op0) + 1);
1396 emit_move_insn (out[0], adjust_address (operands[1], DImode, 0));
1397 emit_move_insn (out[1], adjust_address (operands[1], DImode, 8));
1401 if (GET_CODE (operands[1]) == REG && GR_REGNO_P (REGNO (operands[1])))
1403 /* We're hoping to transform everything that deals with XFmode
1404 quantities and GR registers early in the compiler. */
1405 gcc_assert (can_create_pseudo_p ());
1407 /* Op0 can't be a GR_REG here, as that case is handled above.
1408 If op0 is a register, then we spill op1, so that we now have a
1409 MEM operand. This requires creating an XFmode subreg of a TImode reg
1410 to force the spill. */
1411 if (register_operand (operands[0], mode))
1413 rtx op1 = gen_rtx_REG (TImode, REGNO (operands[1]));
1414 op1 = gen_rtx_SUBREG (mode, op1, 0);
1415 operands[1] = spill_xfmode_rfmode_operand (op1, 0, mode);
1422 gcc_assert (GET_CODE (operands[0]) == MEM);
1424 /* Don't word-swap when writing out the value. */
1425 in[0] = gen_rtx_REG (DImode, REGNO (operands[1]));
1426 in[1] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1);
1428 emit_move_insn (adjust_address (operands[0], DImode, 0), in[0]);
1429 emit_move_insn (adjust_address (operands[0], DImode, 8), in[1]);
1434 if (!reload_in_progress && !reload_completed)
1436 operands[1] = spill_xfmode_rfmode_operand (operands[1], 0, mode);
1438 if (GET_MODE (op0) == TImode && GET_CODE (op0) == REG)
1440 rtx memt, memx, in = operands[1];
1441 if (CONSTANT_P (in))
1442 in = validize_mem (force_const_mem (mode, in));
1443 if (GET_CODE (in) == MEM)
1444 memt = adjust_address (in, TImode, 0);
1447 memt = assign_stack_temp (TImode, 16, 0);
1448 memx = adjust_address (memt, mode, 0);
1449 emit_move_insn (memx, in);
1451 emit_move_insn (op0, memt);
1455 if (!ia64_move_ok (operands[0], operands[1]))
1456 operands[1] = force_reg (mode, operands[1]);
1462 /* Emit comparison instruction if necessary, returning the expression
1463 that holds the compare result in the proper mode. */
1465 static GTY(()) rtx cmptf_libfunc;
1468 ia64_expand_compare (enum rtx_code code, enum machine_mode mode)
1470 rtx op0 = ia64_compare_op0, op1 = ia64_compare_op1;
1473 /* If we have a BImode input, then we already have a compare result, and
1474 do not need to emit another comparison. */
1475 if (GET_MODE (op0) == BImode)
1477 gcc_assert ((code == NE || code == EQ) && op1 == const0_rtx);
1480 /* HPUX TFmode compare requires a library call to _U_Qfcmp, which takes a
1481 magic number as its third argument, that indicates what to do.
1482 The return value is an integer to be compared against zero. */
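/* For instance (a sketch), (lt:TF a b) is compiled as a call to
   _U_Qfcmp (a, b, QCMP_LT|QCMP_INV) followed by a test of the DImode
   result against zero with NE; the QCMP_INV bit asks the library to
   raise FP_INVALID for SNaN operands, as ordered relationals must.  */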
1483 else if (GET_MODE (op0) == TFmode)
1486 QCMP_INV = 1, /* Raise FP_INVALID on SNaN as a side effect. */
1492 enum rtx_code ncode;
1495 gcc_assert (cmptf_libfunc && GET_MODE (op1) == TFmode);
1498 /* 1 = equal, 0 = not equal. Equality operators do
1499 not raise FP_INVALID when given an SNaN operand. */
1500 case EQ: magic = QCMP_EQ; ncode = NE; break;
1501 case NE: magic = QCMP_EQ; ncode = EQ; break;
1502 /* isunordered() from C99. */
1503 case UNORDERED: magic = QCMP_UNORD; ncode = NE; break;
1504 case ORDERED: magic = QCMP_UNORD; ncode = EQ; break;
1505 /* Relational operators raise FP_INVALID when given
1507 case LT: magic = QCMP_LT |QCMP_INV; ncode = NE; break;
1508 case LE: magic = QCMP_LT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1509 case GT: magic = QCMP_GT |QCMP_INV; ncode = NE; break;
1510 case GE: magic = QCMP_GT|QCMP_EQ|QCMP_INV; ncode = NE; break;
1511 /* FUTURE: Implement UNEQ, UNLT, UNLE, UNGT, UNGE, LTGT.
1512 Expanders for buneq etc. would have to be added to ia64.md
1513 for this to be useful. */
1514 default: gcc_unreachable ();
1519 ret = emit_library_call_value (cmptf_libfunc, 0, LCT_CONST, DImode, 3,
1520 op0, TFmode, op1, TFmode,
1521 GEN_INT (magic), DImode);
1522 cmp = gen_reg_rtx (BImode);
1523 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1524 gen_rtx_fmt_ee (ncode, BImode,
1527 insns = get_insns ();
1530 emit_libcall_block (insns, cmp, cmp,
1531 gen_rtx_fmt_ee (code, BImode, op0, op1));
1536 cmp = gen_reg_rtx (BImode);
1537 emit_insn (gen_rtx_SET (VOIDmode, cmp,
1538 gen_rtx_fmt_ee (code, BImode, op0, op1)));
1542 return gen_rtx_fmt_ee (code, mode, cmp, const0_rtx);
1545 /* Generate an integral vector comparison. Return true if the condition has
1546 been reversed, and so the sense of the comparison should be inverted. */
1549 ia64_expand_vecint_compare (enum rtx_code code, enum machine_mode mode,
1550 rtx dest, rtx op0, rtx op1)
1552 bool negate = false;
1555 /* Canonicalize the comparison to EQ, GT, GTU. */
1566 code = reverse_condition (code);
1572 code = reverse_condition (code);
1578 code = swap_condition (code);
1579 x = op0, op0 = op1, op1 = x;
1586 /* Unsigned parallel compare is not supported by the hardware. Play some
1587 tricks to turn this into a signed comparison against 0. */
1596 /* Perform a parallel modulo subtraction. */
1597 t1 = gen_reg_rtx (V2SImode);
1598 emit_insn (gen_subv2si3 (t1, op0, op1));
1600 /* Extract the original sign bit of op0. */
1601 mask = GEN_INT (-0x80000000);
1602 mask = gen_rtx_CONST_VECTOR (V2SImode, gen_rtvec (2, mask, mask));
1603 mask = force_reg (V2SImode, mask);
1604 t2 = gen_reg_rtx (V2SImode);
1605 emit_insn (gen_andv2si3 (t2, op0, mask));
1607 /* XOR it back into the result of the subtraction. This results
1608 in the sign bit set iff we saw unsigned underflow. */
1609 x = gen_reg_rtx (V2SImode);
1610 emit_insn (gen_xorv2si3 (x, t1, t2));
1614 op1 = CONST0_RTX (mode);
1620 /* Perform a parallel unsigned saturating subtraction. */
1621 x = gen_reg_rtx (mode);
1622 emit_insn (gen_rtx_SET (VOIDmode, x,
1623 gen_rtx_US_MINUS (mode, op0, op1)));
1627 op1 = CONST0_RTX (mode);
1636 x = gen_rtx_fmt_ee (code, mode, op0, op1);
1637 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1642 /* Emit an integral vector conditional move. */
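/* The selection is computed bitwise as
   dest = (cmp & true-arm) | (~cmp & false-arm), with a single AND when
   either arm is the zero vector.  NEGATE from the compare expansion swaps
   which of operands[1]/operands[2] plays the true arm.  */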
1645 ia64_expand_vecint_cmov (rtx operands[])
1647 enum machine_mode mode = GET_MODE (operands[0]);
1648 enum rtx_code code = GET_CODE (operands[3]);
1652 cmp = gen_reg_rtx (mode);
1653 negate = ia64_expand_vecint_compare (code, mode, cmp,
1654 operands[4], operands[5]);
1656 ot = operands[1+negate];
1657 of = operands[2-negate];
1659 if (ot == CONST0_RTX (mode))
1661 if (of == CONST0_RTX (mode))
1663 emit_move_insn (operands[0], ot);
1667 x = gen_rtx_NOT (mode, cmp);
1668 x = gen_rtx_AND (mode, x, of);
1669 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1671 else if (of == CONST0_RTX (mode))
1673 x = gen_rtx_AND (mode, cmp, ot);
1674 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1680 t = gen_reg_rtx (mode);
1681 x = gen_rtx_AND (mode, cmp, operands[1+negate]);
1682 emit_insn (gen_rtx_SET (VOIDmode, t, x));
1684 f = gen_reg_rtx (mode);
1685 x = gen_rtx_NOT (mode, cmp);
1686 x = gen_rtx_AND (mode, x, operands[2-negate]);
1687 emit_insn (gen_rtx_SET (VOIDmode, f, x));
1689 x = gen_rtx_IOR (mode, t, f);
1690 emit_insn (gen_rtx_SET (VOIDmode, operands[0], x));
1694 /* Emit an integral vector min or max operation. Return true if all done. */
1697 ia64_expand_vecint_minmax (enum rtx_code code, enum machine_mode mode,
1702 /* These four combinations are supported directly. */
1703 if (mode == V8QImode && (code == UMIN || code == UMAX))
1705 if (mode == V4HImode && (code == SMIN || code == SMAX))
1708 /* This combination can be implemented with only saturating subtraction. */
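/* This uses the identity umax (a, b) = (a -us b) + b, where -us is
   unsigned saturating subtraction: if a <= b the difference saturates
   to zero and the sum is b, otherwise the sum is exactly a.  */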
1709 if (mode == V4HImode && code == UMAX)
1711 rtx x, tmp = gen_reg_rtx (mode);
1713 x = gen_rtx_US_MINUS (mode, operands[1], operands[2]);
1714 emit_insn (gen_rtx_SET (VOIDmode, tmp, x));
1716 emit_insn (gen_addv4hi3 (operands[0], tmp, operands[2]));
1720 /* Everything else implemented via vector comparisons. */
1721 xops[0] = operands[0];
1722 xops[4] = xops[1] = operands[1];
1723 xops[5] = xops[2] = operands[2];
1742 xops[3] = gen_rtx_fmt_ee (code, VOIDmode, operands[1], operands[2]);
1744 ia64_expand_vecint_cmov (xops);
1748 /* Emit an integral vector widening sum operation. */
1751 ia64_expand_widen_sum (rtx operands[3], bool unsignedp)
1754 enum machine_mode wmode, mode;
1755 rtx (*unpack_l) (rtx, rtx, rtx);
1756 rtx (*unpack_h) (rtx, rtx, rtx);
1757 rtx (*plus) (rtx, rtx, rtx);
1759 wmode = GET_MODE (operands[0]);
1760 mode = GET_MODE (operands[1]);
1765 unpack_l = gen_unpack1_l;
1766 unpack_h = gen_unpack1_h;
1767 plus = gen_addv4hi3;
1770 unpack_l = gen_unpack2_l;
1771 unpack_h = gen_unpack2_h;
1772 plus = gen_addv2si3;
1778 /* Fill in x with the sign extension of each element in op1. */
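/* For the unsigned case x stays zero; for the signed case the LT compare
   against zero below makes each lane of x all-ones exactly when the
   corresponding element of op1 is negative, so unpacking op1 against x
   interleaves every element with its sign extension.  */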
1780 x = CONST0_RTX (mode);
1785 x = gen_reg_rtx (mode);
1787 neg = ia64_expand_vecint_compare (LT, mode, x, operands[1],
1792 l = gen_reg_rtx (wmode);
1793 h = gen_reg_rtx (wmode);
1794 s = gen_reg_rtx (wmode);
1796 emit_insn (unpack_l (gen_lowpart (mode, l), operands[1], x));
1797 emit_insn (unpack_h (gen_lowpart (mode, h), operands[1], x));
1798 emit_insn (plus (s, l, operands[2]));
1799 emit_insn (plus (operands[0], h, s));
1802 /* Emit a signed or unsigned V8QI dot product operation. */
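/* The V8QI operands are widened to V4HI low/high halves (with X1/X2
   providing the sign bits), pmpy2.l/pmpy2.r form four V2SI vectors of
   partial products, and four V2SI additions fold those together with
   the accumulator in operands[3].  */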
1805 ia64_expand_dot_prod_v8qi (rtx operands[4], bool unsignedp)
1807 rtx l1, l2, h1, h2, x1, x2, p1, p2, p3, p4, s1, s2, s3;
1809 /* Fill in x1 and x2 with the sign extension of each element. */
1811 x1 = x2 = CONST0_RTX (V8QImode);
1816 x1 = gen_reg_rtx (V8QImode);
1817 x2 = gen_reg_rtx (V8QImode);
1819 neg = ia64_expand_vecint_compare (LT, V8QImode, x1, operands[1],
1820 CONST0_RTX (V8QImode));
1822 neg = ia64_expand_vecint_compare (LT, V8QImode, x2, operands[2],
1823 CONST0_RTX (V8QImode));
1827 l1 = gen_reg_rtx (V4HImode);
1828 l2 = gen_reg_rtx (V4HImode);
1829 h1 = gen_reg_rtx (V4HImode);
1830 h2 = gen_reg_rtx (V4HImode);
1832 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l1), operands[1], x1));
1833 emit_insn (gen_unpack1_l (gen_lowpart (V8QImode, l2), operands[2], x2));
1834 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h1), operands[1], x1));
1835 emit_insn (gen_unpack1_h (gen_lowpart (V8QImode, h2), operands[2], x2));
1837 p1 = gen_reg_rtx (V2SImode);
1838 p2 = gen_reg_rtx (V2SImode);
1839 p3 = gen_reg_rtx (V2SImode);
1840 p4 = gen_reg_rtx (V2SImode);
1841 emit_insn (gen_pmpy2_r (p1, l1, l2));
1842 emit_insn (gen_pmpy2_l (p2, l1, l2));
1843 emit_insn (gen_pmpy2_r (p3, h1, h2));
1844 emit_insn (gen_pmpy2_l (p4, h1, h2));
1846 s1 = gen_reg_rtx (V2SImode);
1847 s2 = gen_reg_rtx (V2SImode);
1848 s3 = gen_reg_rtx (V2SImode);
1849 emit_insn (gen_addv2si3 (s1, p1, p2));
1850 emit_insn (gen_addv2si3 (s2, p3, p4));
1851 emit_insn (gen_addv2si3 (s3, s1, operands[3]));
1852 emit_insn (gen_addv2si3 (operands[0], s2, s3));
1855 /* Emit the appropriate sequence for a call. */
1858 ia64_expand_call (rtx retval, rtx addr, rtx nextarg ATTRIBUTE_UNUSED,
1863 addr = XEXP (addr, 0);
1864 addr = convert_memory_address (DImode, addr);
1865 b0 = gen_rtx_REG (DImode, R_BR (0));
1867 /* ??? Should do this for functions known to bind local too. */
1868 if (TARGET_NO_PIC || TARGET_AUTO_PIC)
1871 insn = gen_sibcall_nogp (addr);
1873 insn = gen_call_nogp (addr, b0);
1875 insn = gen_call_value_nogp (retval, addr, b0);
1876 insn = emit_call_insn (insn);
1881 insn = gen_sibcall_gp (addr);
1883 insn = gen_call_gp (addr, b0);
1885 insn = gen_call_value_gp (retval, addr, b0);
1886 insn = emit_call_insn (insn);
1888 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
1892 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), b0);
1896 reg_emitted (enum ia64_frame_regs r)
1898 if (emitted_frame_related_regs[r] == 0)
1899 emitted_frame_related_regs[r] = current_frame_info.r[r];
1901 gcc_assert (emitted_frame_related_regs[r] == current_frame_info.r[r]);
1905 get_reg (enum ia64_frame_regs r)
1908 return current_frame_info.r[r];
1912 is_emitted (int regno)
1914 enum ia64_frame_regs r;
1916 for (r = reg_fp; r < number_of_ia64_frame_regs; r++)
1917 if (emitted_frame_related_regs[r] == regno)
1923 ia64_reload_gp (void)
1927 if (current_frame_info.r[reg_save_gp])
1929 tmp = gen_rtx_REG (DImode, get_reg (reg_save_gp));
1933 HOST_WIDE_INT offset;
1936 offset = (current_frame_info.spill_cfa_off
1937 + current_frame_info.spill_size);
1938 if (frame_pointer_needed)
1940 tmp = hard_frame_pointer_rtx;
1945 tmp = stack_pointer_rtx;
1946 offset = current_frame_info.total_size - offset;
1949 offset_r = GEN_INT (offset);
1950 if (satisfies_constraint_I (offset_r))
1951 emit_insn (gen_adddi3 (pic_offset_table_rtx, tmp, offset_r));
1954 emit_move_insn (pic_offset_table_rtx, offset_r);
1955 emit_insn (gen_adddi3 (pic_offset_table_rtx,
1956 pic_offset_table_rtx, tmp));
1959 tmp = gen_rtx_MEM (DImode, pic_offset_table_rtx);
1962 emit_move_insn (pic_offset_table_rtx, tmp);
1966 ia64_split_call (rtx retval, rtx addr, rtx retaddr, rtx scratch_r,
1967 rtx scratch_b, int noreturn_p, int sibcall_p)
1970 bool is_desc = false;
1972 /* If we find we're calling through a register, then we're actually
1973 calling through a descriptor, so load up the values. */
1974 if (REG_P (addr) && GR_REGNO_P (REGNO (addr)))
1979 /* ??? We are currently constrained to *not* use peep2, because
1980 we can legitimately change the global lifetime of the GP
1981 (in the form of killing where previously live). This is
1982 because a call through a descriptor doesn't use the previous
1983 value of the GP, while a direct call does, and we do not
1984 commit to either form until the split here.
1986 That said, this means that we lack precise life info for
1987 whether ADDR is dead after this call. This is not terribly
1988 important, since we can fix things up essentially for free
1989 with the POST_DEC below, but it's nice to not use it when we
1990 can immediately tell it's not necessary. */
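/* An IA-64 function descriptor is two 8-byte words: the entry point at
   offset 0 and the callee's gp at offset 8.  The post-increment load
   below fetches the code address and leaves ADDR pointing at the gp
   word; the post-decrement then restores ADDR when it is still live.  */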
1991 addr_dead_p = ((noreturn_p || sibcall_p
1992 || TEST_HARD_REG_BIT (regs_invalidated_by_call,
1994 && !FUNCTION_ARG_REGNO_P (REGNO (addr)));
1996 /* Load the code address into scratch_b. */
1997 tmp = gen_rtx_POST_INC (Pmode, addr);
1998 tmp = gen_rtx_MEM (Pmode, tmp);
1999 emit_move_insn (scratch_r, tmp);
2000 emit_move_insn (scratch_b, scratch_r);
2002 /* Load the GP address. If ADDR is not dead here, then we must
2003 revert the change made above via the POST_INCREMENT. */
2005 tmp = gen_rtx_POST_DEC (Pmode, addr);
2008 tmp = gen_rtx_MEM (Pmode, tmp);
2009 emit_move_insn (pic_offset_table_rtx, tmp);
2016 insn = gen_sibcall_nogp (addr);
2018 insn = gen_call_value_nogp (retval, addr, retaddr);
2020 insn = gen_call_nogp (addr, retaddr);
2021 emit_call_insn (insn);
2023 if ((!TARGET_CONST_GP || is_desc) && !noreturn_p && !sibcall_p)
2027 /* Expand an atomic operation. We want to perform MEM <CODE>= VAL atomically.
2029 This differs from the generic code in that we know about the zero-extending
2030 properties of cmpxchg, and the zero-extending requirements of ar.ccv. We
2031 also know that ld.acq+cmpxchg.rel equals a full barrier.
2033 The loop we want to generate looks like
2038 new_reg = cmp_reg op val;
2039 cmp_reg = compare-and-swap(mem, old_reg, new_reg)
2040 if (cmp_reg != old_reg)
2043 Note that we only do the plain load from memory once. Subsequent
2044 iterations use the value loaded by the compare-and-swap pattern. */
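/* At the instruction level the DImode case comes out roughly as
   (a sketch, mnemonics only):

	ld8.acq      r_cmp = [mem]
   .L:	mov          ar.ccv = r_cmp
	mov          r_old = r_cmp
	<op>         r_new = r_cmp, val
	cmpxchg8.rel r_cmp = [mem], r_new, ar.ccv
	cmp.ne       p6, p7 = r_cmp, r_old
   (p6)	br.cond      .L  */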
2047 ia64_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
2048 rtx old_dst, rtx new_dst)
2050 enum machine_mode mode = GET_MODE (mem);
2051 rtx old_reg, new_reg, cmp_reg, ar_ccv, label;
2052 enum insn_code icode;
2054 /* Special case for using fetchadd. */
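/* fetchadd can only add one of the immediates -16, -8, -4, -1, 1, 4, 8,
   or 16 (presumably what fetchadd_operand checks for), so MINUS is
   handled by negating VAL and treating it as PLUS.  */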
2055 if ((mode == SImode || mode == DImode)
2056 && (code == PLUS || code == MINUS)
2057 && fetchadd_operand (val, mode))
2060 val = GEN_INT (-INTVAL (val));
2063 old_dst = gen_reg_rtx (mode);
2065 emit_insn (gen_memory_barrier ());
2068 icode = CODE_FOR_fetchadd_acq_si;
2070 icode = CODE_FOR_fetchadd_acq_di;
2071 emit_insn (GEN_FCN (icode) (old_dst, mem, val));
2075 new_reg = expand_simple_binop (mode, PLUS, old_dst, val, new_dst,
2077 if (new_reg != new_dst)
2078 emit_move_insn (new_dst, new_reg);
2083 /* Because of the volatile mem read, we get an ld.acq, which is the
2084 front half of the full barrier. The end half is the cmpxchg.rel. */
2085 gcc_assert (MEM_VOLATILE_P (mem));
2087 old_reg = gen_reg_rtx (DImode);
2088 cmp_reg = gen_reg_rtx (DImode);
2089 label = gen_label_rtx ();
2093 val = simplify_gen_subreg (DImode, val, mode, 0);
2094 emit_insn (gen_extend_insn (cmp_reg, mem, DImode, mode, 1));
2097 emit_move_insn (cmp_reg, mem);
2101 ar_ccv = gen_rtx_REG (DImode, AR_CCV_REGNUM);
2102 emit_move_insn (old_reg, cmp_reg);
2103 emit_move_insn (ar_ccv, cmp_reg);
2106 emit_move_insn (old_dst, gen_lowpart (mode, cmp_reg));
2111 new_reg = expand_simple_unop (DImode, NOT, new_reg, NULL_RTX, true);
2114 new_reg = expand_simple_binop (DImode, code, new_reg, val, NULL_RTX,
2115 true, OPTAB_DIRECT);
2118 new_reg = gen_lowpart (mode, new_reg);
2120 emit_move_insn (new_dst, new_reg);
2124 case QImode: icode = CODE_FOR_cmpxchg_rel_qi; break;
2125 case HImode: icode = CODE_FOR_cmpxchg_rel_hi; break;
2126 case SImode: icode = CODE_FOR_cmpxchg_rel_si; break;
2127 case DImode: icode = CODE_FOR_cmpxchg_rel_di; break;
2132 emit_insn (GEN_FCN (icode) (cmp_reg, mem, ar_ccv, new_reg));
2134 emit_cmp_and_jump_insns (cmp_reg, old_reg, NE, NULL, DImode, true, label);
2137 /* Begin the assembly file. */
2140 ia64_file_start (void)
2142 /* Variable tracking should be run after all optimizations which change order
2143 of insns. It also needs a valid CFG. This can't be done in
2144 ia64_override_options, because flag_var_tracking is finalized after
2146 ia64_flag_var_tracking = flag_var_tracking;
2147 flag_var_tracking = 0;
2149 default_file_start ();
2150 emit_safe_across_calls ();
2154 emit_safe_across_calls (void)
2156 unsigned int rs, re;
2163 while (rs < 64 && call_used_regs[PR_REG (rs)])
2167 for (re = rs + 1; re < 64 && ! call_used_regs[PR_REG (re)]; re++)
2171 fputs ("\t.pred.safe_across_calls ", asm_out_file);
2175 fputc (',', asm_out_file);
2177 fprintf (asm_out_file, "p%u", rs);
2179 fprintf (asm_out_file, "p%u-p%u", rs, re - 1);
2183 fputc ('\n', asm_out_file);
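/* (Editor's note: with the default predicate-register conventions this
   loop typically emits

        .pred.safe_across_calls p1-p5,p16-p63

   i.e. the ranges of predicate registers that are preserved across
   calls.)  */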
2186 /* Globalize a declaration. */
2189 ia64_globalize_decl_name (FILE * stream, tree decl)
2191 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
2192 tree version_attr = lookup_attribute ("version_id", DECL_ATTRIBUTES (decl));
2195 tree v = TREE_VALUE (TREE_VALUE (version_attr));
2196 const char *p = TREE_STRING_POINTER (v);
2197 fprintf (stream, "\t.alias %s#, \"%s{%s}\"\n", name, name, p);
2199 targetm.asm_out.globalize_label (stream, name);
2200 if (TREE_CODE (decl) == FUNCTION_DECL)
2201 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "function");
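/* (Editor's illustration: given a declaration carrying the ia64
   "version_id" attribute, e.g.

        extern int foo (void) __attribute__ ((version_id ("20040821")));

   the code above first emits

        .alias foo#, "foo{20040821}"

   and then the usual globalization and .type directives for foo.)  */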
2204 /* Helper function for ia64_compute_frame_size: find an appropriate general
2205 register to spill some special register to. SPECIAL_SPILL_MASK contains
2206 bits in GR0 to GR31 that have already been allocated by this routine.
2207 TRY_LOCALS is true if we should attempt to locate a local regnum. */
2210 find_gr_spill (enum ia64_frame_regs r, int try_locals)
2214 if (emitted_frame_related_regs[r] != 0)
2216 regno = emitted_frame_related_regs[r];
2217 if (regno >= LOC_REG (0) && regno < LOC_REG (80 - frame_pointer_needed)
2218 && current_frame_info.n_local_regs < regno - LOC_REG (0) + 1)
2219 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2220 else if (current_function_is_leaf
2221 && regno >= GR_REG (1) && regno <= GR_REG (31))
2222 current_frame_info.gr_used_mask |= 1 << regno;
2227 /* If this is a leaf function, first try an otherwise unused
2228 call-clobbered register. */
2229 if (current_function_is_leaf)
2231 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2232 if (! df_regs_ever_live_p (regno)
2233 && call_used_regs[regno]
2234 && ! fixed_regs[regno]
2235 && ! global_regs[regno]
2236 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0
2237 && ! is_emitted (regno))
2239 current_frame_info.gr_used_mask |= 1 << regno;
2246 regno = current_frame_info.n_local_regs;
2247 /* If there is a frame pointer, then we can't use loc79, because
2248 that is HARD_FRAME_POINTER_REGNUM. In particular, see the
2249 reg_name switching code in ia64_expand_prologue. */
2250 while (regno < (80 - frame_pointer_needed))
2251 if (! is_emitted (LOC_REG (regno++)))
2253 current_frame_info.n_local_regs = regno;
2254 return LOC_REG (regno - 1);
2258 /* Failed to find a general register to spill to. Must use stack. */
2262 /* In order to make for nice schedules, we try to allocate every temporary
2263 to a different register. We must of course stay away from call-saved,
2264 fixed, and global registers. We must also stay away from registers
2265 allocated in current_frame_info.gr_used_mask, since those include regs
2266 used all through the prologue.
2268 Any register allocated here must be used immediately. The idea is to
2269 aid scheduling, not to solve data flow problems. */
2271 static int last_scratch_gr_reg;
2274 next_scratch_gr_reg (void)
2278 for (i = 0; i < 32; ++i)
2280 regno = (last_scratch_gr_reg + i + 1) & 31;
2281 if (call_used_regs[regno]
2282 && ! fixed_regs[regno]
2283 && ! global_regs[regno]
2284 && ((current_frame_info.gr_used_mask >> regno) & 1) == 0)
2286 last_scratch_gr_reg = regno;
2291 /* There must be _something_ available. */
2295 /* Helper function for ia64_compute_frame_size, called through
2296 diddle_return_value. Mark REG in current_frame_info.gr_used_mask. */
2299 mark_reg_gr_used_mask (rtx reg, void *data ATTRIBUTE_UNUSED)
2301 unsigned int regno = REGNO (reg);
2304 unsigned int i, n = hard_regno_nregs[regno][GET_MODE (reg)];
2305 for (i = 0; i < n; ++i)
2306 current_frame_info.gr_used_mask |= 1 << (regno + i);
2311 /* Returns the number of bytes offset between the frame pointer and the stack
2312 pointer for the current function. SIZE is the number of bytes of space
2313 needed for local variables. */
2316 ia64_compute_frame_size (HOST_WIDE_INT size)
2318 HOST_WIDE_INT total_size;
2319 HOST_WIDE_INT spill_size = 0;
2320 HOST_WIDE_INT extra_spill_size = 0;
2321 HOST_WIDE_INT pretend_args_size;
2324 int spilled_gr_p = 0;
2325 int spilled_fr_p = 0;
2331 if (current_frame_info.initialized)
2334 memset (&current_frame_info, 0, sizeof current_frame_info);
2335 CLEAR_HARD_REG_SET (mask);
2337 /* Don't allocate scratches to the return register. */
2338 diddle_return_value (mark_reg_gr_used_mask, NULL);
2340 /* Don't allocate scratches to the EH scratch registers. */
2341 if (cfun->machine->ia64_eh_epilogue_sp)
2342 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_sp, NULL);
2343 if (cfun->machine->ia64_eh_epilogue_bsp)
2344 mark_reg_gr_used_mask (cfun->machine->ia64_eh_epilogue_bsp, NULL);
2346 /* Find the size of the register stack frame. We have only 80 local
2347 registers, because we reserve 8 for the inputs and 8 for the
2350 /* Skip HARD_FRAME_POINTER_REGNUM (loc79) when frame_pointer_needed,
2351 since we'll be adjusting that down later. */
2352 regno = LOC_REG (78) + ! frame_pointer_needed;
2353 for (; regno >= LOC_REG (0); regno--)
2354 if (df_regs_ever_live_p (regno) && !is_emitted (regno))
2356 current_frame_info.n_local_regs = regno - LOC_REG (0) + 1;
2358 /* For functions marked with the syscall_linkage attribute, we must mark
2359 all eight input registers as in use, so that locals aren't visible to
2362 if (cfun->machine->n_varargs > 0
2363 || lookup_attribute ("syscall_linkage",
2364 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
2365 current_frame_info.n_input_regs = 8;
2368 for (regno = IN_REG (7); regno >= IN_REG (0); regno--)
2369 if (df_regs_ever_live_p (regno))
2371 current_frame_info.n_input_regs = regno - IN_REG (0) + 1;
2374 for (regno = OUT_REG (7); regno >= OUT_REG (0); regno--)
2375 if (df_regs_ever_live_p (regno))
2377 i = regno - OUT_REG (0) + 1;
2379 #ifndef PROFILE_HOOK
2380 /* When -p profiling, we need one output register for the mcount argument.
2381 Likewise for -a profiling for the bb_init_func argument. For -ax
2382 profiling, we need two output registers for the two bb_init_trace_func
2387 current_frame_info.n_output_regs = i;
2389 /* ??? No rotating register support yet. */
2390 current_frame_info.n_rotate_regs = 0;
2392 /* Discover which registers need spilling, and how much room that
2393 will take. Begin with floating point and general registers,
2394 which will always wind up on the stack. */
2396 for (regno = FR_REG (2); regno <= FR_REG (127); regno++)
2397 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2399 SET_HARD_REG_BIT (mask, regno);
2405 for (regno = GR_REG (1); regno <= GR_REG (31); regno++)
2406 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2408 SET_HARD_REG_BIT (mask, regno);
2414 for (regno = BR_REG (1); regno <= BR_REG (7); regno++)
2415 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2417 SET_HARD_REG_BIT (mask, regno);
2422 /* Now come all special registers that might get saved in other
2423 general registers. */
2425 if (frame_pointer_needed)
2427 current_frame_info.r[reg_fp] = find_gr_spill (reg_fp, 1);
2428 /* If we did not get a register, then we take LOC79. This is guaranteed
2429 to be free, even if regs_ever_live is already set, because this is
2430 HARD_FRAME_POINTER_REGNUM. This requires incrementing n_local_regs,
2431 as we don't count loc79 above. */
2432 if (current_frame_info.r[reg_fp] == 0)
2434 current_frame_info.r[reg_fp] = LOC_REG (79);
2435 current_frame_info.n_local_regs = LOC_REG (79) - LOC_REG (0) + 1;
2439 if (! current_function_is_leaf)
2441 /* Emit a save of BR0 if we call other functions. Do this even
2442 if this function doesn't return, as EH depends on this to be
2443 able to unwind the stack. */
2444 SET_HARD_REG_BIT (mask, BR_REG (0));
2446 current_frame_info.r[reg_save_b0] = find_gr_spill (reg_save_b0, 1);
2447 if (current_frame_info.r[reg_save_b0] == 0)
2449 extra_spill_size += 8;
2453 /* Similarly for ar.pfs. */
2454 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2455 current_frame_info.r[reg_save_ar_pfs] = find_gr_spill (reg_save_ar_pfs, 1);
2456 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2458 extra_spill_size += 8;
2462 /* Similarly for gp. Note that if we're calling setjmp, the stacked
2463 registers are clobbered, so we fall back to the stack. */
2464 current_frame_info.r[reg_save_gp]
2465 = (cfun->calls_setjmp ? 0 : find_gr_spill (reg_save_gp, 1));
2466 if (current_frame_info.r[reg_save_gp] == 0)
2468 SET_HARD_REG_BIT (mask, GR_REG (1));
2475 if (df_regs_ever_live_p (BR_REG (0)) && ! call_used_regs[BR_REG (0)])
2477 SET_HARD_REG_BIT (mask, BR_REG (0));
2478 extra_spill_size += 8;
2482 if (df_regs_ever_live_p (AR_PFS_REGNUM))
2484 SET_HARD_REG_BIT (mask, AR_PFS_REGNUM);
2485 current_frame_info.r[reg_save_ar_pfs]
2486 = find_gr_spill (reg_save_ar_pfs, 1);
2487 if (current_frame_info.r[reg_save_ar_pfs] == 0)
2489 extra_spill_size += 8;
2495 /* Unwind descriptor hackery: things are most efficient if we allocate
2496 consecutive GR save registers for RP, PFS, FP in that order. However,
2497 it is absolutely critical that FP get the only hard register that's
2498 guaranteed to be free, so we allocated it first. If all three did
2499 happen to be allocated hard regs, and are consecutive, rearrange them
2500 into the preferred order now.
2502 If we have already emitted code for any of those registers,
2503 then it's already too late to change. */
2504 min_regno = MIN (current_frame_info.r[reg_fp],
2505 MIN (current_frame_info.r[reg_save_b0],
2506 current_frame_info.r[reg_save_ar_pfs]));
2507 max_regno = MAX (current_frame_info.r[reg_fp],
2508 MAX (current_frame_info.r[reg_save_b0],
2509 current_frame_info.r[reg_save_ar_pfs]));
2511 && min_regno + 2 == max_regno
2512 && (current_frame_info.r[reg_fp] == min_regno + 1
2513 || current_frame_info.r[reg_save_b0] == min_regno + 1
2514 || current_frame_info.r[reg_save_ar_pfs] == min_regno + 1)
2515 && (emitted_frame_related_regs[reg_save_b0] == 0
2516 || emitted_frame_related_regs[reg_save_b0] == min_regno)
2517 && (emitted_frame_related_regs[reg_save_ar_pfs] == 0
2518 || emitted_frame_related_regs[reg_save_ar_pfs] == min_regno + 1)
2519 && (emitted_frame_related_regs[reg_fp] == 0
2520 || emitted_frame_related_regs[reg_fp] == min_regno + 2))
2522 current_frame_info.r[reg_save_b0] = min_regno;
2523 current_frame_info.r[reg_save_ar_pfs] = min_regno + 1;
2524 current_frame_info.r[reg_fp] = min_regno + 2;
2527 /* See if we need to store the predicate register block. */
2528 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2529 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
2531 if (regno <= PR_REG (63))
2533 SET_HARD_REG_BIT (mask, PR_REG (0));
2534 current_frame_info.r[reg_save_pr] = find_gr_spill (reg_save_pr, 1);
2535 if (current_frame_info.r[reg_save_pr] == 0)
2537 extra_spill_size += 8;
2541 /* ??? Mark them all as used so that register renaming and such
2542 are free to use them. */
2543 for (regno = PR_REG (0); regno <= PR_REG (63); regno++)
2544 df_set_regs_ever_live (regno, true);
2547 /* If we're forced to use st8.spill, we're forced to save and restore
2548 ar.unat as well. The check for existing liveness allows inline asm
2549 to touch ar.unat. */
2550 if (spilled_gr_p || cfun->machine->n_varargs
2551 || df_regs_ever_live_p (AR_UNAT_REGNUM))
2553 df_set_regs_ever_live (AR_UNAT_REGNUM, true);
2554 SET_HARD_REG_BIT (mask, AR_UNAT_REGNUM);
2555 current_frame_info.r[reg_save_ar_unat]
2556 = find_gr_spill (reg_save_ar_unat, spill_size == 0);
2557 if (current_frame_info.r[reg_save_ar_unat] == 0)
2559 extra_spill_size += 8;
2564 if (df_regs_ever_live_p (AR_LC_REGNUM))
2566 SET_HARD_REG_BIT (mask, AR_LC_REGNUM);
2567 current_frame_info.r[reg_save_ar_lc]
2568 = find_gr_spill (reg_save_ar_lc, spill_size == 0);
2569 if (current_frame_info.r[reg_save_ar_lc] == 0)
2571 extra_spill_size += 8;
2576 /* If we have an odd number of words of pretend arguments written to
2577 the stack, then the FR save area will be unaligned. We round the
2578 size of this area up to keep things 16 byte aligned. */
2580 pretend_args_size = IA64_STACK_ALIGN (crtl->args.pretend_args_size);
2582 pretend_args_size = crtl->args.pretend_args_size;
2584 total_size = (spill_size + extra_spill_size + size + pretend_args_size
2585 + crtl->outgoing_args_size);
2586 total_size = IA64_STACK_ALIGN (total_size);
2588 /* We always use the 16-byte scratch area provided by the caller, but
2589 if we are a leaf function, there's no one to which we need to provide
2591 if (current_function_is_leaf)
2592 total_size = MAX (0, total_size - 16);
2594 current_frame_info.total_size = total_size;
2595 current_frame_info.spill_cfa_off = pretend_args_size - 16;
2596 current_frame_info.spill_size = spill_size;
2597 current_frame_info.extra_spill_size = extra_spill_size;
2598 COPY_HARD_REG_SET (current_frame_info.mask, mask);
2599 current_frame_info.n_spilled = n_spilled;
2600 current_frame_info.initialized = reload_completed;
2603 /* Compute the initial difference between the specified pair of registers. */
2606 ia64_initial_elimination_offset (int from, int to)
2608 HOST_WIDE_INT offset;
2610 ia64_compute_frame_size (get_frame_size ());
2613 case FRAME_POINTER_REGNUM:
2616 case HARD_FRAME_POINTER_REGNUM:
2617 if (current_function_is_leaf)
2618 offset = -current_frame_info.total_size;
2620 offset = -(current_frame_info.total_size
2621 - crtl->outgoing_args_size - 16);
2624 case STACK_POINTER_REGNUM:
2625 if (current_function_is_leaf)
2628 offset = 16 + crtl->outgoing_args_size;
2636 case ARG_POINTER_REGNUM:
2637 /* Arguments start above the 16 byte save area, unless stdarg,
2638 in which case we store through the 16 byte save area. */
2641 case HARD_FRAME_POINTER_REGNUM:
2642 offset = 16 - crtl->args.pretend_args_size;
2645 case STACK_POINTER_REGNUM:
2646 offset = (current_frame_info.total_size
2647 + 16 - crtl->args.pretend_args_size);
2662 /* If there are more than a trivial number of register spills, we use
2663 two interleaved iterators so that we can get two memory references
2666 In order to simplify things in the prologue and epilogue expanders,
2667 we use helper functions to fix up the memory references after the
2668 fact with the appropriate offsets to a POST_MODIFY memory mode.
2669 The following data structure tracks the state of the two iterators
2670 while insns are being emitted. */
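/* (Editor's illustration, fenced off with #if 0: a plain-C model of the
   two-iterator scheme.  Successive saves alternate between the two
   pointers, and each pointer then skips the slot the other one covers,
   so the stores can pair on the two memory units.  The real offsets and
   POST_MODIFY bookkeeping are handled by spill_restore_mem below.)  */
#if 0
static void
interleaved_spill_sketch (long slots[], const long regs[], int n)
{
  long *iter0 = &slots[0];      /* first iterator */
  long *iter1 = &slots[1];      /* second iterator, one slot further on */
  int i;

  for (i = 0; i < n; i++)
    if ((i & 1) == 0)
      {
        *iter0 = regs[i];
        iter0 += 2;             /* post-modify by 16 bytes */
      }
    else
      {
        *iter1 = regs[i];
        iter1 += 2;
      }
}
#endif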
2672 struct spill_fill_data
2674 rtx init_after; /* point at which to emit initializations */
2675 rtx init_reg[2]; /* initial base register */
2676 rtx iter_reg[2]; /* the iterator registers */
2677 rtx *prev_addr[2]; /* address of last memory use */
2678 rtx prev_insn[2]; /* the insn corresponding to prev_addr */
2679 HOST_WIDE_INT prev_off[2]; /* last offset */
2680 int n_iter; /* number of iterators in use */
2681 int next_iter; /* next iterator to use */
2682 unsigned int save_gr_used_mask;
2685 static struct spill_fill_data spill_fill_data;
2688 setup_spill_pointers (int n_spills, rtx init_reg, HOST_WIDE_INT cfa_off)
2692 spill_fill_data.init_after = get_last_insn ();
2693 spill_fill_data.init_reg[0] = init_reg;
2694 spill_fill_data.init_reg[1] = init_reg;
2695 spill_fill_data.prev_addr[0] = NULL;
2696 spill_fill_data.prev_addr[1] = NULL;
2697 spill_fill_data.prev_insn[0] = NULL;
2698 spill_fill_data.prev_insn[1] = NULL;
2699 spill_fill_data.prev_off[0] = cfa_off;
2700 spill_fill_data.prev_off[1] = cfa_off;
2701 spill_fill_data.next_iter = 0;
2702 spill_fill_data.save_gr_used_mask = current_frame_info.gr_used_mask;
2704 spill_fill_data.n_iter = 1 + (n_spills > 2);
2705 for (i = 0; i < spill_fill_data.n_iter; ++i)
2707 int regno = next_scratch_gr_reg ();
2708 spill_fill_data.iter_reg[i] = gen_rtx_REG (DImode, regno);
2709 current_frame_info.gr_used_mask |= 1 << regno;
2714 finish_spill_pointers (void)
2716 current_frame_info.gr_used_mask = spill_fill_data.save_gr_used_mask;
2720 spill_restore_mem (rtx reg, HOST_WIDE_INT cfa_off)
2722 int iter = spill_fill_data.next_iter;
2723 HOST_WIDE_INT disp = spill_fill_data.prev_off[iter] - cfa_off;
2724 rtx disp_rtx = GEN_INT (disp);
2727 if (spill_fill_data.prev_addr[iter])
2729 if (satisfies_constraint_N (disp_rtx))
2731 *spill_fill_data.prev_addr[iter]
2732 = gen_rtx_POST_MODIFY (DImode, spill_fill_data.iter_reg[iter],
2733 gen_rtx_PLUS (DImode,
2734 spill_fill_data.iter_reg[iter],
2736 REG_NOTES (spill_fill_data.prev_insn[iter])
2737 = gen_rtx_EXPR_LIST (REG_INC, spill_fill_data.iter_reg[iter],
2738 REG_NOTES (spill_fill_data.prev_insn[iter]));
2742 /* ??? Could use register post_modify for loads. */
2743 if (!satisfies_constraint_I (disp_rtx))
2745 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2746 emit_move_insn (tmp, disp_rtx);
2749 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2750 spill_fill_data.iter_reg[iter], disp_rtx));
2753 /* Micro-optimization: if we've created a frame pointer, it's at
2754 CFA 0, which may allow the real iterator to be initialized lower,
2755 slightly increasing parallelism. Also, if there are few saves
2756 it may eliminate the iterator entirely. */
2758 && spill_fill_data.init_reg[iter] == stack_pointer_rtx
2759 && frame_pointer_needed)
2761 mem = gen_rtx_MEM (GET_MODE (reg), hard_frame_pointer_rtx);
2762 set_mem_alias_set (mem, get_varargs_alias_set ());
2770 seq = gen_movdi (spill_fill_data.iter_reg[iter],
2771 spill_fill_data.init_reg[iter]);
2776 if (!satisfies_constraint_I (disp_rtx))
2778 rtx tmp = gen_rtx_REG (DImode, next_scratch_gr_reg ());
2779 emit_move_insn (tmp, disp_rtx);
2783 emit_insn (gen_adddi3 (spill_fill_data.iter_reg[iter],
2784 spill_fill_data.init_reg[iter],
2791 /* Careful for being the first insn in a sequence. */
2792 if (spill_fill_data.init_after)
2793 insn = emit_insn_after (seq, spill_fill_data.init_after);
2796 rtx first = get_insns ();
2798 insn = emit_insn_before (seq, first);
2800 insn = emit_insn (seq);
2802 spill_fill_data.init_after = insn;
2805 mem = gen_rtx_MEM (GET_MODE (reg), spill_fill_data.iter_reg[iter]);
2807 /* ??? Not all of the spills are for varargs, but some of them are.
2808 The rest of the spills belong in an alias set of their own. But
2809 it doesn't actually hurt to include them here. */
2810 set_mem_alias_set (mem, get_varargs_alias_set ());
2812 spill_fill_data.prev_addr[iter] = &XEXP (mem, 0);
2813 spill_fill_data.prev_off[iter] = cfa_off;
2815 if (++iter >= spill_fill_data.n_iter)
2817 spill_fill_data.next_iter = iter;
2823 do_spill (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off,
2826 int iter = spill_fill_data.next_iter;
2829 mem = spill_restore_mem (reg, cfa_off);
2830 insn = emit_insn ((*move_fn) (mem, reg, GEN_INT (cfa_off)));
2831 spill_fill_data.prev_insn[iter] = insn;
2838 RTX_FRAME_RELATED_P (insn) = 1;
2840 /* Don't even pretend that the unwind code can intuit its way
2841 through a pair of interleaved post_modify iterators. Just
2842 provide the correct answer. */
2844 if (frame_pointer_needed)
2846 base = hard_frame_pointer_rtx;
2851 base = stack_pointer_rtx;
2852 off = current_frame_info.total_size - cfa_off;
2856 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
2857 gen_rtx_SET (VOIDmode,
2858 gen_rtx_MEM (GET_MODE (reg),
2859 plus_constant (base, off)),
2866 do_restore (rtx (*move_fn) (rtx, rtx, rtx), rtx reg, HOST_WIDE_INT cfa_off)
2868 int iter = spill_fill_data.next_iter;
2871 insn = emit_insn ((*move_fn) (reg, spill_restore_mem (reg, cfa_off),
2872 GEN_INT (cfa_off)));
2873 spill_fill_data.prev_insn[iter] = insn;
2876 /* Wrapper functions that discard the CONST_INT spill offset. These
2877 exist so that we can give gr_spill/gr_fill the offset they need and
2878 use a consistent function interface. */
2881 gen_movdi_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2883 return gen_movdi (dest, src);
2887 gen_fr_spill_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2889 return gen_fr_spill (dest, src);
2893 gen_fr_restore_x (rtx dest, rtx src, rtx offset ATTRIBUTE_UNUSED)
2895 return gen_fr_restore (dest, src);
2898 /* Called after register allocation to add any instructions needed for the
2899 prologue. Using a prologue insn is favored compared to putting all of the
2900 instructions in output_function_prologue(), since it allows the scheduler
2901 to intermix instructions with the saves of the call-saved registers. In
2902 some cases, it might be necessary to emit a barrier instruction as the last
2903 insn to prevent such scheduling.
2905 Also any insns generated here should have RTX_FRAME_RELATED_P(insn) = 1
2906 so that the debug info generation code can handle them properly.
2908 The register save area is laid out like so:
2910 [ varargs spill area ]
2911 [ fr register spill area ]
2912 [ br register spill area ]
2913 [ ar register spill area ]
2914 [ pr register spill area ]
2915 [ gr register spill area ] */
2917 /* ??? Get inefficient code when the frame size is larger than can fit in an
2918 adds instruction. */
2921 ia64_expand_prologue (void)
2923 rtx insn, ar_pfs_save_reg, ar_unat_save_reg;
2924 int i, epilogue_p, regno, alt_regno, cfa_off, n_varargs;
2927 ia64_compute_frame_size (get_frame_size ());
2928 last_scratch_gr_reg = 15;
2932 fprintf (dump_file, "ia64 frame related registers "
2933 "recorded in current_frame_info.r[]:\n");
2934 #define PRINTREG(a) if (current_frame_info.r[a]) \
2935 fprintf(dump_file, "%s = %d\n", #a, current_frame_info.r[a])
2937 PRINTREG(reg_save_b0);
2938 PRINTREG(reg_save_pr);
2939 PRINTREG(reg_save_ar_pfs);
2940 PRINTREG(reg_save_ar_unat);
2941 PRINTREG(reg_save_ar_lc);
2942 PRINTREG(reg_save_gp);
2946 /* If there is no epilogue, then we don't need some prologue insns.
2947 We need to avoid emitting the dead prologue insns, because flow
2948 will complain about them. */
2954 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR->preds)
2955 if ((e->flags & EDGE_FAKE) == 0
2956 && (e->flags & EDGE_FALLTHRU) != 0)
2958 epilogue_p = (e != NULL);
2963 /* Set the local, input, and output register names. We need to do this
2964 for GNU libc, which creates crti.S/crtn.S by splitting initfini.c in
2965 half. If we use in/loc/out register names, then we get assembler errors
2966 in crtn.S because there is no alloc insn or regstk directive in there. */
2967 if (! TARGET_REG_NAMES)
2969 int inputs = current_frame_info.n_input_regs;
2970 int locals = current_frame_info.n_local_regs;
2971 int outputs = current_frame_info.n_output_regs;
2973 for (i = 0; i < inputs; i++)
2974 reg_names[IN_REG (i)] = ia64_reg_numbers[i];
2975 for (i = 0; i < locals; i++)
2976 reg_names[LOC_REG (i)] = ia64_reg_numbers[inputs + i];
2977 for (i = 0; i < outputs; i++)
2978 reg_names[OUT_REG (i)] = ia64_reg_numbers[inputs + locals + i];
2981 /* Set the frame pointer register name. The regnum is logically loc79,
2982 but of course we'll not have allocated that many locals. Rather than
2983 worrying about renumbering the existing rtxs, we adjust the name. */
2984 /* ??? This code means that we can never use one local register when
2985 there is a frame pointer. loc79 gets wasted in this case, as it is
2986 renamed to a register that will never be used. See also the try_locals
2987 code in find_gr_spill. */
2988 if (current_frame_info.r[reg_fp])
2990 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
2991 reg_names[HARD_FRAME_POINTER_REGNUM]
2992 = reg_names[current_frame_info.r[reg_fp]];
2993 reg_names[current_frame_info.r[reg_fp]] = tmp;
2996 /* We don't need an alloc instruction if we've used no outputs or locals. */
2997 if (current_frame_info.n_local_regs == 0
2998 && current_frame_info.n_output_regs == 0
2999 && current_frame_info.n_input_regs <= crtl->args.info.int_regs
3000 && !TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3002 /* If there is no alloc, but there are input registers used, then we
3003 need a .regstk directive. */
3004 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
3005 ar_pfs_save_reg = NULL_RTX;
3009 current_frame_info.need_regstk = 0;
3011 if (current_frame_info.r[reg_save_ar_pfs])
3013 regno = current_frame_info.r[reg_save_ar_pfs];
3014 reg_emitted (reg_save_ar_pfs);
3017 regno = next_scratch_gr_reg ();
3018 ar_pfs_save_reg = gen_rtx_REG (DImode, regno);
3020 insn = emit_insn (gen_alloc (ar_pfs_save_reg,
3021 GEN_INT (current_frame_info.n_input_regs),
3022 GEN_INT (current_frame_info.n_local_regs),
3023 GEN_INT (current_frame_info.n_output_regs),
3024 GEN_INT (current_frame_info.n_rotate_regs)));
3025 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_pfs] != 0);
3028 /* Set up frame pointer, stack pointer, and spill iterators. */
3030 n_varargs = cfun->machine->n_varargs;
3031 setup_spill_pointers (current_frame_info.n_spilled + n_varargs,
3032 stack_pointer_rtx, 0);
3034 if (frame_pointer_needed)
3036 insn = emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
3037 RTX_FRAME_RELATED_P (insn) = 1;
3040 if (current_frame_info.total_size != 0)
3042 rtx frame_size_rtx = GEN_INT (- current_frame_info.total_size);
3045 if (satisfies_constraint_I (frame_size_rtx))
3046 offset = frame_size_rtx;
3049 regno = next_scratch_gr_reg ();
3050 offset = gen_rtx_REG (DImode, regno);
3051 emit_move_insn (offset, frame_size_rtx);
3054 insn = emit_insn (gen_adddi3 (stack_pointer_rtx,
3055 stack_pointer_rtx, offset));
3057 if (! frame_pointer_needed)
3059 RTX_FRAME_RELATED_P (insn) = 1;
3060 if (GET_CODE (offset) != CONST_INT)
3063 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3064 gen_rtx_SET (VOIDmode,
3066 gen_rtx_PLUS (DImode,
3073 /* ??? At this point we must generate a magic insn that appears to
3074 modify the stack pointer, the frame pointer, and all spill
3075 iterators. This would allow the most scheduling freedom. For
3076 now, just hard stop. */
3077 emit_insn (gen_blockage ());
3080 /* Must copy out ar.unat before doing any integer spills. */
3081 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3083 if (current_frame_info.r[reg_save_ar_unat])
3086 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3087 reg_emitted (reg_save_ar_unat);
3091 alt_regno = next_scratch_gr_reg ();
3092 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3093 current_frame_info.gr_used_mask |= 1 << alt_regno;
3096 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3097 insn = emit_move_insn (ar_unat_save_reg, reg);
3098 RTX_FRAME_RELATED_P (insn) = (current_frame_info.r[reg_save_ar_unat] != 0);
3100 /* Even if we're not going to generate an epilogue, we still
3101 need to save the register so that EH works. */
3102 if (! epilogue_p && current_frame_info.r[reg_save_ar_unat])
3103 emit_insn (gen_prologue_use (ar_unat_save_reg));
3106 ar_unat_save_reg = NULL_RTX;
3108 /* Spill all varargs registers. Do this before spilling any GR registers,
3109 since we want the UNAT bits for the GR registers to override the UNAT
3110 bits from varargs, which we don't care about. */
3113 for (regno = GR_ARG_FIRST + 7; n_varargs > 0; --n_varargs, --regno)
3115 reg = gen_rtx_REG (DImode, regno);
3116 do_spill (gen_gr_spill, reg, cfa_off += 8, NULL_RTX);
3119 /* Locate the bottom of the register save area. */
3120 cfa_off = (current_frame_info.spill_cfa_off
3121 + current_frame_info.spill_size
3122 + current_frame_info.extra_spill_size);
3124 /* Save the predicate register block either in a register or in memory. */
3125 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3127 reg = gen_rtx_REG (DImode, PR_REG (0));
3128 if (current_frame_info.r[reg_save_pr] != 0)
3130 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3131 reg_emitted (reg_save_pr);
3132 insn = emit_move_insn (alt_reg, reg);
3134 /* ??? Denote pr spill/fill by a DImode move that modifies all
3135 64 hard registers. */
3136 RTX_FRAME_RELATED_P (insn) = 1;
3138 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3139 gen_rtx_SET (VOIDmode, alt_reg, reg),
3142 /* Even if we're not going to generate an epilogue, we still
3143 need to save the register so that EH works. */
3145 emit_insn (gen_prologue_use (alt_reg));
3149 alt_regno = next_scratch_gr_reg ();
3150 alt_reg = gen_rtx_REG (DImode, alt_regno);
3151 insn = emit_move_insn (alt_reg, reg);
3152 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3157 /* Handle AR regs in numerical order. All of them get special handling. */
3158 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM)
3159 && current_frame_info.r[reg_save_ar_unat] == 0)
3161 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3162 do_spill (gen_movdi_x, ar_unat_save_reg, cfa_off, reg);
3166 /* The alloc insn already copied ar.pfs into a general register. The
3167 only thing we have to do now is copy that register to a stack slot
3168 if we'd not allocated a local register for the job. */
3169 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM)
3170 && current_frame_info.r[reg_save_ar_pfs] == 0)
3172 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3173 do_spill (gen_movdi_x, ar_pfs_save_reg, cfa_off, reg);
3177 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3179 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3180 if (current_frame_info.r[reg_save_ar_lc] != 0)
3182 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3183 reg_emitted (reg_save_ar_lc);
3184 insn = emit_move_insn (alt_reg, reg);
3185 RTX_FRAME_RELATED_P (insn) = 1;
3187 /* Even if we're not going to generate an epilogue, we still
3188 need to save the register so that EH works. */
3190 emit_insn (gen_prologue_use (alt_reg));
3194 alt_regno = next_scratch_gr_reg ();
3195 alt_reg = gen_rtx_REG (DImode, alt_regno);
3196 emit_move_insn (alt_reg, reg);
3197 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3202 /* Save the return pointer. */
3203 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3205 reg = gen_rtx_REG (DImode, BR_REG (0));
3206 if (current_frame_info.r[reg_save_b0] != 0)
3208 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3209 reg_emitted (reg_save_b0);
3210 insn = emit_move_insn (alt_reg, reg);
3211 RTX_FRAME_RELATED_P (insn) = 1;
3213 /* Even if we're not going to generate an epilogue, we still
3214 need to save the register so that EH works. */
3216 emit_insn (gen_prologue_use (alt_reg));
3220 alt_regno = next_scratch_gr_reg ();
3221 alt_reg = gen_rtx_REG (DImode, alt_regno);
3222 emit_move_insn (alt_reg, reg);
3223 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3228 if (current_frame_info.r[reg_save_gp])
3230 reg_emitted (reg_save_gp);
3231 insn = emit_move_insn (gen_rtx_REG (DImode,
3232 current_frame_info.r[reg_save_gp]),
3233 pic_offset_table_rtx);
3236 /* We should now be at the base of the gr/br/fr spill area. */
3237 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3238 + current_frame_info.spill_size));
3240 /* Spill all general registers. */
3241 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3242 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3244 reg = gen_rtx_REG (DImode, regno);
3245 do_spill (gen_gr_spill, reg, cfa_off, reg);
3249 /* Spill the rest of the BR registers. */
3250 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3251 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3253 alt_regno = next_scratch_gr_reg ();
3254 alt_reg = gen_rtx_REG (DImode, alt_regno);
3255 reg = gen_rtx_REG (DImode, regno);
3256 emit_move_insn (alt_reg, reg);
3257 do_spill (gen_movdi_x, alt_reg, cfa_off, reg);
3261 /* Align the frame and spill all FR registers. */
3262 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3263 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3265 gcc_assert (!(cfa_off & 15));
3266 reg = gen_rtx_REG (XFmode, regno);
3267 do_spill (gen_fr_spill_x, reg, cfa_off, reg);
3271 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3273 finish_spill_pointers ();
3276 /* Called after register allocation to add any instructions needed for the
3277 epilogue. Using an epilogue insn is favored compared to putting all of the
3278 instructions in output_function_epilogue(), since it allows the scheduler
3279 to intermix instructions with the restores of the call-saved registers. In
3280 some cases, it might be necessary to emit a barrier instruction as the last
3281 insn to prevent such scheduling. */
3284 ia64_expand_epilogue (int sibcall_p)
3286 rtx insn, reg, alt_reg, ar_unat_save_reg;
3287 int regno, alt_regno, cfa_off;
3289 ia64_compute_frame_size (get_frame_size ());
3291 /* If there is a frame pointer, then we use it instead of the stack
3292 pointer, so that the stack pointer does not need to be valid when
3293 the epilogue starts. See EXIT_IGNORE_STACK. */
3294 if (frame_pointer_needed)
3295 setup_spill_pointers (current_frame_info.n_spilled,
3296 hard_frame_pointer_rtx, 0);
3298 setup_spill_pointers (current_frame_info.n_spilled, stack_pointer_rtx,
3299 current_frame_info.total_size);
3301 if (current_frame_info.total_size != 0)
3303 /* ??? At this point we must generate a magic insn that appears to
3304 modify the spill iterators and the frame pointer. This would
3305 allow the most scheduling freedom. For now, just hard stop. */
3306 emit_insn (gen_blockage ());
3309 /* Locate the bottom of the register save area. */
3310 cfa_off = (current_frame_info.spill_cfa_off
3311 + current_frame_info.spill_size
3312 + current_frame_info.extra_spill_size);
3314 /* Restore the predicate registers. */
3315 if (TEST_HARD_REG_BIT (current_frame_info.mask, PR_REG (0)))
3317 if (current_frame_info.r[reg_save_pr] != 0)
3319 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_pr]);
3320 reg_emitted (reg_save_pr);
3324 alt_regno = next_scratch_gr_reg ();
3325 alt_reg = gen_rtx_REG (DImode, alt_regno);
3326 do_restore (gen_movdi_x, alt_reg, cfa_off);
3329 reg = gen_rtx_REG (DImode, PR_REG (0));
3330 emit_move_insn (reg, alt_reg);
3333 /* Restore the application registers. */
3335 /* Load the saved unat from the stack, but do not restore it until
3336 after the GRs have been restored. */
3337 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3339 if (current_frame_info.r[reg_save_ar_unat] != 0)
3342 = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_unat]);
3343 reg_emitted (reg_save_ar_unat);
3347 alt_regno = next_scratch_gr_reg ();
3348 ar_unat_save_reg = gen_rtx_REG (DImode, alt_regno);
3349 current_frame_info.gr_used_mask |= 1 << alt_regno;
3350 do_restore (gen_movdi_x, ar_unat_save_reg, cfa_off);
3355 ar_unat_save_reg = NULL_RTX;
3357 if (current_frame_info.r[reg_save_ar_pfs] != 0)
3359 reg_emitted (reg_save_ar_pfs);
3360 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_pfs]);
3361 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3362 emit_move_insn (reg, alt_reg);
3364 else if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_PFS_REGNUM))
3366 alt_regno = next_scratch_gr_reg ();
3367 alt_reg = gen_rtx_REG (DImode, alt_regno);
3368 do_restore (gen_movdi_x, alt_reg, cfa_off);
3370 reg = gen_rtx_REG (DImode, AR_PFS_REGNUM);
3371 emit_move_insn (reg, alt_reg);
3374 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_LC_REGNUM))
3376 if (current_frame_info.r[reg_save_ar_lc] != 0)
3378 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_ar_lc]);
3379 reg_emitted (reg_save_ar_lc);
3383 alt_regno = next_scratch_gr_reg ();
3384 alt_reg = gen_rtx_REG (DImode, alt_regno);
3385 do_restore (gen_movdi_x, alt_reg, cfa_off);
3388 reg = gen_rtx_REG (DImode, AR_LC_REGNUM);
3389 emit_move_insn (reg, alt_reg);
3392 /* Restore the return pointer. */
3393 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3395 if (current_frame_info.r[reg_save_b0] != 0)
3397 alt_reg = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3398 reg_emitted (reg_save_b0);
3402 alt_regno = next_scratch_gr_reg ();
3403 alt_reg = gen_rtx_REG (DImode, alt_regno);
3404 do_restore (gen_movdi_x, alt_reg, cfa_off);
3407 reg = gen_rtx_REG (DImode, BR_REG (0));
3408 emit_move_insn (reg, alt_reg);
3411 /* We should now be at the base of the gr/br/fr spill area. */
3412 gcc_assert (cfa_off == (current_frame_info.spill_cfa_off
3413 + current_frame_info.spill_size));
3415 /* The GP may be stored on the stack in the prologue, but it's
3416 never restored in the epilogue. Skip the stack slot. */
3417 if (TEST_HARD_REG_BIT (current_frame_info.mask, GR_REG (1)))
3420 /* Restore all general registers. */
3421 for (regno = GR_REG (2); regno <= GR_REG (31); ++regno)
3422 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3424 reg = gen_rtx_REG (DImode, regno);
3425 do_restore (gen_gr_restore, reg, cfa_off);
3429 /* Restore the branch registers. */
3430 for (regno = BR_REG (1); regno <= BR_REG (7); ++regno)
3431 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3433 alt_regno = next_scratch_gr_reg ();
3434 alt_reg = gen_rtx_REG (DImode, alt_regno);
3435 do_restore (gen_movdi_x, alt_reg, cfa_off);
3437 reg = gen_rtx_REG (DImode, regno);
3438 emit_move_insn (reg, alt_reg);
3441 /* Restore floating point registers. */
3442 for (regno = FR_REG (2); regno <= FR_REG (127); ++regno)
3443 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3445 gcc_assert (!(cfa_off & 15));
3446 reg = gen_rtx_REG (XFmode, regno);
3447 do_restore (gen_fr_restore_x, reg, cfa_off);
3451 /* Restore ar.unat for real. */
3452 if (TEST_HARD_REG_BIT (current_frame_info.mask, AR_UNAT_REGNUM))
3454 reg = gen_rtx_REG (DImode, AR_UNAT_REGNUM);
3455 emit_move_insn (reg, ar_unat_save_reg);
3458 gcc_assert (cfa_off == current_frame_info.spill_cfa_off);
3460 finish_spill_pointers ();
3462 if (current_frame_info.total_size
3463 || cfun->machine->ia64_eh_epilogue_sp
3464 || frame_pointer_needed)
3466 /* ??? At this point we must generate a magic insn that appears to
3467 modify the spill iterators, the stack pointer, and the frame
3468 pointer. This would allow the most scheduling freedom. For now,
3470 emit_insn (gen_blockage ());
3473 if (cfun->machine->ia64_eh_epilogue_sp)
3474 emit_move_insn (stack_pointer_rtx, cfun->machine->ia64_eh_epilogue_sp);
3475 else if (frame_pointer_needed)
3477 insn = emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
3478 RTX_FRAME_RELATED_P (insn) = 1;
3480 else if (current_frame_info.total_size)
3482 rtx offset, frame_size_rtx;
3484 frame_size_rtx = GEN_INT (current_frame_info.total_size);
3485 if (satisfies_constraint_I (frame_size_rtx))
3486 offset = frame_size_rtx;
3489 regno = next_scratch_gr_reg ();
3490 offset = gen_rtx_REG (DImode, regno);
3491 emit_move_insn (offset, frame_size_rtx);
3494 insn = emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx,
3497 RTX_FRAME_RELATED_P (insn) = 1;
3498 if (GET_CODE (offset) != CONST_INT)
3501 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3502 gen_rtx_SET (VOIDmode,
3504 gen_rtx_PLUS (DImode,
3511 if (cfun->machine->ia64_eh_epilogue_bsp)
3512 emit_insn (gen_set_bsp (cfun->machine->ia64_eh_epilogue_bsp));
3515 emit_jump_insn (gen_return_internal (gen_rtx_REG (DImode, BR_REG (0))));
3518 int fp = GR_REG (2);
3519 /* We need a throw-away register here; r0 and r1 are reserved, so r2 is the
3520 first available call-clobbered register. If there was a frame_pointer
3521 register, we may have swapped the names of r2 and HARD_FRAME_POINTER_REGNUM,
3522 so we have to make sure we're using the string "r2" when emitting
3523 the register name for the assembler. */
3524 if (current_frame_info.r[reg_fp]
3525 && current_frame_info.r[reg_fp] == GR_REG (2))
3526 fp = HARD_FRAME_POINTER_REGNUM;
3528 /* We must emit an alloc to force the input registers to become output
3529 registers. Otherwise, if the callee tries to pass its parameters
3530 through to another call without an intervening alloc, then these
3532 /* ??? We don't need to preserve all input registers. We only need to
3533 preserve those input registers used as arguments to the sibling call.
3534 It is unclear how to compute that number here. */
3535 if (current_frame_info.n_input_regs != 0)
3537 rtx n_inputs = GEN_INT (current_frame_info.n_input_regs);
3538 insn = emit_insn (gen_alloc (gen_rtx_REG (DImode, fp),
3539 const0_rtx, const0_rtx,
3540 n_inputs, const0_rtx));
3541 RTX_FRAME_RELATED_P (insn) = 1;
3546 /* Return 1 if br.ret can do all the work required to return from a
3550 ia64_direct_return (void)
3552 if (reload_completed && ! frame_pointer_needed)
3554 ia64_compute_frame_size (get_frame_size ());
3556 return (current_frame_info.total_size == 0
3557 && current_frame_info.n_spilled == 0
3558 && current_frame_info.r[reg_save_b0] == 0
3559 && current_frame_info.r[reg_save_pr] == 0
3560 && current_frame_info.r[reg_save_ar_pfs] == 0
3561 && current_frame_info.r[reg_save_ar_unat] == 0
3562 && current_frame_info.r[reg_save_ar_lc] == 0);
3567 /* Return the magic cookie that we use to hold the return address
3568 during early compilation. */
3571 ia64_return_addr_rtx (HOST_WIDE_INT count, rtx frame ATTRIBUTE_UNUSED)
3575 return gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), UNSPEC_RET_ADDR);
3578 /* Split this value after reload, now that we know where the return
3579 address is saved. */
3582 ia64_split_return_addr_rtx (rtx dest)
3586 if (TEST_HARD_REG_BIT (current_frame_info.mask, BR_REG (0)))
3588 if (current_frame_info.r[reg_save_b0] != 0)
3590 src = gen_rtx_REG (DImode, current_frame_info.r[reg_save_b0]);
3591 reg_emitted (reg_save_b0);
3599 /* Compute offset from CFA for BR0. */
3600 /* ??? Must be kept in sync with ia64_expand_prologue. */
3601 off = (current_frame_info.spill_cfa_off
3602 + current_frame_info.spill_size);
3603 for (regno = GR_REG (1); regno <= GR_REG (31); ++regno)
3604 if (TEST_HARD_REG_BIT (current_frame_info.mask, regno))
3607 /* Convert CFA offset to a register based offset. */
3608 if (frame_pointer_needed)
3609 src = hard_frame_pointer_rtx;
3612 src = stack_pointer_rtx;
3613 off += current_frame_info.total_size;
3616 /* Load address into scratch register. */
3617 off_r = GEN_INT (off);
3618 if (satisfies_constraint_I (off_r))
3619 emit_insn (gen_adddi3 (dest, src, off_r));
3622 emit_move_insn (dest, off_r);
3623 emit_insn (gen_adddi3 (dest, src, dest));
3626 src = gen_rtx_MEM (Pmode, dest);
3630 src = gen_rtx_REG (DImode, BR_REG (0));
3632 emit_move_insn (dest, src);
3636 ia64_hard_regno_rename_ok (int from, int to)
3638 /* Don't clobber any of the registers we reserved for the prologue. */
3639 enum ia64_frame_regs r;
3641 for (r = reg_fp; r <= reg_save_ar_lc; r++)
3642 if (to == current_frame_info.r[r]
3643 || from == current_frame_info.r[r]
3644 || to == emitted_frame_related_regs[r]
3645 || from == emitted_frame_related_regs[r])
3648 /* Don't use output registers outside the register frame. */
3649 if (OUT_REGNO_P (to) && to >= OUT_REG (current_frame_info.n_output_regs))
3652 /* Retain even/oddness on predicate register pairs. */
3653 if (PR_REGNO_P (from) && PR_REGNO_P (to))
3654 return (from & 1) == (to & 1);
3659 /* Target hook for assembling integer objects. Handle word-sized
3660 aligned objects and detect the cases when @fptr is needed. */
3663 ia64_assemble_integer (rtx x, unsigned int size, int aligned_p)
3665 if (size == POINTER_SIZE / BITS_PER_UNIT
3666 && !(TARGET_NO_PIC || TARGET_AUTO_PIC)
3667 && GET_CODE (x) == SYMBOL_REF
3668 && SYMBOL_REF_FUNCTION_P (x))
3670 static const char * const directive[2][2] = {
3671 /* 64-bit pointer */ /* 32-bit pointer */
3672 { "\tdata8.ua\t@fptr(", "\tdata4.ua\t@fptr("}, /* unaligned */
3673 { "\tdata8\t@fptr(", "\tdata4\t@fptr("} /* aligned */
3675 fputs (directive[(aligned_p != 0)][POINTER_SIZE == 32], asm_out_file);
3676 output_addr_const (asm_out_file, x);
3677 fputs (")\n", asm_out_file);
3680 return default_assemble_integer (x, size, aligned_p);
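/* (Editor's note: e.g. a statically initialized function pointer such as
   "void (*fp) (void) = foo;" is emitted by the code above as

        data8   @fptr(foo)

   or the data4/.ua variants, so that a function-descriptor address is
   stored rather than the raw code address.)  */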
3683 /* Emit the function prologue. */
3686 ia64_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3688 int mask, grsave, grsave_prev;
3690 if (current_frame_info.need_regstk)
3691 fprintf (file, "\t.regstk %d, %d, %d, %d\n",
3692 current_frame_info.n_input_regs,
3693 current_frame_info.n_local_regs,
3694 current_frame_info.n_output_regs,
3695 current_frame_info.n_rotate_regs);
3697 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3700 /* Emit the .prologue directive. */
3703 grsave = grsave_prev = 0;
3704 if (current_frame_info.r[reg_save_b0] != 0)
3707 grsave = grsave_prev = current_frame_info.r[reg_save_b0];
3709 if (current_frame_info.r[reg_save_ar_pfs] != 0
3710 && (grsave_prev == 0
3711 || current_frame_info.r[reg_save_ar_pfs] == grsave_prev + 1))
3714 if (grsave_prev == 0)
3715 grsave = current_frame_info.r[reg_save_ar_pfs];
3716 grsave_prev = current_frame_info.r[reg_save_ar_pfs];
3718 if (current_frame_info.r[reg_fp] != 0
3719 && (grsave_prev == 0
3720 || current_frame_info.r[reg_fp] == grsave_prev + 1))
3723 if (grsave_prev == 0)
3724 grsave = HARD_FRAME_POINTER_REGNUM;
3725 grsave_prev = current_frame_info.r[reg_fp];
3727 if (current_frame_info.r[reg_save_pr] != 0
3728 && (grsave_prev == 0
3729 || current_frame_info.r[reg_save_pr] == grsave_prev + 1))
3732 if (grsave_prev == 0)
3733 grsave = current_frame_info.r[reg_save_pr];
3736 if (mask && TARGET_GNU_AS)
3737 fprintf (file, "\t.prologue %d, %d\n", mask,
3738 ia64_dbx_register_number (grsave));
3740 fputs ("\t.prologue\n", file);
3742 /* Emit a .spill directive, if necessary, to relocate the base of
3743 the register spill area. */
3744 if (current_frame_info.spill_cfa_off != -16)
3745 fprintf (file, "\t.spill %ld\n",
3746 (long) (current_frame_info.spill_cfa_off
3747 + current_frame_info.spill_size));
3750 /* Emit the .body directive at the scheduled end of the prologue. */
3753 ia64_output_function_end_prologue (FILE *file)
3755 if (!flag_unwind_tables && (!flag_exceptions || USING_SJLJ_EXCEPTIONS))
3758 fputs ("\t.body\n", file);
3761 /* Emit the function epilogue. */
3764 ia64_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
3765 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3769 if (current_frame_info.r[reg_fp])
3771 const char *tmp = reg_names[HARD_FRAME_POINTER_REGNUM];
3772 reg_names[HARD_FRAME_POINTER_REGNUM]
3773 = reg_names[current_frame_info.r[reg_fp]];
3774 reg_names[current_frame_info.r[reg_fp]] = tmp;
3775 reg_emitted (reg_fp);
3777 if (! TARGET_REG_NAMES)
3779 for (i = 0; i < current_frame_info.n_input_regs; i++)
3780 reg_names[IN_REG (i)] = ia64_input_reg_names[i];
3781 for (i = 0; i < current_frame_info.n_local_regs; i++)
3782 reg_names[LOC_REG (i)] = ia64_local_reg_names[i];
3783 for (i = 0; i < current_frame_info.n_output_regs; i++)
3784 reg_names[OUT_REG (i)] = ia64_output_reg_names[i];
3787 current_frame_info.initialized = 0;
3791 ia64_dbx_register_number (int regno)
3793 /* In ia64_expand_prologue we quite literally renamed the frame pointer
3794 from its home at loc79 to something inside the register frame. We
3795 must perform the same renumbering here for the debug info. */
3796 if (current_frame_info.r[reg_fp])
3798 if (regno == HARD_FRAME_POINTER_REGNUM)
3799 regno = current_frame_info.r[reg_fp];
3800 else if (regno == current_frame_info.r[reg_fp])
3801 regno = HARD_FRAME_POINTER_REGNUM;
3804 if (IN_REGNO_P (regno))
3805 return 32 + regno - IN_REG (0);
3806 else if (LOC_REGNO_P (regno))
3807 return 32 + current_frame_info.n_input_regs + regno - LOC_REG (0);
3808 else if (OUT_REGNO_P (regno))
3809 return (32 + current_frame_info.n_input_regs
3810 + current_frame_info.n_local_regs + regno - OUT_REG (0));
3816 ia64_initialize_trampoline (rtx addr, rtx fnaddr, rtx static_chain)
3818 rtx addr_reg, eight = GEN_INT (8);
3820 /* The Intel assembler requires that the global __ia64_trampoline symbol
3821 be declared explicitly */
3824 static bool declared_ia64_trampoline = false;
3826 if (!declared_ia64_trampoline)
3828 declared_ia64_trampoline = true;
3829 (*targetm.asm_out.globalize_label) (asm_out_file,
3830 "__ia64_trampoline");
3834 /* Make sure addresses are Pmode even if we are in ILP32 mode. */
3835 addr = convert_memory_address (Pmode, addr);
3836 fnaddr = convert_memory_address (Pmode, fnaddr);
3837 static_chain = convert_memory_address (Pmode, static_chain);
3839 /* Load up our iterator. */
3840 addr_reg = gen_reg_rtx (Pmode);
3841 emit_move_insn (addr_reg, addr);
3843 /* The first two words are the fake descriptor:
3844 __ia64_trampoline, ADDR+16. */
3845 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3846 gen_rtx_SYMBOL_REF (Pmode, "__ia64_trampoline"));
3847 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3849 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg),
3850 copy_to_reg (plus_constant (addr, 16)));
3851 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3853 /* The third word is the target descriptor. */
3854 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), fnaddr);
3855 emit_insn (gen_adddi3 (addr_reg, addr_reg, eight));
3857 /* The fourth word is the static chain. */
3858 emit_move_insn (gen_rtx_MEM (Pmode, addr_reg), static_chain);
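/* (Editor's summary of the 32-byte trampoline written above:

        addr + 0:  __ia64_trampoline    \ fake function descriptor
        addr + 8:  addr + 16            / (gp slot points at the real data)
        addr + 16: fnaddr               target function descriptor
        addr + 24: static_chain

   Calling through the fake descriptor enters __ia64_trampoline, which
   loads the static chain and branches to the real target.)  */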
3861 /* Do any needed setup for a variadic function. CUM has not been updated
3862 for the last named argument which has type TYPE and mode MODE.
3864 We generate the actual spill instructions during prologue generation. */
3867 ia64_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3868 tree type, int * pretend_size,
3869 int second_time ATTRIBUTE_UNUSED)
3871 CUMULATIVE_ARGS next_cum = *cum;
3873 /* Skip the current argument. */
3874 ia64_function_arg_advance (&next_cum, mode, type, 1);
3876 if (next_cum.words < MAX_ARGUMENT_SLOTS)
3878 int n = MAX_ARGUMENT_SLOTS - next_cum.words;
3879 *pretend_size = n * UNITS_PER_WORD;
3880 cfun->machine->n_varargs = n;
3884 /* Check whether TYPE is a homogeneous floating point aggregate. If
3885 it is, return the mode of the floating point type that appears
3886 in all leafs. If it is not, return VOIDmode.
3888 An aggregate is a homogeneous floating point aggregate if all
3889 fields/elements in it have the same floating point type (e.g.,
3890 SFmode). 128-bit quad-precision floats are excluded.
3892 Variable sized aggregates should never arrive here, since we should
3893 have already decided to pass them by reference. Top-level zero-sized
3894 aggregates are excluded because our parallels crash the middle-end. */
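/* (Editor's illustration, fenced off with #if 0: examples of what the
   predicate below accepts and rejects.)  */
#if 0
struct hfa_d   { double a, b, c; };     /* HFA, element mode DFmode */
struct hfa_s   { float v[4]; };         /* HFA, element mode SFmode */
struct not_hfa { double a; long b; };   /* mixed leaves, not an HFA */
/* _Complex double is treated as an HFA of two DFmode elements, while
   _Complex int and 128-bit quad-precision (TFmode) values are not HFAs.  */
#endif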
3896 static enum machine_mode
3897 hfa_element_mode (const_tree type, bool nested)
3899 enum machine_mode element_mode = VOIDmode;
3900 enum machine_mode mode;
3901 enum tree_code code = TREE_CODE (type);
3902 int know_element_mode = 0;
3905 if (!nested && (!TYPE_SIZE (type) || integer_zerop (TYPE_SIZE (type))))
3910 case VOID_TYPE: case INTEGER_TYPE: case ENUMERAL_TYPE:
3911 case BOOLEAN_TYPE: case POINTER_TYPE:
3912 case OFFSET_TYPE: case REFERENCE_TYPE: case METHOD_TYPE:
3913 case LANG_TYPE: case FUNCTION_TYPE:
3916 /* Fortran complex types are supposed to be HFAs, so we need to handle
3917 gcc's COMPLEX_TYPEs as HFAs. We need to exclude the integral complex
3920 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_COMPLEX_FLOAT
3921 && TYPE_MODE (type) != TCmode)
3922 return GET_MODE_INNER (TYPE_MODE (type));
3927 /* We want to return VOIDmode for raw REAL_TYPEs, but the actual
3928 mode if this is contained within an aggregate. */
3929 if (nested && TYPE_MODE (type) != TFmode)
3930 return TYPE_MODE (type);
3935 return hfa_element_mode (TREE_TYPE (type), 1);
3939 case QUAL_UNION_TYPE:
3940 for (t = TYPE_FIELDS (type); t; t = TREE_CHAIN (t))
3942 if (TREE_CODE (t) != FIELD_DECL)
3945 mode = hfa_element_mode (TREE_TYPE (t), 1);
3946 if (know_element_mode)
3948 if (mode != element_mode)
3951 else if (GET_MODE_CLASS (mode) != MODE_FLOAT)
3955 know_element_mode = 1;
3956 element_mode = mode;
3959 return element_mode;
3962 /* If we reach here, we probably have some front-end specific type
3963 that the backend doesn't know about. This can happen via the
3964 aggregate_value_p call in init_function_start. All we can do is
3965 ignore unknown tree types. */
3972 /* Return the number of words required to hold a quantity of TYPE and MODE
3973 when passed as an argument. */
3975 ia64_function_arg_words (tree type, enum machine_mode mode)
3979 if (mode == BLKmode)
3980 words = int_size_in_bytes (type);
3982 words = GET_MODE_SIZE (mode);
3984 return (words + UNITS_PER_WORD - 1) / UNITS_PER_WORD; /* round up */
3987 /* Return the number of registers that should be skipped so the current
3988 argument (described by TYPE and WORDS) will be properly aligned.
3990 Integer and float arguments larger than 8 bytes start at the next
3991 even boundary. Aggregates larger than 8 bytes start at the next
3992 even boundary if the aggregate has 16 byte alignment. Note that
3993 in the 32-bit ABI, TImode and TFmode have only 8-byte alignment
3994 but are still to be aligned in registers.
3996 ??? The ABI does not specify how to handle aggregates with
3997 alignment from 9 to 15 bytes, or greater than 16. We handle them
3998 all as if they had 16 byte alignment. Such aggregates can occur
3999 only if gcc extensions are used. */
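/* (Editor's examples of the rule above: an __int128 argument that would
   start in slot 3 is pushed to slot 4, as is a 16-byte-aligned struct;
   a 16-byte struct with only 8-byte alignment, or any argument starting
   in an even slot, gets no padding slot.)  */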
4001 ia64_function_arg_offset (CUMULATIVE_ARGS *cum, tree type, int words)
4003 if ((cum->words & 1) == 0)
4007 && TREE_CODE (type) != INTEGER_TYPE
4008 && TREE_CODE (type) != REAL_TYPE)
4009 return TYPE_ALIGN (type) > 8 * BITS_PER_UNIT;
4014 /* Return rtx for register where argument is passed, or zero if it is passed
4016 /* ??? 128-bit quad-precision floats are always passed in general
4020 ia64_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
4021 int named, int incoming)
4023 int basereg = (incoming ? GR_ARG_FIRST : AR_ARG_FIRST);
4024 int words = ia64_function_arg_words (type, mode);
4025 int offset = ia64_function_arg_offset (cum, type, words);
4026 enum machine_mode hfa_mode = VOIDmode;
4028 /* If all argument slots are used, then it must go on the stack. */
4029 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4032 /* Check for and handle homogeneous FP aggregates. */
4034 hfa_mode = hfa_element_mode (type, 0);
4036 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4037 and unprototyped hfas are passed specially. */
4038 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4042 int fp_regs = cum->fp_regs;
4043 int int_regs = cum->words + offset;
4044 int hfa_size = GET_MODE_SIZE (hfa_mode);
4048 /* If prototyped, pass it in FR regs then GR regs.
4049 If not prototyped, pass it in both FR and GR regs.
4051 If this is an SFmode aggregate, then it is possible to run out of
4052 FR regs while GR regs are still left. In that case, we pass the
4053 remaining part in the GR regs. */
4055 /* Fill the FP regs. We do this always. We stop if we reach the end
4056 of the argument, the last FP register, or the last argument slot. */
4058 byte_size = ((mode == BLKmode)
4059 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4060 args_byte_size = int_regs * UNITS_PER_WORD;
4062 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4063 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD)); i++)
4065 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4066 gen_rtx_REG (hfa_mode, (FR_ARG_FIRST
4070 args_byte_size += hfa_size;
4074 /* If no prototype, then the whole thing must go in GR regs. */
4075 if (! cum->prototype)
4077 /* If this is an SFmode aggregate, then we might have some left over
4078 that needs to go in GR regs. */
4079 else if (byte_size != offset)
4080 int_regs += offset / UNITS_PER_WORD;
4082 /* Fill in the GR regs. We must use DImode here, not the hfa mode. */
4084 for (; offset < byte_size && int_regs < MAX_ARGUMENT_SLOTS; i++)
4086 enum machine_mode gr_mode = DImode;
4087 unsigned int gr_size;
4089 /* If we have an odd 4 byte hunk because we ran out of FR regs,
4090 then this goes in a GR reg left adjusted/little endian, right
4091 adjusted/big endian. */
4092 /* ??? Currently this is handled wrong, because 4-byte hunks are
4093 always right adjusted/little endian. */
4096 /* If we have an even 4 byte hunk because the aggregate is a
4097 multiple of 4 bytes in size, then this goes in a GR reg right
4098 adjusted/little endian. */
4099 else if (byte_size - offset == 4)
4102 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4103 gen_rtx_REG (gr_mode, (basereg
4107 gr_size = GET_MODE_SIZE (gr_mode);
4109 if (gr_size == UNITS_PER_WORD
4110 || (gr_size < UNITS_PER_WORD && offset % UNITS_PER_WORD == 0))
4112 else if (gr_size > UNITS_PER_WORD)
4113 int_regs += gr_size / UNITS_PER_WORD;
4115 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4118 /* Integral and aggregates go in general registers. If we have run out of
4119 FR registers, then FP values must also go in general registers. This can
4120 happen when we have a SFmode HFA. */
4121 else if (mode == TFmode || mode == TCmode
4122 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4124 int byte_size = ((mode == BLKmode)
4125 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4126 if (BYTES_BIG_ENDIAN
4127 && (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
4128 && byte_size < UNITS_PER_WORD
4131 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4132 gen_rtx_REG (DImode,
4133 (basereg + cum->words
4136 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
4139 return gen_rtx_REG (mode, basereg + cum->words + offset);
4143 /* If there is a prototype, then FP values go in a FR register when
4144 named, and in a GR register when unnamed. */
4145 else if (cum->prototype)
4148 return gen_rtx_REG (mode, FR_ARG_FIRST + cum->fp_regs);
4149 /* In big-endian mode, an anonymous SFmode value must be represented
4150 as (parallel:SF [(expr_list (reg:DI n) (const_int 0))]) to force
4151 the value into the high half of the general register. */
4152 else if (BYTES_BIG_ENDIAN && mode == SFmode)
4153 return gen_rtx_PARALLEL (mode,
4155 gen_rtx_EXPR_LIST (VOIDmode,
4156 gen_rtx_REG (DImode, basereg + cum->words + offset),
4159 return gen_rtx_REG (mode, basereg + cum->words + offset);
4161 /* If there is no prototype, then FP values go in both FR and GR registers. */
4165 /* See comment above. */
4166 enum machine_mode inner_mode =
4167 (BYTES_BIG_ENDIAN && mode == SFmode) ? DImode : mode;
4169 rtx fp_reg = gen_rtx_EXPR_LIST (VOIDmode,
4170 gen_rtx_REG (mode, (FR_ARG_FIRST
4173 rtx gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
4174 gen_rtx_REG (inner_mode,
4175 (basereg + cum->words
4179 return gen_rtx_PARALLEL (mode, gen_rtvec (2, fp_reg, gr_reg));
4183 /* Return the number of bytes, at the beginning of the argument, that must be
4184 put in registers. 0 if the argument is entirely in registers or entirely on the stack. */
4188 ia64_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4189 tree type, bool named ATTRIBUTE_UNUSED)
4191 int words = ia64_function_arg_words (type, mode);
4192 int offset = ia64_function_arg_offset (cum, type, words);
4194 /* If all argument slots are used, then it must go on the stack. */
4195 if (cum->words + offset >= MAX_ARGUMENT_SLOTS)
4198 /* It doesn't matter whether the argument goes in FR or GR regs. If
4199 it fits within the 8 argument slots, then it goes entirely in
4200 registers. If it extends past the last argument slot, then the rest
4201 goes on the stack. */
4203 if (words + cum->words + offset <= MAX_ARGUMENT_SLOTS)
4206 return (MAX_ARGUMENT_SLOTS - cum->words - offset) * UNITS_PER_WORD;
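/* Worked example (illustrative, not in the original source): with
   cum->words == 6, offset == 0 and a four-word (32-byte) argument,
   6 + 4 > MAX_ARGUMENT_SLOTS, so (8 - 6 - 0) * 8 == 16 bytes are passed in
   registers and the remaining 16 bytes go on the stack.  */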
4209 /* Update CUM to point after this argument. This is patterned after
4210 ia64_function_arg. */
4213 ia64_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4214 tree type, int named)
4216 int words = ia64_function_arg_words (type, mode);
4217 int offset = ia64_function_arg_offset (cum, type, words);
4218 enum machine_mode hfa_mode = VOIDmode;
4220 /* If all arg slots are already full, then there is nothing to do. */
4221 if (cum->words >= MAX_ARGUMENT_SLOTS)
4224 cum->words += words + offset;
4226 /* Check for and handle homogeneous FP aggregates. */
4228 hfa_mode = hfa_element_mode (type, 0);
4230 /* Unnamed prototyped hfas are passed as usual. Named prototyped hfas
4231 and unprototyped hfas are passed specially. */
4232 if (hfa_mode != VOIDmode && (! cum->prototype || named))
4234 int fp_regs = cum->fp_regs;
4235 /* This is the original value of cum->words + offset. */
4236 int int_regs = cum->words - words;
4237 int hfa_size = GET_MODE_SIZE (hfa_mode);
4241 /* If prototyped, pass it in FR regs then GR regs.
4242 If not prototyped, pass it in both FR and GR regs.
4244 If this is an SFmode aggregate, then it is possible to run out of
4245 FR regs while GR regs are still left. In that case, we pass the
4246 remaining part in the GR regs. */
4248 /* Fill the FP regs. We do this always. We stop if we reach the end
4249 of the argument, the last FP register, or the last argument slot. */
4251 byte_size = ((mode == BLKmode)
4252 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
4253 args_byte_size = int_regs * UNITS_PER_WORD;
4255 for (; (offset < byte_size && fp_regs < MAX_ARGUMENT_SLOTS
4256 && args_byte_size < (MAX_ARGUMENT_SLOTS * UNITS_PER_WORD));)
4259 args_byte_size += hfa_size;
4263 cum->fp_regs = fp_regs;
4266 /* Integral and aggregates go in general registers. So do TFmode FP values.
4267 If we have run out of FR registers, then other FP values must also go in
4268 general registers. This can happen when we have a SFmode HFA. */
4269 else if (mode == TFmode || mode == TCmode
4270 || (! FLOAT_MODE_P (mode) || cum->fp_regs == MAX_ARGUMENT_SLOTS))
4271 cum->int_regs = cum->words;
4273 /* If there is a prototype, then FP values go in a FR register when
4274 named, and in a GR register when unnamed. */
4275 else if (cum->prototype)
4278 cum->int_regs = cum->words;
4280 /* ??? Complex types should not reach here. */
4281 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4283 /* If there is no prototype, then FP values go in both FR and GR registers. */
4287 /* ??? Complex types should not reach here. */
4288 cum->fp_regs += (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT ? 2 : 1);
4289 cum->int_regs = cum->words;
4293 /* Arguments with alignment larger than 8 bytes start at the next even
4294 boundary. On ILP32 HPUX, TFmode arguments start on the next even boundary
4295 even though their normal alignment is 8 bytes. See ia64_function_arg. */
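/* Illustrative example (not in the original source; assumes PARM_BOUNDARY is
   64 bits on this target): a struct with 16-byte alignment gets
   PARM_BOUNDARY * 2 == 128 bits here, i.e. a pair of argument slots, while an
   ordinary 8-byte integer keeps the default PARM_BOUNDARY.  */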
4298 ia64_function_arg_boundary (enum machine_mode mode, tree type)
4301 if (mode == TFmode && TARGET_HPUX && TARGET_ILP32)
4302 return PARM_BOUNDARY * 2;
4306 if (TYPE_ALIGN (type) > PARM_BOUNDARY)
4307 return PARM_BOUNDARY * 2;
4309 return PARM_BOUNDARY;
4312 if (GET_MODE_BITSIZE (mode) > PARM_BOUNDARY)
4313 return PARM_BOUNDARY * 2;
4315 return PARM_BOUNDARY;
4318 /* True if it is OK to do sibling call optimization for the specified
4319 call expression EXP. DECL will be the called function, or NULL if
4320 this is an indirect call. */
4322 ia64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
4324 /* We can't perform a sibcall if the current function has the syscall_linkage attribute. */
4326 if (lookup_attribute ("syscall_linkage",
4327 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))))
4330 /* We must always return with our current GP. This means we can
4331 only sibcall to functions defined in the current module. */
4332 return decl && (*targetm.binds_local_p) (decl);
4336 /* Implement va_arg. */
4339 ia64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
4342 /* Variable sized types are passed by reference. */
4343 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
4345 tree ptrtype = build_pointer_type (type);
4346 tree addr = std_gimplify_va_arg_expr (valist, ptrtype, pre_p, post_p);
4347 return build_va_arg_indirect_ref (addr);
4350 /* Aggregate arguments with alignment larger than 8 bytes start at
4351 the next even boundary. Integer and floating point arguments
4352 do so if they are larger than 8 bytes, whether or not they are
4353 also aligned larger than 8 bytes. */
4354 if ((TREE_CODE (type) == REAL_TYPE || TREE_CODE (type) == INTEGER_TYPE)
4355 ? int_size_in_bytes (type) > 8 : TYPE_ALIGN (type) > 8 * BITS_PER_UNIT)
4357 tree t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (valist), valist,
4358 size_int (2 * UNITS_PER_WORD - 1));
4359 t = fold_convert (sizetype, t);
4360 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
4361 size_int (-2 * UNITS_PER_WORD));
4362 t = fold_convert (TREE_TYPE (valist), t);
4363 gimplify_assign (unshare_expr (valist), t, pre_p);
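/* Worked example (illustrative, not in the original source): with
   UNITS_PER_WORD == 8 the statements above compute
   valist = (valist + 15) & -16, rounding the argument pointer up to the next
   16-byte boundary before the doubly aligned argument is fetched.  */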
4366 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4369 /* Return 1 if the function return value is returned in memory. Return 0 if it is in a register. */
4373 ia64_return_in_memory (const_tree valtype, const_tree fntype ATTRIBUTE_UNUSED)
4375 enum machine_mode mode;
4376 enum machine_mode hfa_mode;
4377 HOST_WIDE_INT byte_size;
4379 mode = TYPE_MODE (valtype);
4380 byte_size = GET_MODE_SIZE (mode);
4381 if (mode == BLKmode)
4383 byte_size = int_size_in_bytes (valtype);
4388 /* Hfa's with up to 8 elements are returned in the FP argument registers. */
4390 hfa_mode = hfa_element_mode (valtype, 0);
4391 if (hfa_mode != VOIDmode)
4393 int hfa_size = GET_MODE_SIZE (hfa_mode);
4395 if (byte_size / hfa_size > MAX_ARGUMENT_SLOTS)
4400 else if (byte_size > UNITS_PER_WORD * MAX_INT_RETURN_SLOTS)
4406 /* Return rtx for register that holds the function return value. */
4409 ia64_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
4411 enum machine_mode mode;
4412 enum machine_mode hfa_mode;
4414 mode = TYPE_MODE (valtype);
4415 hfa_mode = hfa_element_mode (valtype, 0);
4417 if (hfa_mode != VOIDmode)
4425 hfa_size = GET_MODE_SIZE (hfa_mode);
4426 byte_size = ((mode == BLKmode)
4427 ? int_size_in_bytes (valtype) : GET_MODE_SIZE (mode));
4429 for (i = 0; offset < byte_size; i++)
4431 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4432 gen_rtx_REG (hfa_mode, FR_ARG_FIRST + i),
4436 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4438 else if (FLOAT_TYPE_P (valtype) && mode != TFmode && mode != TCmode)
4439 return gen_rtx_REG (mode, FR_ARG_FIRST);
4442 bool need_parallel = false;
4444 /* In big-endian mode, we need to manage the layout of aggregates
4445 in the registers so that we get the bits properly aligned in
4446 the highpart of the registers. */
4447 if (BYTES_BIG_ENDIAN
4448 && (mode == BLKmode || (valtype && AGGREGATE_TYPE_P (valtype))))
4449 need_parallel = true;
4451 /* Something like struct S { long double x; char a[0] } is not an
4452 HFA structure, and therefore doesn't go in fp registers. But
4453 the middle-end will give it XFmode anyway, and XFmode values
4454 don't normally fit in integer registers. So we need to smuggle
4455 the value inside a parallel. */
4456 else if (mode == XFmode || mode == XCmode || mode == RFmode)
4457 need_parallel = true;
4467 bytesize = int_size_in_bytes (valtype);
4468 /* An empty PARALLEL is invalid here, but the return value
4469 doesn't matter for empty structs. */
4471 return gen_rtx_REG (mode, GR_RET_FIRST);
4472 for (i = 0; offset < bytesize; i++)
4474 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
4475 gen_rtx_REG (DImode,
4478 offset += UNITS_PER_WORD;
4480 return gen_rtx_PARALLEL (mode, gen_rtvec_v (i, loc));
4483 return gen_rtx_REG (mode, GR_RET_FIRST);
4487 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
4488 We need to emit DTP-relative relocations. */
4491 ia64_output_dwarf_dtprel (FILE *file, int size, rtx x)
4493 gcc_assert (size == 4 || size == 8);
4495 fputs ("\tdata4.ua\t@dtprel(", file);
4497 fputs ("\tdata8.ua\t@dtprel(", file);
4498 output_addr_const (file, x);
4502 /* Print a memory address as an operand to reference that memory location. */
4504 /* ??? Do we need this? It gets used only for 'a' operands. We could perhaps
4505 also call this from ia64_print_operand for memory addresses. */
4508 ia64_print_operand_address (FILE * stream ATTRIBUTE_UNUSED,
4509 rtx address ATTRIBUTE_UNUSED)
4513 /* Print an operand to an assembler instruction.
4514 C Swap and print a comparison operator.
4515 D Print an FP comparison operator.
4516 E Print 32 - constant, for SImode shifts as extract.
4517 e Print 64 - constant, for DImode rotates.
4518 F A floating point constant 0.0 emitted as f0, or 1.0 emitted as f1, or
4519 a floating point register emitted normally.
4520 I Invert a predicate register by adding 1.
4521 J Select the proper predicate register for a condition.
4522 j Select the inverse predicate register for a condition.
4523 O Append .acq for volatile load.
4524 P Postincrement of a MEM.
4525 Q Append .rel for volatile store.
4526 R Print .s .d or nothing for a single, double or no truncation.
4527 S Shift amount for shladd instruction.
4528 T Print an 8-bit sign extended number (K) as a 32-bit unsigned number
4529 for Intel assembler.
4530 U Print an 8-bit sign extended number (K) as a 64-bit unsigned number
4531 for Intel assembler.
4532 X A pair of floating point registers.
4533 r Print register name, or constant 0 as r0. HP compatibility for Linux kernel.
4535 v Print vector constant value as an 8-byte integer value. */
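/* Illustrative example (not in the original source): for an operand such as
   (mem:DI (post_inc:DI (reg rN))), the MEM case of this function prints
   "[rN]" and the 'P' code prints ", 8", the post-increment implied by the
   DImode access.  */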
4538 ia64_print_operand (FILE * file, rtx x, int code)
4545 /* Handled below. */
4550 enum rtx_code c = swap_condition (GET_CODE (x));
4551 fputs (GET_RTX_NAME (c), file);
4556 switch (GET_CODE (x))
4580 str = GET_RTX_NAME (GET_CODE (x));
4587 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - INTVAL (x));
4591 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - INTVAL (x));
4595 if (x == CONST0_RTX (GET_MODE (x)))
4596 str = reg_names [FR_REG (0)];
4597 else if (x == CONST1_RTX (GET_MODE (x)))
4598 str = reg_names [FR_REG (1)];
4601 gcc_assert (GET_CODE (x) == REG);
4602 str = reg_names [REGNO (x)];
4608 fputs (reg_names [REGNO (x) + 1], file);
4614 unsigned int regno = REGNO (XEXP (x, 0));
4615 if (GET_CODE (x) == EQ)
4619 fputs (reg_names [regno], file);
4624 if (MEM_VOLATILE_P (x))
4625 fputs(".acq", file);
4630 HOST_WIDE_INT value;
4632 switch (GET_CODE (XEXP (x, 0)))
4638 x = XEXP (XEXP (XEXP (x, 0), 1), 1);
4639 if (GET_CODE (x) == CONST_INT)
4643 gcc_assert (GET_CODE (x) == REG);
4644 fprintf (file, ", %s", reg_names[REGNO (x)]);
4650 value = GET_MODE_SIZE (GET_MODE (x));
4654 value = - (HOST_WIDE_INT) GET_MODE_SIZE (GET_MODE (x));
4658 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC, value);
4663 if (MEM_VOLATILE_P (x))
4664 fputs(".rel", file);
4668 if (x == CONST0_RTX (GET_MODE (x)))
4670 else if (x == CONST1_RTX (GET_MODE (x)))
4672 else if (x == CONST2_RTX (GET_MODE (x)))
4675 output_operand_lossage ("invalid %%R value");
4679 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4683 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4685 fprintf (file, "0x%x", (int) INTVAL (x) & 0xffffffff);
4691 if (! TARGET_GNU_AS && GET_CODE (x) == CONST_INT)
4693 const char *prefix = "0x";
4694 if (INTVAL (x) & 0x80000000)
4696 fprintf (file, "0xffffffff");
4699 fprintf (file, "%s%x", prefix, (int) INTVAL (x) & 0xffffffff);
4706 unsigned int regno = REGNO (x);
4707 fprintf (file, "%s, %s", reg_names [regno], reg_names [regno + 1]);
4712 /* If this operand is the constant zero, write it as register zero.
4713 Any register, zero, or CONST_INT value is OK here. */
4714 if (GET_CODE (x) == REG)
4715 fputs (reg_names[REGNO (x)], file);
4716 else if (x == CONST0_RTX (GET_MODE (x)))
4718 else if (GET_CODE (x) == CONST_INT)
4719 output_addr_const (file, x);
4721 output_operand_lossage ("invalid %%r value");
4725 gcc_assert (GET_CODE (x) == CONST_VECTOR);
4726 x = simplify_subreg (DImode, x, GET_MODE (x), 0);
4733 /* For conditional branches, returns or calls, substitute
4734 sptk, dptk, dpnt, or spnt for %s. */
4735 x = find_reg_note (current_output_insn, REG_BR_PROB, 0);
4738 int pred_val = INTVAL (XEXP (x, 0));
4740 /* Guess top and bottom 10% statically predicted. */
4741 if (pred_val < REG_BR_PROB_BASE / 50
4742 && br_prob_note_reliable_p (x))
4744 else if (pred_val < REG_BR_PROB_BASE / 2)
4746 else if (pred_val < REG_BR_PROB_BASE / 100 * 98
4747 || !br_prob_note_reliable_p (x))
4752 else if (GET_CODE (current_output_insn) == CALL_INSN)
4757 fputs (which, file);
4762 x = current_insn_predicate;
4765 unsigned int regno = REGNO (XEXP (x, 0));
4766 if (GET_CODE (x) == EQ)
4768 fprintf (file, "(%s) ", reg_names [regno]);
4773 output_operand_lossage ("ia64_print_operand: unknown code");
4777 switch (GET_CODE (x))
4779 /* This happens for the spill/restore instructions. */
4784 /* ... fall through ... */
4787 fputs (reg_names [REGNO (x)], file);
4792 rtx addr = XEXP (x, 0);
4793 if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
4794 addr = XEXP (addr, 0);
4795 fprintf (file, "[%s]", reg_names [REGNO (addr)]);
4800 output_addr_const (file, x);
4807 /* Compute a (partial) cost for rtx X. Return true if the complete
4808 cost has been computed, and false if subexpressions should be
4809 scanned. In either case, *TOTAL contains the cost result. */
4810 /* ??? This is incomplete. */
4813 ia64_rtx_costs (rtx x, int code, int outer_code, int *total,
4814 bool speed ATTRIBUTE_UNUSED)
4822 *total = satisfies_constraint_J (x) ? 0 : COSTS_N_INSNS (1);
4825 if (satisfies_constraint_I (x))
4827 else if (satisfies_constraint_J (x))
4830 *total = COSTS_N_INSNS (1);
4833 if (satisfies_constraint_K (x) || satisfies_constraint_L (x))
4836 *total = COSTS_N_INSNS (1);
4841 *total = COSTS_N_INSNS (1);
4847 *total = COSTS_N_INSNS (3);
4851 /* For multiplies wider than HImode, we have to go to the FPU,
4852 which normally involves copies. Plus there's the latency
4853 of the multiply itself, and the latency of the instructions to
4854 transfer integer regs to FP regs. */
4855 /* ??? Check for FP mode. */
4856 if (GET_MODE_SIZE (GET_MODE (x)) > 2)
4857 *total = COSTS_N_INSNS (10);
4859 *total = COSTS_N_INSNS (2);
4867 *total = COSTS_N_INSNS (1);
4874 /* We make divide expensive, so that divide-by-constant will be
4875 optimized to a multiply. */
4876 *total = COSTS_N_INSNS (60);
4884 /* Calculate the cost of moving data from a register in class FROM to
4885 one in class TO, using MODE. */
4888 ia64_register_move_cost (enum machine_mode mode, enum reg_class from,
4891 /* ADDL_REGS is the same as GR_REGS for movement purposes. */
4892 if (to == ADDL_REGS)
4894 if (from == ADDL_REGS)
4897 /* All costs are symmetric, so reduce cases by putting the
4898 lower number class as the destination. */
4901 enum reg_class tmp = to;
4902 to = from, from = tmp;
4905 /* Moving from FR<->GR in XFmode must be more expensive than 2,
4906 so that we get secondary memory reloads. Between FR_REGS,
4907 we have to make this at least as expensive as MEMORY_MOVE_COST
4908 to avoid spectacularly poor register class preferencing. */
4909 if (mode == XFmode || mode == RFmode)
4911 if (to != GR_REGS || from != GR_REGS)
4912 return MEMORY_MOVE_COST (mode, to, 0);
4920 /* Moving between PR registers takes two insns. */
4921 if (from == PR_REGS)
4923 /* Moving between PR and anything but GR is impossible. */
4924 if (from != GR_REGS)
4925 return MEMORY_MOVE_COST (mode, to, 0);
4929 /* Moving between BR and anything but GR is impossible. */
4930 if (from != GR_REGS && from != GR_AND_BR_REGS)
4931 return MEMORY_MOVE_COST (mode, to, 0);
4936 /* Moving between AR and anything but GR is impossible. */
4937 if (from != GR_REGS)
4938 return MEMORY_MOVE_COST (mode, to, 0);
4944 case GR_AND_FR_REGS:
4945 case GR_AND_BR_REGS:
4956 /* Implement PREFERRED_RELOAD_CLASS. Place additional restrictions on RCLASS
4957 to use when copying X into that class. */
4960 ia64_preferred_reload_class (rtx x, enum reg_class rclass)
4966 /* Don't allow volatile mem reloads into floating point registers.
4967 This is defined to force reload to choose the r/m case instead
4968 of the f/f case when reloading (set (reg fX) (mem/v)). */
4969 if (MEM_P (x) && MEM_VOLATILE_P (x))
4972 /* Force all unrecognized constants into the constant pool. */
4990 /* This function returns the register class required for a secondary
4991 register when copying between one of the registers in RCLASS, and X,
4992 using MODE. A return value of NO_REGS means that no secondary register is required. */
4996 ia64_secondary_reload_class (enum reg_class rclass,
4997 enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
5001 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
5002 regno = true_regnum (x);
5009 /* ??? BR<->BR register copies can happen due to a bad gcse/cse/global
5010 interaction. We end up with two pseudos with overlapping lifetimes
5011 both of which are equiv to the same constant, and both which need
5012 to be in BR_REGS. This seems to be a cse bug. cse_basic_block_end
5013 changes depending on the path length, which means the qty_first_reg
5014 check in make_regs_eqv can give different answers at different times.
5015 At some point I'll probably need a reload_indi pattern to handle
5018 We can also get GR_AND_FR_REGS to BR_REGS/AR_REGS copies, where we
5019 wound up with a FP register from GR_AND_FR_REGS. Extend that to all
5020 non-general registers for good measure. */
5021 if (regno >= 0 && ! GENERAL_REGNO_P (regno))
5024 /* This is needed if a pseudo used as a call_operand gets spilled to a stack slot. */
5026 if (GET_CODE (x) == MEM)
5032 /* Need to go through general registers to get to other class regs. */
5033 if (regno >= 0 && ! (FR_REGNO_P (regno) || GENERAL_REGNO_P (regno)))
5036 /* This can happen when a paradoxical subreg is an operand to the muldi3 pattern. */
5038 /* ??? This shouldn't be necessary after instruction scheduling is
5039 enabled, because paradoxical subregs are not accepted by
5040 register_operand when INSN_SCHEDULING is defined. Or alternatively,
5041 stop the paradoxical subreg stupidity in the *_operand functions
5043 if (GET_CODE (x) == MEM
5044 && (GET_MODE (x) == SImode || GET_MODE (x) == HImode
5045 || GET_MODE (x) == QImode))
5048 /* This can happen because of the ior/and/etc patterns that accept FP
5049 registers as operands. If the third operand is a constant, then it
5050 needs to be reloaded into a FP register. */
5051 if (GET_CODE (x) == CONST_INT)
5054 /* This can happen because of register elimination in a muldi3 insn.
5055 E.g. `26107 * (unsigned long)&u'. */
5056 if (GET_CODE (x) == PLUS)
5061 /* ??? This happens if we cse/gcse a BImode value across a call,
5062 and the function has a nonlocal goto. This is because global
5063 does not allocate call crossing pseudos to hard registers when
5064 crtl->has_nonlocal_goto is true. This is relatively
5065 common for C++ programs that use exceptions. To reproduce,
5066 return NO_REGS and compile libstdc++. */
5067 if (GET_CODE (x) == MEM)
5070 /* This can happen when we take a BImode subreg of a DImode value,
5071 and that DImode value winds up in some non-GR register. */
5072 if (regno >= 0 && ! GENERAL_REGNO_P (regno) && ! PR_REGNO_P (regno))
5084 /* Implement targetm.unspec_may_trap_p hook. */
5086 ia64_unspec_may_trap_p (const_rtx x, unsigned flags)
5088 if (GET_CODE (x) == UNSPEC)
5090 switch (XINT (x, 1))
5096 case UNSPEC_CHKACLR:
5098 /* These unspecs are just wrappers. */
5099 return may_trap_p_1 (XVECEXP (x, 0, 0), flags);
5103 return default_unspec_may_trap_p (x, flags);
5107 /* Parse the -mfixed-range= option string. */
5110 fix_range (const char *const_str)
5113 char *str, *dash, *comma;
5115 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
5116 REG2 are either register names or register numbers. The effect
5117 of this option is to mark the registers in the range from REG1 to
5118 REG2 as ``fixed'' so they won't be used by the compiler. This is
5119 used, e.g., to ensure that kernel mode code doesn't use f32-f127. */
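/* Usage sketch (illustrative, not in the original source): passing
   -mfixed-range=f32-f127 marks f32 through f127 as fixed and call-used,
   which is the kernel-mode case mentioned above; several comma-separated
   ranges such as f32-f63,f96-f127 are also accepted.  */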
5121 i = strlen (const_str);
5122 str = (char *) alloca (i + 1);
5123 memcpy (str, const_str, i + 1);
5127 dash = strchr (str, '-');
5130 warning (0, "value of -mfixed-range must have form REG1-REG2");
5135 comma = strchr (dash + 1, ',');
5139 first = decode_reg_name (str);
5142 warning (0, "unknown register name: %s", str);
5146 last = decode_reg_name (dash + 1);
5149 warning (0, "unknown register name: %s", dash + 1);
5157 warning (0, "%s-%s is an empty range", str, dash + 1);
5161 for (i = first; i <= last; ++i)
5162 fixed_regs[i] = call_used_regs[i] = 1;
5172 /* Implement TARGET_HANDLE_OPTION. */
5175 ia64_handle_option (size_t code, const char *arg, int value)
5179 case OPT_mfixed_range_:
5183 case OPT_mtls_size_:
5184 if (value != 14 && value != 22 && value != 64)
5185 error ("bad value %<%s%> for -mtls-size= switch", arg);
5192 const char *name; /* processor name or nickname. */
5193 enum processor_type processor;
5195 const processor_alias_table[] =
5197 {"itanium", PROCESSOR_ITANIUM},
5198 {"itanium1", PROCESSOR_ITANIUM},
5199 {"merced", PROCESSOR_ITANIUM},
5200 {"itanium2", PROCESSOR_ITANIUM2},
5201 {"mckinley", PROCESSOR_ITANIUM2},
5203 int const pta_size = ARRAY_SIZE (processor_alias_table);
5206 for (i = 0; i < pta_size; i++)
5207 if (!strcmp (arg, processor_alias_table[i].name))
5209 ia64_tune = processor_alias_table[i].processor;
5213 error ("bad value %<%s%> for -mtune= switch", arg);
5222 /* Implement OVERRIDE_OPTIONS. */
5225 ia64_override_options (void)
5227 if (TARGET_AUTO_PIC)
5228 target_flags |= MASK_CONST_GP;
5230 if (TARGET_INLINE_SQRT == INL_MIN_LAT)
5232 warning (0, "not yet implemented: latency-optimized inline square root");
5233 TARGET_INLINE_SQRT = INL_MAX_THR;
5236 ia64_section_threshold = g_switch_set ? g_switch_value : IA64_DEFAULT_GVALUE;
5238 init_machine_status = ia64_init_machine_status;
5241 /* Initialize the record of emitted frame related registers. */
5243 void ia64_init_expanders (void)
5245 memset (&emitted_frame_related_regs, 0, sizeof (emitted_frame_related_regs));
5248 static struct machine_function *
5249 ia64_init_machine_status (void)
5251 return GGC_CNEW (struct machine_function);
5254 static enum attr_itanium_class ia64_safe_itanium_class (rtx);
5255 static enum attr_type ia64_safe_type (rtx);
5257 static enum attr_itanium_class
5258 ia64_safe_itanium_class (rtx insn)
5260 if (recog_memoized (insn) >= 0)
5261 return get_attr_itanium_class (insn);
5263 return ITANIUM_CLASS_UNKNOWN;
5266 static enum attr_type
5267 ia64_safe_type (rtx insn)
5269 if (recog_memoized (insn) >= 0)
5270 return get_attr_type (insn);
5272 return TYPE_UNKNOWN;
5275 /* The following collection of routines emit instruction group stop bits as
5276 necessary to avoid dependencies. */
5278 /* Need to track some additional registers as far as serialization is
5279 concerned so we can properly handle br.call and br.ret. We could
5280 make these registers visible to gcc, but since these registers are
5281 never explicitly used in gcc generated code, it seems wasteful to
5282 do so (plus it would make the call and return patterns needlessly complex). */
5284 #define REG_RP (BR_REG (0))
5285 #define REG_AR_CFM (FIRST_PSEUDO_REGISTER + 1)
5286 /* This is used for volatile asms which may require a stop bit immediately
5287 before and after them. */
5288 #define REG_VOLATILE (FIRST_PSEUDO_REGISTER + 2)
5289 #define AR_UNAT_BIT_0 (FIRST_PSEUDO_REGISTER + 3)
5290 #define NUM_REGS (AR_UNAT_BIT_0 + 64)
5292 /* For each register, we keep track of how it has been written in the
5293 current instruction group.
5295 If a register is written unconditionally (no qualifying predicate),
5296 WRITE_COUNT is set to 2 and FIRST_PRED is ignored.
5298 If a register is written if its qualifying predicate P is true, we
5299 set WRITE_COUNT to 1 and FIRST_PRED to P. Later on, the same register
5300 may be written again by the complement of P (P^1) and when this happens,
5301 WRITE_COUNT gets set to 2.
5303 The result of this is that whenever an insn attempts to write a register
5304 whose WRITE_COUNT is two, we need to issue an insn group barrier first.
5306 If a predicate register is written by a floating-point insn, we set
5307 WRITTEN_BY_FP to true.
5309 If a predicate register is written by an AND.ORCM we set WRITTEN_BY_AND
5310 to true; if it was written by an OR.ANDCM we set WRITTEN_BY_OR to true. */
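/* Worked example (illustrative, not in the original source): after
   "(p6) mov r8 = ..." the entry for r8 has WRITE_COUNT == 1 and
   FIRST_PRED == p6; a later "(p7) mov r8 = ..." under the complementary
   predicate raises WRITE_COUNT to 2, and any further write of r8 in the same
   instruction group then requires a stop bit first.  */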
5312 #if GCC_VERSION >= 4000
5313 #define RWS_FIELD_TYPE __extension__ unsigned short
5315 #define RWS_FIELD_TYPE unsigned int
5317 struct reg_write_state
5319 RWS_FIELD_TYPE write_count : 2;
5320 RWS_FIELD_TYPE first_pred : 10;
5321 RWS_FIELD_TYPE written_by_fp : 1;
5322 RWS_FIELD_TYPE written_by_and : 1;
5323 RWS_FIELD_TYPE written_by_or : 1;
5326 /* Cumulative info for the current instruction group. */
5327 struct reg_write_state rws_sum[NUM_REGS];
5328 #ifdef ENABLE_CHECKING
5329 /* Bitmap whether a register has been written in the current insn. */
5330 HARD_REG_ELT_TYPE rws_insn[(NUM_REGS + HOST_BITS_PER_WIDEST_FAST_INT - 1)
5331 / HOST_BITS_PER_WIDEST_FAST_INT];
5334 rws_insn_set (int regno)
5336 gcc_assert (!TEST_HARD_REG_BIT (rws_insn, regno));
5337 SET_HARD_REG_BIT (rws_insn, regno);
5341 rws_insn_test (int regno)
5343 return TEST_HARD_REG_BIT (rws_insn, regno);
5346 /* When not checking, track just REG_AR_CFM and REG_VOLATILE. */
5347 unsigned char rws_insn[2];
5350 rws_insn_set (int regno)
5352 if (regno == REG_AR_CFM)
5354 else if (regno == REG_VOLATILE)
5359 rws_insn_test (int regno)
5361 if (regno == REG_AR_CFM)
5363 if (regno == REG_VOLATILE)
5369 /* Indicates whether this is the first instruction after a stop bit,
5370 in which case we don't need another stop bit. Without this,
5371 ia64_variable_issue will die when scheduling an alloc. */
5372 static int first_instruction;
5374 /* Misc flags needed to compute RAW/WAW dependencies while we are traversing
5375 RTL for one instruction. */
5378 unsigned int is_write : 1; /* Is register being written? */
5379 unsigned int is_fp : 1; /* Is register used as part of an fp op? */
5380 unsigned int is_branch : 1; /* Is register used as part of a branch? */
5381 unsigned int is_and : 1; /* Is register used as part of and.orcm? */
5382 unsigned int is_or : 1; /* Is register used as part of or.andcm? */
5383 unsigned int is_sibcall : 1; /* Is this a sibling or normal call? */
5386 static void rws_update (int, struct reg_flags, int);
5387 static int rws_access_regno (int, struct reg_flags, int);
5388 static int rws_access_reg (rtx, struct reg_flags, int);
5389 static void update_set_flags (rtx, struct reg_flags *);
5390 static int set_src_needs_barrier (rtx, struct reg_flags, int);
5391 static int rtx_needs_barrier (rtx, struct reg_flags, int);
5392 static void init_insn_group_barriers (void);
5393 static int group_barrier_needed (rtx);
5394 static int safe_group_barrier_needed (rtx);
5395 static int in_safe_group_barrier;
5397 /* Update *RWS for REGNO, which is being written by the current instruction,
5398 with predicate PRED, and associated register flags in FLAGS. */
5401 rws_update (int regno, struct reg_flags flags, int pred)
5404 rws_sum[regno].write_count++;
5406 rws_sum[regno].write_count = 2;
5407 rws_sum[regno].written_by_fp |= flags.is_fp;
5408 /* ??? Not tracking and/or across differing predicates. */
5409 rws_sum[regno].written_by_and = flags.is_and;
5410 rws_sum[regno].written_by_or = flags.is_or;
5411 rws_sum[regno].first_pred = pred;
5414 /* Handle an access to register REGNO of type FLAGS using predicate register
5415 PRED. Update rws_sum array. Return 1 if this access creates
5416 a dependency with an earlier instruction in the same group. */
5419 rws_access_regno (int regno, struct reg_flags flags, int pred)
5421 int need_barrier = 0;
5423 gcc_assert (regno < NUM_REGS);
5425 if (! PR_REGNO_P (regno))
5426 flags.is_and = flags.is_or = 0;
5432 rws_insn_set (regno);
5433 write_count = rws_sum[regno].write_count;
5435 switch (write_count)
5438 /* The register has not been written yet. */
5439 if (!in_safe_group_barrier)
5440 rws_update (regno, flags, pred);
5444 /* The register has been written via a predicate. If this is
5445 not a complementary predicate, then we need a barrier. */
5446 /* ??? This assumes that P and P+1 are always complementary
5447 predicates for P even. */
5448 if (flags.is_and && rws_sum[regno].written_by_and)
5450 else if (flags.is_or && rws_sum[regno].written_by_or)
5452 else if ((rws_sum[regno].first_pred ^ 1) != pred)
5454 if (!in_safe_group_barrier)
5455 rws_update (regno, flags, pred);
5459 /* The register has been unconditionally written already. We
5461 if (flags.is_and && rws_sum[regno].written_by_and)
5463 else if (flags.is_or && rws_sum[regno].written_by_or)
5467 if (!in_safe_group_barrier)
5469 rws_sum[regno].written_by_and = flags.is_and;
5470 rws_sum[regno].written_by_or = flags.is_or;
5480 if (flags.is_branch)
5482 /* Branches have several RAW exceptions that allow us to avoid barriers. */
5485 if (REGNO_REG_CLASS (regno) == BR_REGS || regno == AR_PFS_REGNUM)
5486 /* RAW dependencies on branch regs are permissible as long
5487 as the writer is a non-branch instruction. Since we
5488 never generate code that uses a branch register written
5489 by a branch instruction, handling this case is easy. */
5493 if (REGNO_REG_CLASS (regno) == PR_REGS
5494 && ! rws_sum[regno].written_by_fp)
5495 /* The predicates of a branch are available within the
5496 same insn group as long as the predicate was written by
5497 something other than a floating-point instruction. */
5501 if (flags.is_and && rws_sum[regno].written_by_and)
5503 if (flags.is_or && rws_sum[regno].written_by_or)
5506 switch (rws_sum[regno].write_count)
5509 /* The register has not been written yet. */
5513 /* The register has been written via a predicate. If this is
5514 not a complementary predicate, then we need a barrier. */
5515 /* ??? This assumes that P and P+1 are always complementary
5516 predicates for P even. */
5517 if ((rws_sum[regno].first_pred ^ 1) != pred)
5522 /* The register has been unconditionally written already. We
5532 return need_barrier;
5536 rws_access_reg (rtx reg, struct reg_flags flags, int pred)
5538 int regno = REGNO (reg);
5539 int n = HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg));
5542 return rws_access_regno (regno, flags, pred);
5545 int need_barrier = 0;
5547 need_barrier |= rws_access_regno (regno + n, flags, pred);
5548 return need_barrier;
5552 /* Examine X, which is a SET rtx, and update the flags, the predicate, and
5553 the condition, stored in *PFLAGS, *PPRED and *PCOND. */
5556 update_set_flags (rtx x, struct reg_flags *pflags)
5558 rtx src = SET_SRC (x);
5560 switch (GET_CODE (src))
5566 /* There are four cases here:
5567 (1) The destination is (pc), in which case this is a branch,
5568 nothing here applies.
5569 (2) The destination is ar.lc, in which case this is a
5570 doloop_end_internal,
5571 (3) The destination is an fp register, in which case this is
5572 an fselect instruction.
5573 (4) The condition has (unspec [(reg)] UNSPEC_LDC), in which case
5574 this is a check load.
5575 In all cases, nothing we do in this function applies. */
5579 if (COMPARISON_P (src)
5580 && SCALAR_FLOAT_MODE_P (GET_MODE (XEXP (src, 0))))
5581 /* Set pflags->is_fp to 1 so that we know we're dealing
5582 with a floating point comparison when processing the
5583 destination of the SET. */
5586 /* Discover if this is a parallel comparison. We only handle
5587 and.orcm and or.andcm at present, since we must retain a
5588 strict inverse on the predicate pair. */
5589 else if (GET_CODE (src) == AND)
5591 else if (GET_CODE (src) == IOR)
5598 /* Subroutine of rtx_needs_barrier; this function determines whether the
5599 source of a given SET rtx found in X needs a barrier. FLAGS and PRED
5600 are as in rtx_needs_barrier. COND is an rtx that holds the condition
5604 set_src_needs_barrier (rtx x, struct reg_flags flags, int pred)
5606 int need_barrier = 0;
5608 rtx src = SET_SRC (x);
5610 if (GET_CODE (src) == CALL)
5611 /* We don't need to worry about the result registers that
5612 get written by subroutine call. */
5613 return rtx_needs_barrier (src, flags, pred);
5614 else if (SET_DEST (x) == pc_rtx)
5616 /* X is a conditional branch. */
5617 /* ??? This seems redundant, as the caller sets this bit for all JUMP_INSNs. */
5619 if (!ia64_spec_check_src_p (src))
5620 flags.is_branch = 1;
5621 return rtx_needs_barrier (src, flags, pred);
5624 if (ia64_spec_check_src_p (src))
5625 /* Avoid checking one register twice (in condition
5626 and in 'then' section) for ldc pattern. */
5628 gcc_assert (REG_P (XEXP (src, 2)));
5629 need_barrier = rtx_needs_barrier (XEXP (src, 2), flags, pred);
5631 /* We process MEM below. */
5632 src = XEXP (src, 1);
5635 need_barrier |= rtx_needs_barrier (src, flags, pred);
5638 if (GET_CODE (dst) == ZERO_EXTRACT)
5640 need_barrier |= rtx_needs_barrier (XEXP (dst, 1), flags, pred);
5641 need_barrier |= rtx_needs_barrier (XEXP (dst, 2), flags, pred);
5643 return need_barrier;
5646 /* Handle an access to rtx X of type FLAGS using predicate register
5647 PRED. Return 1 if this access creates a dependency with an earlier
5648 instruction in the same group. */
5651 rtx_needs_barrier (rtx x, struct reg_flags flags, int pred)
5654 int is_complemented = 0;
5655 int need_barrier = 0;
5656 const char *format_ptr;
5657 struct reg_flags new_flags;
5665 switch (GET_CODE (x))
5668 update_set_flags (x, &new_flags);
5669 need_barrier = set_src_needs_barrier (x, new_flags, pred);
5670 if (GET_CODE (SET_SRC (x)) != CALL)
5672 new_flags.is_write = 1;
5673 need_barrier |= rtx_needs_barrier (SET_DEST (x), new_flags, pred);
5678 new_flags.is_write = 0;
5679 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5681 /* Avoid multiple register writes, in case this is a pattern with
5682 multiple CALL rtx. This avoids a failure in rws_access_reg. */
5683 if (! flags.is_sibcall && ! rws_insn_test (REG_AR_CFM))
5685 new_flags.is_write = 1;
5686 need_barrier |= rws_access_regno (REG_RP, new_flags, pred);
5687 need_barrier |= rws_access_regno (AR_PFS_REGNUM, new_flags, pred);
5688 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5693 /* X is a predicated instruction. */
5695 cond = COND_EXEC_TEST (x);
5697 need_barrier = rtx_needs_barrier (cond, flags, 0);
5699 if (GET_CODE (cond) == EQ)
5700 is_complemented = 1;
5701 cond = XEXP (cond, 0);
5702 gcc_assert (GET_CODE (cond) == REG
5703 && REGNO_REG_CLASS (REGNO (cond)) == PR_REGS);
5704 pred = REGNO (cond);
5705 if (is_complemented)
5708 need_barrier |= rtx_needs_barrier (COND_EXEC_CODE (x), flags, pred);
5709 return need_barrier;
5713 /* Clobber & use are for earlier compiler-phases only. */
5718 /* We always emit stop bits for traditional asms. We emit stop bits
5719 for volatile extended asms if TARGET_VOL_ASM_STOP is true. */
5720 if (GET_CODE (x) != ASM_OPERANDS
5721 || (MEM_VOLATILE_P (x) && TARGET_VOL_ASM_STOP))
5723 /* Avoid writing the register multiple times if we have multiple
5724 asm outputs. This avoids a failure in rws_access_reg. */
5725 if (! rws_insn_test (REG_VOLATILE))
5727 new_flags.is_write = 1;
5728 rws_access_regno (REG_VOLATILE, new_flags, pred);
5733 /* For all ASM_OPERANDS, we must traverse the vector of input operands.
5734 We cannot just fall through here since then we would be confused
5735 by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
5736 traditional asms unlike their normal usage. */
5738 for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; --i)
5739 if (rtx_needs_barrier (ASM_OPERANDS_INPUT (x, i), flags, pred))
5744 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5746 rtx pat = XVECEXP (x, 0, i);
5747 switch (GET_CODE (pat))
5750 update_set_flags (pat, &new_flags);
5751 need_barrier |= set_src_needs_barrier (pat, new_flags, pred);
5757 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5768 for (i = XVECLEN (x, 0) - 1; i >= 0; --i)
5770 rtx pat = XVECEXP (x, 0, i);
5771 if (GET_CODE (pat) == SET)
5773 if (GET_CODE (SET_SRC (pat)) != CALL)
5775 new_flags.is_write = 1;
5776 need_barrier |= rtx_needs_barrier (SET_DEST (pat), new_flags,
5780 else if (GET_CODE (pat) == CLOBBER || GET_CODE (pat) == RETURN)
5781 need_barrier |= rtx_needs_barrier (pat, flags, pred);
5786 need_barrier |= rtx_needs_barrier (SUBREG_REG (x), flags, pred);
5789 if (REGNO (x) == AR_UNAT_REGNUM)
5791 for (i = 0; i < 64; ++i)
5792 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + i, flags, pred);
5795 need_barrier = rws_access_reg (x, flags, pred);
5799 /* Find the regs used in memory address computation. */
5800 new_flags.is_write = 0;
5801 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5804 case CONST_INT: case CONST_DOUBLE: case CONST_VECTOR:
5805 case SYMBOL_REF: case LABEL_REF: case CONST:
5808 /* Operators with side-effects. */
5809 case POST_INC: case POST_DEC:
5810 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5812 new_flags.is_write = 0;
5813 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5814 new_flags.is_write = 1;
5815 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5819 gcc_assert (GET_CODE (XEXP (x, 0)) == REG);
5821 new_flags.is_write = 0;
5822 need_barrier = rws_access_reg (XEXP (x, 0), new_flags, pred);
5823 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5824 new_flags.is_write = 1;
5825 need_barrier |= rws_access_reg (XEXP (x, 0), new_flags, pred);
5828 /* Handle common unary and binary ops for efficiency. */
5829 case COMPARE: case PLUS: case MINUS: case MULT: case DIV:
5830 case MOD: case UDIV: case UMOD: case AND: case IOR:
5831 case XOR: case ASHIFT: case ROTATE: case ASHIFTRT: case LSHIFTRT:
5832 case ROTATERT: case SMIN: case SMAX: case UMIN: case UMAX:
5833 case NE: case EQ: case GE: case GT: case LE:
5834 case LT: case GEU: case GTU: case LEU: case LTU:
5835 need_barrier = rtx_needs_barrier (XEXP (x, 0), new_flags, pred);
5836 need_barrier |= rtx_needs_barrier (XEXP (x, 1), new_flags, pred);
5839 case NEG: case NOT: case SIGN_EXTEND: case ZERO_EXTEND:
5840 case TRUNCATE: case FLOAT_EXTEND: case FLOAT_TRUNCATE: case FLOAT:
5841 case FIX: case UNSIGNED_FLOAT: case UNSIGNED_FIX: case ABS:
5842 case SQRT: case FFS: case POPCOUNT:
5843 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5847 /* VEC_SELECT's second argument is a PARALLEL with integers that
5848 describe the elements selected. On ia64, those integers are
5849 always constants. Avoid walking the PARALLEL so that we don't
5850 get confused with "normal" parallels and then die. */
5851 need_barrier = rtx_needs_barrier (XEXP (x, 0), flags, pred);
5855 switch (XINT (x, 1))
5857 case UNSPEC_LTOFF_DTPMOD:
5858 case UNSPEC_LTOFF_DTPREL:
5860 case UNSPEC_LTOFF_TPREL:
5862 case UNSPEC_PRED_REL_MUTEX:
5863 case UNSPEC_PIC_CALL:
5865 case UNSPEC_FETCHADD_ACQ:
5866 case UNSPEC_BSP_VALUE:
5867 case UNSPEC_FLUSHRS:
5868 case UNSPEC_BUNDLE_SELECTOR:
5871 case UNSPEC_GR_SPILL:
5872 case UNSPEC_GR_RESTORE:
5874 HOST_WIDE_INT offset = INTVAL (XVECEXP (x, 0, 1));
5875 HOST_WIDE_INT bit = (offset >> 3) & 63;
5877 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5878 new_flags.is_write = (XINT (x, 1) == UNSPEC_GR_SPILL);
5879 need_barrier |= rws_access_regno (AR_UNAT_BIT_0 + bit,
5884 case UNSPEC_FR_SPILL:
5885 case UNSPEC_FR_RESTORE:
5886 case UNSPEC_GETF_EXP:
5887 case UNSPEC_SETF_EXP:
5889 case UNSPEC_FR_SQRT_RECIP_APPROX:
5890 case UNSPEC_FR_SQRT_RECIP_APPROX_RES:
5894 case UNSPEC_CHKACLR:
5896 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5899 case UNSPEC_FR_RECIP_APPROX:
5901 case UNSPEC_COPYSIGN:
5902 case UNSPEC_FR_RECIP_APPROX_RES:
5903 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 0), flags, pred);
5904 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5907 case UNSPEC_CMPXCHG_ACQ:
5908 need_barrier = rtx_needs_barrier (XVECEXP (x, 0, 1), flags, pred);
5909 need_barrier |= rtx_needs_barrier (XVECEXP (x, 0, 2), flags, pred);
5917 case UNSPEC_VOLATILE:
5918 switch (XINT (x, 1))
5921 /* Alloc must always be the first instruction of a group.
5922 We force this by always returning true. */
5923 /* ??? We might get better scheduling if we explicitly check for
5924 input/local/output register dependencies, and modify the
5925 scheduler so that alloc is always reordered to the start of
5926 the current group. We could then eliminate all of the
5927 first_instruction code. */
5928 rws_access_regno (AR_PFS_REGNUM, flags, pred);
5930 new_flags.is_write = 1;
5931 rws_access_regno (REG_AR_CFM, new_flags, pred);
5934 case UNSPECV_SET_BSP:
5938 case UNSPECV_BLOCKAGE:
5939 case UNSPECV_INSN_GROUP_BARRIER:
5941 case UNSPECV_PSAC_ALL:
5942 case UNSPECV_PSAC_NORMAL:
5951 new_flags.is_write = 0;
5952 need_barrier = rws_access_regno (REG_RP, flags, pred);
5953 need_barrier |= rws_access_regno (AR_PFS_REGNUM, flags, pred);
5955 new_flags.is_write = 1;
5956 need_barrier |= rws_access_regno (AR_EC_REGNUM, new_flags, pred);
5957 need_barrier |= rws_access_regno (REG_AR_CFM, new_flags, pred);
5961 format_ptr = GET_RTX_FORMAT (GET_CODE (x));
5962 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5963 switch (format_ptr[i])
5965 case '0': /* unused field */
5966 case 'i': /* integer */
5967 case 'n': /* note */
5968 case 'w': /* wide integer */
5969 case 's': /* pointer to string */
5970 case 'S': /* optional pointer to string */
5974 if (rtx_needs_barrier (XEXP (x, i), flags, pred))
5979 for (j = XVECLEN (x, i) - 1; j >= 0; --j)
5980 if (rtx_needs_barrier (XVECEXP (x, i, j), flags, pred))
5989 return need_barrier;
5992 /* Clear out the state for group_barrier_needed at the start of a
5993 sequence of insns. */
5996 init_insn_group_barriers (void)
5998 memset (rws_sum, 0, sizeof (rws_sum));
5999 first_instruction = 1;
6002 /* Given the current state, determine whether a group barrier (a stop bit) is
6003 necessary before INSN. Return nonzero if so. This modifies the state to
6004 include the effects of INSN as a side-effect. */
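/* Usage sketch (illustrative, not in the original source): callers such as
   emit_all_insn_group_barriers below call init_insn_group_barriers once and
   then feed each active insn to group_barrier_needed in order; whenever it
   returns nonzero they emit gen_insn_group_barrier (GEN_INT (3)) before the
   insn and reset the state.  */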
6007 group_barrier_needed (rtx insn)
6010 int need_barrier = 0;
6011 struct reg_flags flags;
6013 memset (&flags, 0, sizeof (flags));
6014 switch (GET_CODE (insn))
6020 /* A barrier doesn't imply an instruction group boundary. */
6024 memset (rws_insn, 0, sizeof (rws_insn));
6028 flags.is_branch = 1;
6029 flags.is_sibcall = SIBLING_CALL_P (insn);
6030 memset (rws_insn, 0, sizeof (rws_insn));
6032 /* Don't bundle a call following another call. */
6033 if ((pat = prev_active_insn (insn))
6034 && GET_CODE (pat) == CALL_INSN)
6040 need_barrier = rtx_needs_barrier (PATTERN (insn), flags, 0);
6044 if (!ia64_spec_check_p (insn))
6045 flags.is_branch = 1;
6047 /* Don't bundle a jump following a call. */
6048 if ((pat = prev_active_insn (insn))
6049 && GET_CODE (pat) == CALL_INSN)
6057 if (GET_CODE (PATTERN (insn)) == USE
6058 || GET_CODE (PATTERN (insn)) == CLOBBER)
6059 /* Don't care about USE and CLOBBER "insns"---those are used to
6060 indicate to the optimizer that it shouldn't get rid of
6061 certain operations. */
6064 pat = PATTERN (insn);
6066 /* Ug. Hack hacks hacked elsewhere. */
6067 switch (recog_memoized (insn))
6069 /* We play dependency tricks with the epilogue in order
6070 to get proper schedules. Undo this for dv analysis. */
6071 case CODE_FOR_epilogue_deallocate_stack:
6072 case CODE_FOR_prologue_allocate_stack:
6073 pat = XVECEXP (pat, 0, 0);
6076 /* The pattern we use for br.cloop confuses the code above.
6077 The second element of the vector is representative. */
6078 case CODE_FOR_doloop_end_internal:
6079 pat = XVECEXP (pat, 0, 1);
6082 /* Doesn't generate code. */
6083 case CODE_FOR_pred_rel_mutex:
6084 case CODE_FOR_prologue_use:
6091 memset (rws_insn, 0, sizeof (rws_insn));
6092 need_barrier = rtx_needs_barrier (pat, flags, 0);
6094 /* Check to see if the previous instruction was a volatile asm. */
6097 need_barrier = rws_access_regno (REG_VOLATILE, flags, 0);
6104 if (first_instruction && INSN_P (insn)
6105 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
6106 && GET_CODE (PATTERN (insn)) != USE
6107 && GET_CODE (PATTERN (insn)) != CLOBBER)
6110 first_instruction = 0;
6113 return need_barrier;
6116 /* Like group_barrier_needed, but do not clobber the current state. */
6119 safe_group_barrier_needed (rtx insn)
6121 int saved_first_instruction;
6124 saved_first_instruction = first_instruction;
6125 in_safe_group_barrier = 1;
6127 t = group_barrier_needed (insn);
6129 first_instruction = saved_first_instruction;
6130 in_safe_group_barrier = 0;
6135 /* Scan the current function and insert stop bits as necessary to
6136 eliminate dependencies. This function assumes that a final
6137 instruction scheduling pass has been run which has already
6138 inserted most of the necessary stop bits. This function only
6139 inserts new ones at basic block boundaries, since these are
6140 invisible to the scheduler. */
6143 emit_insn_group_barriers (FILE *dump)
6147 int insns_since_last_label = 0;
6149 init_insn_group_barriers ();
6151 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6153 if (GET_CODE (insn) == CODE_LABEL)
6155 if (insns_since_last_label)
6157 insns_since_last_label = 0;
6159 else if (GET_CODE (insn) == NOTE
6160 && NOTE_KIND (insn) == NOTE_INSN_BASIC_BLOCK)
6162 if (insns_since_last_label)
6164 insns_since_last_label = 0;
6166 else if (GET_CODE (insn) == INSN
6167 && GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
6168 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
6170 init_insn_group_barriers ();
6173 else if (INSN_P (insn))
6175 insns_since_last_label = 1;
6177 if (group_barrier_needed (insn))
6182 fprintf (dump, "Emitting stop before label %d\n",
6183 INSN_UID (last_label));
6184 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), last_label);
6187 init_insn_group_barriers ();
6195 /* Like emit_insn_group_barriers, but run if no final scheduling pass was run.
6196 This function has to emit all necessary group barriers. */
6199 emit_all_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
6203 init_insn_group_barriers ();
6205 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6207 if (GET_CODE (insn) == BARRIER)
6209 rtx last = prev_active_insn (insn);
6213 if (GET_CODE (last) == JUMP_INSN
6214 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
6215 last = prev_active_insn (last);
6216 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
6217 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
6219 init_insn_group_barriers ();
6221 else if (INSN_P (insn))
6223 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
6224 init_insn_group_barriers ();
6225 else if (group_barrier_needed (insn))
6227 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)), insn);
6228 init_insn_group_barriers ();
6229 group_barrier_needed (insn);
6237 /* Instruction scheduling support. */
6239 #define NR_BUNDLES 10
6241 /* A list of names of all available bundles. */
6243 static const char *bundle_name [NR_BUNDLES] =
6249 #if NR_BUNDLES == 10
6259 /* Nonzero if we should insert stop bits into the schedule. */
6261 int ia64_final_schedule = 0;
6263 /* Codes of the corresponding queried units: */
6265 static int _0mii_, _0mmi_, _0mfi_, _0mmf_;
6266 static int _0bbb_, _0mbb_, _0mib_, _0mmb_, _0mfb_, _0mlx_;
6268 static int _1mii_, _1mmi_, _1mfi_, _1mmf_;
6269 static int _1bbb_, _1mbb_, _1mib_, _1mmb_, _1mfb_, _1mlx_;
6271 static int pos_1, pos_2, pos_3, pos_4, pos_5, pos_6;
6273 /* The following variable value is an insn group barrier. */
6275 static rtx dfa_stop_insn;
6277 /* The following variable value is the last issued insn. */
6279 static rtx last_scheduled_insn;
6281 /* The following variable value is a pointer to a DFA state used as
6282 a temporary variable. */
6284 static state_t temp_dfa_state = NULL;
6286 /* The following variable value is the DFA state after issuing the last insn. */
6289 static state_t prev_cycle_state = NULL;
6291 /* The following array element values are TRUE if the corresponding
6292 insn requires to add stop bits before it. */
6294 static char *stops_p = NULL;
6296 /* The following array element values are ZERO for non-speculative
6297 instructions and hold the corresponding speculation check number for
6298 speculative instructions. */
6299 static int *spec_check_no = NULL;
6301 /* Size of spec_check_no array. */
6302 static int max_uid = 0;
6304 /* The following variable is used to set up the array mentioned above. */
6306 static int stop_before_p = 0;
6308 /* The following variable value is the length of the arrays `clocks' and `add_cycles'. */
6311 static int clocks_length;
6313 /* The following array element values are cycles on which the
6314 corresponding insn will be issued. The array is used only for Itanium1. */
6319 /* The following array element values are numbers of cycles that should be
6320 added to improve insn scheduling for MM_insns for Itanium1. */
6322 static int *add_cycles;
6324 /* The following variable value is number of data speculations in progress. */
6325 static int pending_data_specs = 0;
6327 static rtx ia64_single_set (rtx);
6328 static void ia64_emit_insn_before (rtx, rtx);
6330 /* Map a bundle number to its pseudo-op. */
6333 get_bundle_name (int b)
6335 return bundle_name[b];
6339 /* Return the maximum number of instructions a cpu can issue. */
6342 ia64_issue_rate (void)
6347 /* Helper function - like single_set, but look inside COND_EXEC. */
6350 ia64_single_set (rtx insn)
6352 rtx x = PATTERN (insn), ret;
6353 if (GET_CODE (x) == COND_EXEC)
6354 x = COND_EXEC_CODE (x);
6355 if (GET_CODE (x) == SET)
6358 /* Special case here prologue_allocate_stack and epilogue_deallocate_stack.
6359 Although they are not classical single set, the second set is there just
6360 to protect it from moving past FP-relative stack accesses. */
6361 switch (recog_memoized (insn))
6363 case CODE_FOR_prologue_allocate_stack:
6364 case CODE_FOR_epilogue_deallocate_stack:
6365 ret = XVECEXP (x, 0, 0);
6369 ret = single_set_2 (insn, x);
6376 /* Adjust the cost of a scheduling dependency. Return the new cost of
6377 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
6380 ia64_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
6382 enum attr_itanium_class dep_class;
6383 enum attr_itanium_class insn_class;
6385 if (REG_NOTE_KIND (link) != REG_DEP_OUTPUT)
6388 insn_class = ia64_safe_itanium_class (insn);
6389 dep_class = ia64_safe_itanium_class (dep_insn);
6390 if (dep_class == ITANIUM_CLASS_ST || dep_class == ITANIUM_CLASS_STF
6391 || insn_class == ITANIUM_CLASS_ST || insn_class == ITANIUM_CLASS_STF)
6397 /* Like emit_insn_before, but skip cycle_display notes.
6398 ??? When cycle display notes are implemented, update this. */
6401 ia64_emit_insn_before (rtx insn, rtx before)
6403 emit_insn_before (insn, before);
6406 /* The following function marks insns that produce addresses for load
6407 and store insns. Such insns will be placed into M slots because it
6408 decreases latency for Itanium1 (see function
6409 `ia64_produce_address_p' and the DFA descriptions). */
6412 ia64_dependencies_evaluation_hook (rtx head, rtx tail)
6414 rtx insn, next, next_tail;
6416 /* Before reload, which_alternative is not set, which means that
6417 ia64_safe_itanium_class will produce wrong results for (at least)
6418 move instructions. */
6419 if (!reload_completed)
6422 next_tail = NEXT_INSN (tail);
6423 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6426 for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
6428 && ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IALU)
6430 sd_iterator_def sd_it;
6432 bool has_mem_op_consumer_p = false;
6434 FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
6436 enum attr_itanium_class c;
6438 if (DEP_TYPE (dep) != REG_DEP_TRUE)
6441 next = DEP_CON (dep);
6442 c = ia64_safe_itanium_class (next);
6443 if ((c == ITANIUM_CLASS_ST
6444 || c == ITANIUM_CLASS_STF)
6445 && ia64_st_address_bypass_p (insn, next))
6447 has_mem_op_consumer_p = true;
6450 else if ((c == ITANIUM_CLASS_LD
6451 || c == ITANIUM_CLASS_FLD
6452 || c == ITANIUM_CLASS_FLDP)
6453 && ia64_ld_address_bypass_p (insn, next))
6455 has_mem_op_consumer_p = true;
6460 insn->call = has_mem_op_consumer_p;
6464 /* We're beginning a new block. Initialize data structures as necessary. */
6467 ia64_sched_init (FILE *dump ATTRIBUTE_UNUSED,
6468 int sched_verbose ATTRIBUTE_UNUSED,
6469 int max_ready ATTRIBUTE_UNUSED)
6471 #ifdef ENABLE_CHECKING
6474 if (reload_completed)
6475 for (insn = NEXT_INSN (current_sched_info->prev_head);
6476 insn != current_sched_info->next_tail;
6477 insn = NEXT_INSN (insn))
6478 gcc_assert (!SCHED_GROUP_P (insn));
6480 last_scheduled_insn = NULL_RTX;
6481 init_insn_group_barriers ();
6484 /* We're beginning a scheduling pass. Check assertion. */
6487 ia64_sched_init_global (FILE *dump ATTRIBUTE_UNUSED,
6488 int sched_verbose ATTRIBUTE_UNUSED,
6489 int max_ready ATTRIBUTE_UNUSED)
6491 gcc_assert (!pending_data_specs);
6494 /* Scheduling pass is now finished. Free/reset static variable. */
6496 ia64_sched_finish_global (FILE *dump ATTRIBUTE_UNUSED,
6497 int sched_verbose ATTRIBUTE_UNUSED)
6499 free (spec_check_no);
6504 /* We are about to begin issuing insns for this clock cycle.
6505 Override the default sort algorithm to better slot instructions. */
6508 ia64_dfa_sched_reorder (FILE *dump, int sched_verbose, rtx *ready,
6509 int *pn_ready, int clock_var ATTRIBUTE_UNUSED,
6513 int n_ready = *pn_ready;
6514 rtx *e_ready = ready + n_ready;
6518 fprintf (dump, "// ia64_dfa_sched_reorder (type %d):\n", reorder_type);
6520 if (reorder_type == 0)
6522 /* First, move all USEs, CLOBBERs and other crud out of the way. */
6524 for (insnp = ready; insnp < e_ready; insnp++)
6525 if (insnp < e_ready)
6528 enum attr_type t = ia64_safe_type (insn);
6529 if (t == TYPE_UNKNOWN)
6531 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
6532 || asm_noperands (PATTERN (insn)) >= 0)
6534 rtx lowest = ready[n_asms];
6535 ready[n_asms] = insn;
6541 rtx highest = ready[n_ready - 1];
6542 ready[n_ready - 1] = insn;
6549 if (n_asms < n_ready)
6551 /* Some normal insns to process. Skip the asms. */
6555 else if (n_ready > 0)
6559 if (ia64_final_schedule)
6562 int nr_need_stop = 0;
6564 for (insnp = ready; insnp < e_ready; insnp++)
6565 if (safe_group_barrier_needed (*insnp))
6568 if (reorder_type == 1 && n_ready == nr_need_stop)
6570 if (reorder_type == 0)
6573 /* Move down everything that needs a stop bit, preserving
6575 while (insnp-- > ready + deleted)
6576 while (insnp >= ready + deleted)
6579 if (! safe_group_barrier_needed (insn))
6581 memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
6592 /* We are about to begin issuing insns for this clock cycle. Override
6593 the default sort algorithm to better slot instructions. */
6596 ia64_sched_reorder (FILE *dump, int sched_verbose, rtx *ready, int *pn_ready,
6599 return ia64_dfa_sched_reorder (dump, sched_verbose, ready,
6600 pn_ready, clock_var, 0);
6603 /* Like ia64_sched_reorder, but called after issuing each insn.
6604 Override the default sort algorithm to better slot instructions. */
6607 ia64_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
6608 int sched_verbose ATTRIBUTE_UNUSED, rtx *ready,
6609 int *pn_ready, int clock_var)
6611 if (ia64_tune == PROCESSOR_ITANIUM && reload_completed && last_scheduled_insn)
6612 clocks [INSN_UID (last_scheduled_insn)] = clock_var;
6613 return ia64_dfa_sched_reorder (dump, sched_verbose, ready, pn_ready,
6617 /* We are about to issue INSN. Return the number of insns left on the
6618 ready queue that can be issued this cycle. */
6621 ia64_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
6622 int sched_verbose ATTRIBUTE_UNUSED,
6623 rtx insn ATTRIBUTE_UNUSED,
6624 int can_issue_more ATTRIBUTE_UNUSED)
6626 if (current_sched_info->flags & DO_SPECULATION)
6627 /* Modulo scheduling does not extend h_i_d when emitting
6628 new instructions. Deal with it. */
6630 if (DONE_SPEC (insn) & BEGIN_DATA)
6631 pending_data_specs++;
6632 if (CHECK_SPEC (insn) & BEGIN_DATA)
6633 pending_data_specs--;
6636 last_scheduled_insn = insn;
6637 memcpy (prev_cycle_state, curr_state, dfa_state_size);
6638 if (reload_completed)
6640 int needed = group_barrier_needed (insn);
6642 gcc_assert (!needed);
6643 if (GET_CODE (insn) == CALL_INSN)
6644 init_insn_group_barriers ();
6645 stops_p [INSN_UID (insn)] = stop_before_p;
6651 /* We are choosing insn from the ready queue. Return nonzero if INSN
6655 ia64_first_cycle_multipass_dfa_lookahead_guard (rtx insn)
6657 gcc_assert (insn && INSN_P (insn));
6658 return ((!reload_completed
6659 || !safe_group_barrier_needed (insn))
6660 && ia64_first_cycle_multipass_dfa_lookahead_guard_spec (insn));
6663 /* We are choosing insn from the ready queue. Return nonzero if INSN
6667 ia64_first_cycle_multipass_dfa_lookahead_guard_spec (const_rtx insn)
6669 gcc_assert (insn && INSN_P (insn));
6670 /* The size of the ALAT is 32. Since we perform conservative data speculation,
6671 we keep the ALAT half empty. */
6672 return (pending_data_specs < 16
6673 || !(TODO_SPEC (insn) & BEGIN_DATA));
6676 /* The following variable value is a pseudo-insn used by the DFA insn
6677 scheduler to change the DFA state when the simulated clock is
6680 static rtx dfa_pre_cycle_insn;
6682 /* We are about to begin issuing INSN. Return nonzero if we cannot
6683 issue it on given cycle CLOCK and return zero if we should not sort
6684 the ready queue on the next clock start. */
6687 ia64_dfa_new_cycle (FILE *dump, int verbose, rtx insn, int last_clock,
6688 int clock, int *sort_p)
6690 int setup_clocks_p = FALSE;
6692 gcc_assert (insn && INSN_P (insn));
6693 if ((reload_completed && safe_group_barrier_needed (insn))
6694 || (last_scheduled_insn
6695 && (GET_CODE (last_scheduled_insn) == CALL_INSN
6696 || GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6697 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)))
6699 init_insn_group_barriers ();
6700 if (verbose && dump)
6701 fprintf (dump, "// Stop should be before %d%s\n", INSN_UID (insn),
6702 last_clock == clock ? " + cycle advance" : "");
6704 if (last_clock == clock)
6706 state_transition (curr_state, dfa_stop_insn);
6707 if (TARGET_EARLY_STOP_BITS)
6708 *sort_p = (last_scheduled_insn == NULL_RTX
6709 || GET_CODE (last_scheduled_insn) != CALL_INSN);
6714 else if (reload_completed)
6715 setup_clocks_p = TRUE;
6716 if (GET_CODE (PATTERN (last_scheduled_insn)) == ASM_INPUT
6717 || asm_noperands (PATTERN (last_scheduled_insn)) >= 0)
6718 state_reset (curr_state);
6721 memcpy (curr_state, prev_cycle_state, dfa_state_size);
6722 state_transition (curr_state, dfa_stop_insn);
6723 state_transition (curr_state, dfa_pre_cycle_insn);
6724 state_transition (curr_state, NULL);
6727 else if (reload_completed)
6728 setup_clocks_p = TRUE;
6729 if (setup_clocks_p && ia64_tune == PROCESSOR_ITANIUM
6730 && GET_CODE (PATTERN (insn)) != ASM_INPUT
6731 && asm_noperands (PATTERN (insn)) < 0)
6733 enum attr_itanium_class c = ia64_safe_itanium_class (insn);
6735 if (c != ITANIUM_CLASS_MMMUL && c != ITANIUM_CLASS_MMSHF)
6737 sd_iterator_def sd_it;
6741 FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
6742 if (DEP_TYPE (dep) == REG_DEP_TRUE)
6744 enum attr_itanium_class dep_class;
6745 rtx dep_insn = DEP_PRO (dep);
6747 dep_class = ia64_safe_itanium_class (dep_insn);
6748 if ((dep_class == ITANIUM_CLASS_MMMUL
6749 || dep_class == ITANIUM_CLASS_MMSHF)
6750 && last_clock - clocks [INSN_UID (dep_insn)] < 4
6752 || last_clock - clocks [INSN_UID (dep_insn)] < d))
6753 d = last_clock - clocks [INSN_UID (dep_insn)];
6756 add_cycles [INSN_UID (insn)] = 3 - d;
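/* For example, if the nearest MMMUL/MMSHF producer is d == 2 cycles away,
   we record add_cycles[uid] = 3 - d = 1; the extra cycles are materialized
   later with nop bundles and stop bits by the Itanium1-specific loop in
   `bundling'.  */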
6762 /* Implement targetm.sched.h_i_d_extended hook.
6763 Extend internal data structures. */
6765 ia64_h_i_d_extended (void)
6767 if (current_sched_info->flags & DO_SPECULATION)
6769 int new_max_uid = get_max_uid () + 1;
6771 spec_check_no = (int *) xrecalloc (spec_check_no, new_max_uid,
6772 max_uid, sizeof (*spec_check_no));
6773 max_uid = new_max_uid;
6776 if (stops_p != NULL)
6778 int new_clocks_length = get_max_uid () + 1;
6780 stops_p = (char *) xrecalloc (stops_p, new_clocks_length, clocks_length, 1);
6782 if (ia64_tune == PROCESSOR_ITANIUM)
6784 clocks = (int *) xrecalloc (clocks, new_clocks_length, clocks_length,
6786 add_cycles = (int *) xrecalloc (add_cycles, new_clocks_length,
6787 clocks_length, sizeof (int));
6790 clocks_length = new_clocks_length;
6794 /* Constants that help mapping 'enum machine_mode' to int. */
6797 SPEC_MODE_INVALID = -1,
6798 SPEC_MODE_FIRST = 0,
6799 SPEC_MODE_FOR_EXTEND_FIRST = 1,
6800 SPEC_MODE_FOR_EXTEND_LAST = 3,
6804 /* Return index of the MODE. */
6806 ia64_mode_to_int (enum machine_mode mode)
6810 case BImode: return 0; /* SPEC_MODE_FIRST */
6811 case QImode: return 1; /* SPEC_MODE_FOR_EXTEND_FIRST */
6812 case HImode: return 2;
6813 case SImode: return 3; /* SPEC_MODE_FOR_EXTEND_LAST */
6814 case DImode: return 4;
6815 case SFmode: return 5;
6816 case DFmode: return 6;
6817 case XFmode: return 7;
6819 /* ??? This mode needs testing. Bypasses for the ldfp8 instruction are not
6820 mentioned in itanium[12].md. The predicate fp_register_operand also
6821 needs to be defined. Bottom line: better to disable it for now. */
6822 return SPEC_MODE_INVALID;
6823 default: return SPEC_MODE_INVALID;
6827 /* Provide information about speculation capabilities. */
6829 ia64_set_sched_flags (spec_info_t spec_info)
6831 unsigned int *flags = &(current_sched_info->flags);
6833 if (*flags & SCHED_RGN
6834 || *flags & SCHED_EBB)
6838 if ((mflag_sched_br_data_spec && !reload_completed && optimize > 0)
6839 || (mflag_sched_ar_data_spec && reload_completed))
6843 if ((mflag_sched_br_in_data_spec && !reload_completed)
6844 || (mflag_sched_ar_in_data_spec && reload_completed))
6848 if (mflag_sched_control_spec)
6850 mask |= BEGIN_CONTROL;
6852 if (mflag_sched_in_control_spec)
6853 mask |= BE_IN_CONTROL;
6858 *flags |= USE_DEPS_LIST | DO_SPECULATION;
6860 if (mask & BE_IN_SPEC)
6863 spec_info->mask = mask;
6864 spec_info->flags = 0;
6866 if ((mask & DATA_SPEC) && mflag_sched_prefer_non_data_spec_insns)
6867 spec_info->flags |= PREFER_NON_DATA_SPEC;
6869 if ((mask & CONTROL_SPEC)
6870 && mflag_sched_prefer_non_control_spec_insns)
6871 spec_info->flags |= PREFER_NON_CONTROL_SPEC;
6873 if (mflag_sched_spec_verbose)
6875 if (sched_verbose >= 1)
6876 spec_info->dump = sched_dump;
6878 spec_info->dump = stderr;
6881 spec_info->dump = 0;
6883 if (mflag_sched_count_spec_in_critical_path)
6884 spec_info->flags |= COUNT_SPEC_IN_CRITICAL_PATH;
6889 /* Implement targetm.sched.speculate_insn hook.
6890 Check whether INSN can be made TS-speculative.
6891 If not, return -1.
6892 If so, generate the speculative pattern in NEW_PAT and return 1.
6893 If the current pattern of INSN already provides TS speculation, return 0. */
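/* Roughly, the pattern shapes accepted below (after stripping any
   COND_EXEC) are:
     (set (reg) (mem ...))
     (set (reg) (zero_extend (mem ...)))
     (set (reg) (unspec [(mem ...)] UNSPEC_LDA/LDS/LDSA))
   i.e. a load into a general or FP register from a memory location
   addressed by a register; anything else makes the hook refuse to
   speculate.  */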
6895 ia64_speculate_insn (rtx insn, ds_t ts, rtx *new_pat)
6897 rtx pat, reg, mem, mem_reg;
6898 int mode_no, gen_p = 1;
6901 gcc_assert (!(ts & ~BEGIN_SPEC) && ts);
6903 pat = PATTERN (insn);
6905 if (GET_CODE (pat) == COND_EXEC)
6906 pat = COND_EXEC_CODE (pat);
6908 /* This should be a SET ... */
6909 if (GET_CODE (pat) != SET)
6912 reg = SET_DEST (pat);
6913 /* ... to the general/fp register ... */
6914 if (!REG_P (reg) || !(GR_REGNO_P (REGNO (reg)) || FP_REGNO_P (REGNO (reg))))
6917 /* ... from the mem ... */
6918 mem = SET_SRC (pat);
6920 /* ... that can, possibly, be a zero_extend ... */
6921 if (GET_CODE (mem) == ZERO_EXTEND)
6923 mem = XEXP (mem, 0);
6929 /* ... or a speculative load. */
6930 if (GET_CODE (mem) == UNSPEC)
6934 code = XINT (mem, 1);
6935 if (code != UNSPEC_LDA && code != UNSPEC_LDS && code != UNSPEC_LDSA)
6938 if ((code == UNSPEC_LDA && !(ts & BEGIN_CONTROL))
6939 || (code == UNSPEC_LDS && !(ts & BEGIN_DATA))
6940 || code == UNSPEC_LDSA)
6943 mem = XVECEXP (mem, 0, 0);
6944 gcc_assert (MEM_P (mem));
6947 /* Source should be a mem ... */
6951 /* ... addressed by a register. */
6952 mem_reg = XEXP (mem, 0);
6953 if (!REG_P (mem_reg))
6956 /* We should use MEM's mode since REG's mode in presence of ZERO_EXTEND
6957 will always be DImode. */
6958 mode_no = ia64_mode_to_int (GET_MODE (mem));
6960 if (mode_no == SPEC_MODE_INVALID
6962 && !(SPEC_MODE_FOR_EXTEND_FIRST <= mode_no
6963 && mode_no <= SPEC_MODE_FOR_EXTEND_LAST)))
6966 extract_insn_cached (insn);
6967 gcc_assert (reg == recog_data.operand[0] && mem == recog_data.operand[1]);
6969 *new_pat = ia64_gen_spec_insn (insn, ts, mode_no, gen_p != 0, extend_p);
6976 /* Offset to reach ZERO_EXTEND patterns. */
6977 SPEC_GEN_EXTEND_OFFSET = SPEC_MODE_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 1,
6978 /* Number of patterns for each speculation mode. */
6979 SPEC_N = (SPEC_MODE_LAST
6980 + SPEC_MODE_FOR_EXTEND_LAST - SPEC_MODE_FOR_EXTEND_FIRST + 2)
6983 enum SPEC_GEN_LD_MAP
6985 /* Offset to ld.a patterns. */
6986 SPEC_GEN_A = 0 * SPEC_N,
6987 /* Offset to ld.s patterns. */
6988 SPEC_GEN_S = 1 * SPEC_N,
6989 /* Offset to ld.sa patterns. */
6990 SPEC_GEN_SA = 2 * SPEC_N,
6991 /* Offset to ld.sa patterns. For these patterns the corresponding ld.c will
6992 be generated instead of chk.a. */
6993 SPEC_GEN_SA_FOR_S = 3 * SPEC_N
6996 /* These offsets are used to get (4 * SPEC_N). */
6997 enum SPEC_GEN_CHECK_OFFSET
6999 SPEC_GEN_CHKA_FOR_A_OFFSET = 4 * SPEC_N - SPEC_GEN_A,
7000 SPEC_GEN_CHKA_FOR_SA_OFFSET = 4 * SPEC_N - SPEC_GEN_SA
7003 /* If GEN_P is true, calculate the index of the needed speculation check and
7004 return the speculative pattern for INSN with speculation mode TS, machine mode
7005 MODE_NO and with ZERO_EXTEND (if EXTEND_P is true).
7006 If GEN_P is false, just calculate the index of the needed speculation check. */
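/* A sketch of the indexing scheme used below, based on the SPEC_GEN_*
   enumerators above and the layout of the gen_load[] table: each group of
   SPEC_N entries covers the nine move modes BI..TI plus the three
   zero-extending loads.  So, for instance, a purely data-speculative
   (BEGIN_DATA only) DImode load without zero_extend selects
   gen_load[SPEC_GEN_A + 4], i.e. the ld.a ("advanced") variant of the
   DImode move; and when chk.a must be used instead of ld.c
   (!mflag_sched_ldc), SHIFT moves the index recorded in spec_check_no into
   the chk.a group of gen_check[] (see SPEC_GEN_CHKA_FOR_*_OFFSET).  */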
7008 ia64_gen_spec_insn (rtx insn, ds_t ts, int mode_no, bool gen_p, bool extend_p)
7014 static rtx (* const gen_load[]) (rtx, rtx) = {
7024 gen_zero_extendqidi2_advanced,
7025 gen_zero_extendhidi2_advanced,
7026 gen_zero_extendsidi2_advanced,
7028 gen_movbi_speculative,
7029 gen_movqi_speculative,
7030 gen_movhi_speculative,
7031 gen_movsi_speculative,
7032 gen_movdi_speculative,
7033 gen_movsf_speculative,
7034 gen_movdf_speculative,
7035 gen_movxf_speculative,
7036 gen_movti_speculative,
7037 gen_zero_extendqidi2_speculative,
7038 gen_zero_extendhidi2_speculative,
7039 gen_zero_extendsidi2_speculative,
7041 gen_movbi_speculative_advanced,
7042 gen_movqi_speculative_advanced,
7043 gen_movhi_speculative_advanced,
7044 gen_movsi_speculative_advanced,
7045 gen_movdi_speculative_advanced,
7046 gen_movsf_speculative_advanced,
7047 gen_movdf_speculative_advanced,
7048 gen_movxf_speculative_advanced,
7049 gen_movti_speculative_advanced,
7050 gen_zero_extendqidi2_speculative_advanced,
7051 gen_zero_extendhidi2_speculative_advanced,
7052 gen_zero_extendsidi2_speculative_advanced,
7054 gen_movbi_speculative_advanced,
7055 gen_movqi_speculative_advanced,
7056 gen_movhi_speculative_advanced,
7057 gen_movsi_speculative_advanced,
7058 gen_movdi_speculative_advanced,
7059 gen_movsf_speculative_advanced,
7060 gen_movdf_speculative_advanced,
7061 gen_movxf_speculative_advanced,
7062 gen_movti_speculative_advanced,
7063 gen_zero_extendqidi2_speculative_advanced,
7064 gen_zero_extendhidi2_speculative_advanced,
7065 gen_zero_extendsidi2_speculative_advanced
7068 load_no = extend_p ? mode_no + SPEC_GEN_EXTEND_OFFSET : mode_no;
7070 if (ts & BEGIN_DATA)
7072 /* We don't need recovery because, even if this is ld.sa,
7073 the ALAT entry will be allocated only if the NAT bit is zero.
7074 So it is enough to use ld.c here. */
7076 if (ts & BEGIN_CONTROL)
7078 load_no += SPEC_GEN_SA;
7080 if (!mflag_sched_ldc)
7081 shift = SPEC_GEN_CHKA_FOR_SA_OFFSET;
7085 load_no += SPEC_GEN_A;
7087 if (!mflag_sched_ldc)
7088 shift = SPEC_GEN_CHKA_FOR_A_OFFSET;
7091 else if (ts & BEGIN_CONTROL)
7093 /* ld.sa can be used instead of ld.s to avoid basic block splitting. */
7094 if (!mflag_control_ldc)
7095 load_no += SPEC_GEN_S;
7098 gcc_assert (mflag_sched_ldc);
7099 load_no += SPEC_GEN_SA_FOR_S;
7105 /* Set the desired check index. We add 1 because a zero element in this
7106 array means that the instruction with that uid is non-speculative. */
7107 spec_check_no[INSN_UID (insn)] = load_no + shift + 1;
7112 new_pat = gen_load[load_no] (copy_rtx (recog_data.operand[0]),
7113 copy_rtx (recog_data.operand[1]));
7115 pat = PATTERN (insn);
7116 if (GET_CODE (pat) == COND_EXEC)
7117 new_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx
7118 (COND_EXEC_TEST (pat)), new_pat);
7123 /* Offset to branchy checks. */
7124 enum { SPEC_GEN_CHECK_MUTATION_OFFSET = 5 * SPEC_N };
7126 /* Return nonzero if INSN needs a branchy recovery check. */
7128 ia64_needs_block_p (const_rtx insn)
7132 check_no = spec_check_no[INSN_UID(insn)] - 1;
7133 gcc_assert (0 <= check_no && check_no < SPEC_GEN_CHECK_MUTATION_OFFSET);
7135 return ((SPEC_GEN_S <= check_no && check_no < SPEC_GEN_S + SPEC_N)
7136 || (4 * SPEC_N <= check_no && check_no < 4 * SPEC_N + SPEC_N));
7139 /* Generate (or regenerate, if MUTATE_P) a recovery check for INSN.
7140 If LABEL != 0 or MUTATE_P, generate a branchy recovery check.
7141 Otherwise, generate a simple check. */
7143 ia64_gen_check (rtx insn, rtx label, bool mutate_p)
7145 rtx op1, pat, check_pat;
7147 static rtx (* const gen_check[]) (rtx, rtx) = {
7157 gen_zero_extendqidi2_clr,
7158 gen_zero_extendhidi2_clr,
7159 gen_zero_extendsidi2_clr,
7161 gen_speculation_check_bi,
7162 gen_speculation_check_qi,
7163 gen_speculation_check_hi,
7164 gen_speculation_check_si,
7165 gen_speculation_check_di,
7166 gen_speculation_check_sf,
7167 gen_speculation_check_df,
7168 gen_speculation_check_xf,
7169 gen_speculation_check_ti,
7170 gen_speculation_check_di,
7171 gen_speculation_check_di,
7172 gen_speculation_check_di,
7183 gen_zero_extendqidi2_clr,
7184 gen_zero_extendhidi2_clr,
7185 gen_zero_extendsidi2_clr,
7196 gen_zero_extendqidi2_clr,
7197 gen_zero_extendhidi2_clr,
7198 gen_zero_extendsidi2_clr,
7200 gen_advanced_load_check_clr_bi,
7201 gen_advanced_load_check_clr_qi,
7202 gen_advanced_load_check_clr_hi,
7203 gen_advanced_load_check_clr_si,
7204 gen_advanced_load_check_clr_di,
7205 gen_advanced_load_check_clr_sf,
7206 gen_advanced_load_check_clr_df,
7207 gen_advanced_load_check_clr_xf,
7208 gen_advanced_load_check_clr_ti,
7209 gen_advanced_load_check_clr_di,
7210 gen_advanced_load_check_clr_di,
7211 gen_advanced_load_check_clr_di,
7213 /* The following checks are generated during mutation. */
7214 gen_advanced_load_check_clr_bi,
7215 gen_advanced_load_check_clr_qi,
7216 gen_advanced_load_check_clr_hi,
7217 gen_advanced_load_check_clr_si,
7218 gen_advanced_load_check_clr_di,
7219 gen_advanced_load_check_clr_sf,
7220 gen_advanced_load_check_clr_df,
7221 gen_advanced_load_check_clr_xf,
7222 gen_advanced_load_check_clr_ti,
7223 gen_advanced_load_check_clr_di,
7224 gen_advanced_load_check_clr_di,
7225 gen_advanced_load_check_clr_di,
7227 0,0,0,0,0,0,0,0,0,0,0,0,
7229 gen_advanced_load_check_clr_bi,
7230 gen_advanced_load_check_clr_qi,
7231 gen_advanced_load_check_clr_hi,
7232 gen_advanced_load_check_clr_si,
7233 gen_advanced_load_check_clr_di,
7234 gen_advanced_load_check_clr_sf,
7235 gen_advanced_load_check_clr_df,
7236 gen_advanced_load_check_clr_xf,
7237 gen_advanced_load_check_clr_ti,
7238 gen_advanced_load_check_clr_di,
7239 gen_advanced_load_check_clr_di,
7240 gen_advanced_load_check_clr_di,
7242 gen_speculation_check_bi,
7243 gen_speculation_check_qi,
7244 gen_speculation_check_hi,
7245 gen_speculation_check_si,
7246 gen_speculation_check_di,
7247 gen_speculation_check_sf,
7248 gen_speculation_check_df,
7249 gen_speculation_check_xf,
7250 gen_speculation_check_ti,
7251 gen_speculation_check_di,
7252 gen_speculation_check_di,
7253 gen_speculation_check_di
7256 extract_insn_cached (insn);
7260 gcc_assert (mutate_p || ia64_needs_block_p (insn));
7265 gcc_assert (!mutate_p && !ia64_needs_block_p (insn));
7266 op1 = copy_rtx (recog_data.operand[1]);
7271 Find the speculation check number by searching for original
7272 speculative load in the RESOLVED_DEPS list of INSN.
7273 As long as patterns are unique for each instruction, this can be
7274 accomplished by matching ORIG_PAT fields. */
7276 sd_iterator_def sd_it;
7279 rtx orig_pat = ORIG_PAT (insn);
7281 FOR_EACH_DEP (insn, SD_LIST_RES_BACK, sd_it, dep)
7283 rtx x = DEP_PRO (dep);
7285 if (ORIG_PAT (x) == orig_pat)
7286 check_no = spec_check_no[INSN_UID (x)];
7288 gcc_assert (check_no);
7290 spec_check_no[INSN_UID (insn)] = (check_no
7291 + SPEC_GEN_CHECK_MUTATION_OFFSET);
7294 check_pat = (gen_check[spec_check_no[INSN_UID (insn)] - 1]
7295 (copy_rtx (recog_data.operand[0]), op1));
7297 pat = PATTERN (insn);
7298 if (GET_CODE (pat) == COND_EXEC)
7299 check_pat = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (pat)),
7305 /* Return nonzero if X is a branchy recovery check. */
7307 ia64_spec_check_p (rtx x)
7310 if (GET_CODE (x) == COND_EXEC)
7311 x = COND_EXEC_CODE (x);
7312 if (GET_CODE (x) == SET)
7313 return ia64_spec_check_src_p (SET_SRC (x));
7317 /* Return nonzero if SRC belongs to a recovery check. */
7319 ia64_spec_check_src_p (rtx src)
7321 if (GET_CODE (src) == IF_THEN_ELSE)
7326 if (GET_CODE (t) == NE)
7330 if (GET_CODE (t) == UNSPEC)
7336 if (code == UNSPEC_CHKACLR
7337 || code == UNSPEC_CHKS
7338 || code == UNSPEC_LDCCLR)
7340 gcc_assert (code != 0);
7350 /* The following page contains abstract data `bundle states' which are
7351 used for bundling insns (inserting nops and template generation). */
7353 /* The following describes state of insn bundling. */
7357 /* Unique bundle state number to identify them in the debugging
7360 rtx insn; /* corresponding insn, NULL for the 1st and the last state */
7361 /* number of nops before and after the insn */
7362 short before_nops_num, after_nops_num;
7363 int insn_num; /* insn number (0 - for initial state, 1 - for the 1st
7365 int cost; /* cost of the state in cycles */
7366 int accumulated_insns_num; /* number of all previous insns including
7367 nops. An L insn is counted as 2 insns */
7368 int branch_deviation; /* deviation of previous branches from 3rd slots */
7369 struct bundle_state *next; /* next state with the same insn_num */
7370 struct bundle_state *originator; /* originator (previous insn state) */
7371 /* All bundle states are in the following chain. */
7372 struct bundle_state *allocated_states_chain;
7373 /* The DFA State after issuing the insn and the nops. */
7377 /* The following maps an insn number to the corresponding bundle state. */
7379 static struct bundle_state **index_to_bundle_states;
7381 /* The unique number of next bundle state. */
7383 static int bundle_states_num;
7385 /* All allocated bundle states are in the following chain. */
7387 static struct bundle_state *allocated_bundle_states_chain;
7389 /* All allocated but not used bundle states are in the following
7390 chain. */
7392 static struct bundle_state *free_bundle_state_chain;
7395 /* The following function returns a free bundle state. */
7397 static struct bundle_state *
7398 get_free_bundle_state (void)
7400 struct bundle_state *result;
7402 if (free_bundle_state_chain != NULL)
7404 result = free_bundle_state_chain;
7405 free_bundle_state_chain = result->next;
7409 result = XNEW (struct bundle_state);
7410 result->dfa_state = xmalloc (dfa_state_size);
7411 result->allocated_states_chain = allocated_bundle_states_chain;
7412 allocated_bundle_states_chain = result;
7414 result->unique_num = bundle_states_num++;
7419 /* The following function frees given bundle state. */
7422 free_bundle_state (struct bundle_state *state)
7424 state->next = free_bundle_state_chain;
7425 free_bundle_state_chain = state;
7428 /* Start work with abstract data `bundle states'. */
7431 initiate_bundle_states (void)
7433 bundle_states_num = 0;
7434 free_bundle_state_chain = NULL;
7435 allocated_bundle_states_chain = NULL;
7438 /* Finish work with abstract data `bundle states'. */
7441 finish_bundle_states (void)
7443 struct bundle_state *curr_state, *next_state;
7445 for (curr_state = allocated_bundle_states_chain;
7447 curr_state = next_state)
7449 next_state = curr_state->allocated_states_chain;
7450 free (curr_state->dfa_state);
7455 /* Hash table of the bundle states. The key is dfa_state and insn_num
7456 of the bundle states. */
7458 static htab_t bundle_state_table;
7460 /* The function returns hash of BUNDLE_STATE. */
7463 bundle_state_hash (const void *bundle_state)
7465 const struct bundle_state *const state
7466 = (const struct bundle_state *) bundle_state;
7469 for (result = i = 0; i < dfa_state_size; i++)
7470 result += (((unsigned char *) state->dfa_state) [i]
7471 << ((i % CHAR_BIT) * 3 + CHAR_BIT));
7472 return result + state->insn_num;
7475 /* The function returns nonzero if the bundle state keys are equal. */
7478 bundle_state_eq_p (const void *bundle_state_1, const void *bundle_state_2)
7480 const struct bundle_state *const state1
7481 = (const struct bundle_state *) bundle_state_1;
7482 const struct bundle_state *const state2
7483 = (const struct bundle_state *) bundle_state_2;
7485 return (state1->insn_num == state2->insn_num
7486 && memcmp (state1->dfa_state, state2->dfa_state,
7487 dfa_state_size) == 0);
7490 /* The function inserts the BUNDLE_STATE into the hash table. The
7491 function returns nonzero if the bundle has been inserted into the
7492 table. The table contains the best bundle state with given key. */
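/* Note the tie-breaking order used below when a state with the same
   (dfa_state, insn_num) key already exists: smaller cost wins, then fewer
   accumulated insns (i.e. fewer inserted nops), then smaller branch
   deviation.  The same ordering is used again when the best final state is
   selected in `bundling'.  */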
7495 insert_bundle_state (struct bundle_state *bundle_state)
7499 entry_ptr = htab_find_slot (bundle_state_table, bundle_state, 1);
7500 if (*entry_ptr == NULL)
7502 bundle_state->next = index_to_bundle_states [bundle_state->insn_num];
7503 index_to_bundle_states [bundle_state->insn_num] = bundle_state;
7504 *entry_ptr = (void *) bundle_state;
7507 else if (bundle_state->cost < ((struct bundle_state *) *entry_ptr)->cost
7508 || (bundle_state->cost == ((struct bundle_state *) *entry_ptr)->cost
7509 && (((struct bundle_state *)*entry_ptr)->accumulated_insns_num
7510 > bundle_state->accumulated_insns_num
7511 || (((struct bundle_state *)
7512 *entry_ptr)->accumulated_insns_num
7513 == bundle_state->accumulated_insns_num
7514 && ((struct bundle_state *)
7515 *entry_ptr)->branch_deviation
7516 > bundle_state->branch_deviation))))
7519 struct bundle_state temp;
7521 temp = *(struct bundle_state *) *entry_ptr;
7522 *(struct bundle_state *) *entry_ptr = *bundle_state;
7523 ((struct bundle_state *) *entry_ptr)->next = temp.next;
7524 *bundle_state = temp;
7529 /* Start work with the hash table. */
7532 initiate_bundle_state_table (void)
7534 bundle_state_table = htab_create (50, bundle_state_hash, bundle_state_eq_p,
7538 /* Finish work with the hash table. */
7541 finish_bundle_state_table (void)
7543 htab_delete (bundle_state_table);
7548 /* The following variable is an insn `nop' used to check bundle states
7549 with different numbers of inserted nops. */
7551 static rtx ia64_nop;
7553 /* The following function tries to issue NOPS_NUM nops for the current
7554 state without advancing the processor cycle. If it fails, the
7555 function returns FALSE and frees the current state. */
7558 try_issue_nops (struct bundle_state *curr_state, int nops_num)
7562 for (i = 0; i < nops_num; i++)
7563 if (state_transition (curr_state->dfa_state, ia64_nop) >= 0)
7565 free_bundle_state (curr_state);
7571 /* The following function tries to issue INSN for the current
7572 state without advancing the processor cycle. If it fails, the
7573 function returns FALSE and frees the current state. */
7576 try_issue_insn (struct bundle_state *curr_state, rtx insn)
7578 if (insn && state_transition (curr_state->dfa_state, insn) >= 0)
7580 free_bundle_state (curr_state);
7586 /* The following function tries to issue BEFORE_NOPS_NUM nops and INSN
7587 starting with ORIGINATOR without advancing the processor cycle. If
7588 TRY_BUNDLE_END_P is TRUE, the function also (or only, if
7589 ONLY_BUNDLE_END_P is TRUE) tries to issue nops to fill the whole bundle.
7590 If successful, the function creates a new bundle state and
7591 inserts it into the hash table and into `index_to_bundle_states'. */
7594 issue_nops_and_insn (struct bundle_state *originator, int before_nops_num,
7595 rtx insn, int try_bundle_end_p, int only_bundle_end_p)
7597 struct bundle_state *curr_state;
7599 curr_state = get_free_bundle_state ();
7600 memcpy (curr_state->dfa_state, originator->dfa_state, dfa_state_size);
7601 curr_state->insn = insn;
7602 curr_state->insn_num = originator->insn_num + 1;
7603 curr_state->cost = originator->cost;
7604 curr_state->originator = originator;
7605 curr_state->before_nops_num = before_nops_num;
7606 curr_state->after_nops_num = 0;
7607 curr_state->accumulated_insns_num
7608 = originator->accumulated_insns_num + before_nops_num;
7609 curr_state->branch_deviation = originator->branch_deviation;
7611 if (INSN_CODE (insn) == CODE_FOR_insn_group_barrier)
7613 gcc_assert (GET_MODE (insn) != TImode);
7614 if (!try_issue_nops (curr_state, before_nops_num))
7616 if (!try_issue_insn (curr_state, insn))
7618 memcpy (temp_dfa_state, curr_state->dfa_state, dfa_state_size);
7619 if (state_transition (temp_dfa_state, dfa_pre_cycle_insn) >= 0
7620 && curr_state->accumulated_insns_num % 3 != 0)
7622 free_bundle_state (curr_state);
7626 else if (GET_MODE (insn) != TImode)
7628 if (!try_issue_nops (curr_state, before_nops_num))
7630 if (!try_issue_insn (curr_state, insn))
7632 curr_state->accumulated_insns_num++;
7633 gcc_assert (GET_CODE (PATTERN (insn)) != ASM_INPUT
7634 && asm_noperands (PATTERN (insn)) < 0);
7636 if (ia64_safe_type (insn) == TYPE_L)
7637 curr_state->accumulated_insns_num++;
7641 /* If this is an insn that must be first in a group, then don't allow
7642 nops to be emitted before it. Currently, alloc is the only such
7643 supported instruction. */
7644 /* ??? The bundling automatons should handle this for us, but they do
7645 not yet have support for the first_insn attribute. */
7646 if (before_nops_num > 0 && get_attr_first_insn (insn) == FIRST_INSN_YES)
7648 free_bundle_state (curr_state);
7652 state_transition (curr_state->dfa_state, dfa_pre_cycle_insn);
7653 state_transition (curr_state->dfa_state, NULL);
7655 if (!try_issue_nops (curr_state, before_nops_num))
7657 if (!try_issue_insn (curr_state, insn))
7659 curr_state->accumulated_insns_num++;
7660 if (GET_CODE (PATTERN (insn)) == ASM_INPUT
7661 || asm_noperands (PATTERN (insn)) >= 0)
7663 /* Finish bundle containing asm insn. */
7664 curr_state->after_nops_num
7665 = 3 - curr_state->accumulated_insns_num % 3;
7666 curr_state->accumulated_insns_num
7667 += 3 - curr_state->accumulated_insns_num % 3;
7669 else if (ia64_safe_type (insn) == TYPE_L)
7670 curr_state->accumulated_insns_num++;
7672 if (ia64_safe_type (insn) == TYPE_B)
7673 curr_state->branch_deviation
7674 += 2 - (curr_state->accumulated_insns_num - 1) % 3;
7675 if (try_bundle_end_p && curr_state->accumulated_insns_num % 3 != 0)
7677 if (!only_bundle_end_p && insert_bundle_state (curr_state))
7680 struct bundle_state *curr_state1;
7681 struct bundle_state *allocated_states_chain;
7683 curr_state1 = get_free_bundle_state ();
7684 dfa_state = curr_state1->dfa_state;
7685 allocated_states_chain = curr_state1->allocated_states_chain;
7686 *curr_state1 = *curr_state;
7687 curr_state1->dfa_state = dfa_state;
7688 curr_state1->allocated_states_chain = allocated_states_chain;
7689 memcpy (curr_state1->dfa_state, curr_state->dfa_state,
7691 curr_state = curr_state1;
7693 if (!try_issue_nops (curr_state,
7694 3 - curr_state->accumulated_insns_num % 3))
7696 curr_state->after_nops_num
7697 = 3 - curr_state->accumulated_insns_num % 3;
7698 curr_state->accumulated_insns_num
7699 += 3 - curr_state->accumulated_insns_num % 3;
7701 if (!insert_bundle_state (curr_state))
7702 free_bundle_state (curr_state);
7706 /* The following function returns the position in the two-bundle window
7707 for the given STATE. */
7710 get_max_pos (state_t state)
7712 if (cpu_unit_reservation_p (state, pos_6))
7714 else if (cpu_unit_reservation_p (state, pos_5))
7716 else if (cpu_unit_reservation_p (state, pos_4))
7718 else if (cpu_unit_reservation_p (state, pos_3))
7720 else if (cpu_unit_reservation_p (state, pos_2))
7722 else if (cpu_unit_reservation_p (state, pos_1))
7728 /* The function returns the code of a possible template for the given position
7729 and state. The function should be called only with the two position
7730 values 3 or 6. We avoid generating F NOPs by putting
7731 templates containing F insns at the end of the template search,
7732 because of an undocumented anomaly in McKinley-derived cores which can
7733 cause stalls if an F-unit insn (including a NOP) is issued within a
7734 six-cycle window after reading certain application registers (such
7735 as ar.bsp). Furthermore, power considerations also argue against
7736 the use of F-unit instructions unless they're really needed. */
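/* Accordingly, the reservation checks below try the F-free templates first
   (MMI, MII, MMB, MIB, MBB, BBB), then the templates with an F slot
   (MMF, MFI, MFB), and MLX last.  */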
7739 get_template (state_t state, int pos)
7744 if (cpu_unit_reservation_p (state, _0mmi_))
7746 else if (cpu_unit_reservation_p (state, _0mii_))
7748 else if (cpu_unit_reservation_p (state, _0mmb_))
7750 else if (cpu_unit_reservation_p (state, _0mib_))
7752 else if (cpu_unit_reservation_p (state, _0mbb_))
7754 else if (cpu_unit_reservation_p (state, _0bbb_))
7756 else if (cpu_unit_reservation_p (state, _0mmf_))
7758 else if (cpu_unit_reservation_p (state, _0mfi_))
7760 else if (cpu_unit_reservation_p (state, _0mfb_))
7762 else if (cpu_unit_reservation_p (state, _0mlx_))
7767 if (cpu_unit_reservation_p (state, _1mmi_))
7769 else if (cpu_unit_reservation_p (state, _1mii_))
7771 else if (cpu_unit_reservation_p (state, _1mmb_))
7773 else if (cpu_unit_reservation_p (state, _1mib_))
7775 else if (cpu_unit_reservation_p (state, _1mbb_))
7777 else if (cpu_unit_reservation_p (state, _1bbb_))
7779 else if (_1mmf_ >= 0 && cpu_unit_reservation_p (state, _1mmf_))
7781 else if (cpu_unit_reservation_p (state, _1mfi_))
7783 else if (cpu_unit_reservation_p (state, _1mfb_))
7785 else if (cpu_unit_reservation_p (state, _1mlx_))
7794 /* The following function returns the first insn important for insn bundling,
7795 starting at INSN and stopping before TAIL. */
7798 get_next_important_insn (rtx insn, rtx tail)
7800 for (; insn && insn != tail; insn = NEXT_INSN (insn))
7802 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7803 && GET_CODE (PATTERN (insn)) != USE
7804 && GET_CODE (PATTERN (insn)) != CLOBBER)
7809 /* Add a bundle selector TEMPLATE0 before INSN. */
7812 ia64_add_bundle_selector_before (int template0, rtx insn)
7814 rtx b = gen_bundle_selector (GEN_INT (template0));
7816 ia64_emit_insn_before (b, insn);
7817 #if NR_BUNDLES == 10
7818 if ((template0 == 4 || template0 == 5)
7819 && (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
7822 rtx note = NULL_RTX;
7824 /* In .mbb and .bbb bundles, check whether the CALL_INSN is in the
7825 first or second slot. If it is and has a REG_EH_REGION note, copy the note
7826 to the following nops, as br.call sets rp to the address of the following
7827 bundle and therefore an EH region end must be on a bundle
7828 boundary. */
7829 insn = PREV_INSN (insn);
7830 for (i = 0; i < 3; i++)
7833 insn = next_active_insn (insn);
7834 while (GET_CODE (insn) == INSN
7835 && get_attr_empty (insn) == EMPTY_YES);
7836 if (GET_CODE (insn) == CALL_INSN)
7837 note = find_reg_note (insn, REG_EH_REGION, NULL_RTX);
7842 gcc_assert ((code = recog_memoized (insn)) == CODE_FOR_nop
7843 || code == CODE_FOR_nop_b);
7844 if (find_reg_note (insn, REG_EH_REGION, NULL_RTX))
7848 = gen_rtx_EXPR_LIST (REG_EH_REGION, XEXP (note, 0),
7856 /* The following function does insn bundling. Bundling means
7857 inserting templates and nop insns to fit insn groups into permitted
7858 templates. Instruction scheduling uses an NDFA (non-deterministic
7859 finite automaton) encoding information about the templates and the
7860 inserted nops. The nondeterminism of the automaton permits following
7861 all possible insn sequences very quickly.
7863 Unfortunately it is not possible to get information about inserting
7864 nop insns and used templates from the automaton states. The
7865 automaton only says that we can issue an insn, possibly inserting
7866 some nops before it and using some template. Therefore insn
7867 bundling in this function is implemented by using a DFA
7868 (deterministic finite automaton). We follow all possible insn
7869 sequences by inserting 0-2 nops (that is what the NDFA describes for
7870 insn scheduling) before/after each insn being bundled. We know the
7871 start of a simulated processor cycle from insn scheduling (an insn
7872 starting a new cycle has TImode).
7874 A simple implementation of insn bundling would create an enormous
7875 number of possible insn sequences satisfying the information about new
7876 cycle ticks taken from the insn scheduling. To make the algorithm
7877 practical we use dynamic programming. Each decision (about
7878 inserting nops and implicitly about previous decisions) is described
7879 by the structure bundle_state (see above). If we generate the same
7880 bundle state (the key is the automaton state after issuing the insns and
7881 nops for it), we reuse the already generated one. As a consequence we
7882 reject some decisions that cannot improve the solution and we
7883 reduce the memory needed by the algorithm.
7885 When we reach the end of the EBB (extended basic block), we choose the
7886 best sequence and then, moving back through the EBB, insert templates for
7887 the best alternative. The templates are obtained by querying the
7888 automaton state for each insn in the chosen bundle states.
7890 So the algorithm makes two (forward and backward) passes through the
7891 EBB. There is an additional forward pass through the EBB for the Itanium1
7892 processor. This pass inserts more nops to make the dependency between
7893 a producer insn and an MMMUL/MMSHF insn at least 4 cycles long. */
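/* A compressed view of the first (forward) pass below: for each
   "important" insn we take every bundle state reached after the previous
   insn and try to issue the insn preceded by 0, 1 or 2 nops (the three
   issue_nops_and_insn calls), optionally closing the bundle at cycle
   boundaries.  Because states are hashed on the pair (DFA state, insn
   number), equivalent partial schedules collapse into the single best
   candidate, which is what keeps the search tractable.  */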
7896 bundling (FILE *dump, int verbose, rtx prev_head_insn, rtx tail)
7898 struct bundle_state *curr_state, *next_state, *best_state;
7899 rtx insn, next_insn;
7901 int i, bundle_end_p, only_bundle_end_p, asm_p;
7902 int pos = 0, max_pos, template0, template1;
7905 enum attr_type type;
7908 /* Count insns in the EBB. */
7909 for (insn = NEXT_INSN (prev_head_insn);
7910 insn && insn != tail;
7911 insn = NEXT_INSN (insn))
7917 dfa_clean_insn_cache ();
7918 initiate_bundle_state_table ();
7919 index_to_bundle_states = XNEWVEC (struct bundle_state *, insn_num + 2);
7920 /* First (forward) pass -- generation of bundle states. */
7921 curr_state = get_free_bundle_state ();
7922 curr_state->insn = NULL;
7923 curr_state->before_nops_num = 0;
7924 curr_state->after_nops_num = 0;
7925 curr_state->insn_num = 0;
7926 curr_state->cost = 0;
7927 curr_state->accumulated_insns_num = 0;
7928 curr_state->branch_deviation = 0;
7929 curr_state->next = NULL;
7930 curr_state->originator = NULL;
7931 state_reset (curr_state->dfa_state);
7932 index_to_bundle_states [0] = curr_state;
7934 /* Shift the cycle mark if it is put on an insn that could be ignored. */
7935 for (insn = NEXT_INSN (prev_head_insn);
7937 insn = NEXT_INSN (insn))
7939 && (ia64_safe_itanium_class (insn) == ITANIUM_CLASS_IGNORE
7940 || GET_CODE (PATTERN (insn)) == USE
7941 || GET_CODE (PATTERN (insn)) == CLOBBER)
7942 && GET_MODE (insn) == TImode)
7944 PUT_MODE (insn, VOIDmode);
7945 for (next_insn = NEXT_INSN (insn);
7947 next_insn = NEXT_INSN (next_insn))
7948 if (INSN_P (next_insn)
7949 && ia64_safe_itanium_class (next_insn) != ITANIUM_CLASS_IGNORE
7950 && GET_CODE (PATTERN (next_insn)) != USE
7951 && GET_CODE (PATTERN (next_insn)) != CLOBBER)
7953 PUT_MODE (next_insn, TImode);
7957 /* Forward pass: generation of bundle states. */
7958 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
7962 gcc_assert (INSN_P (insn)
7963 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
7964 && GET_CODE (PATTERN (insn)) != USE
7965 && GET_CODE (PATTERN (insn)) != CLOBBER);
7966 type = ia64_safe_type (insn);
7967 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
7969 index_to_bundle_states [insn_num] = NULL;
7970 for (curr_state = index_to_bundle_states [insn_num - 1];
7972 curr_state = next_state)
7974 pos = curr_state->accumulated_insns_num % 3;
7975 next_state = curr_state->next;
7976 /* We must fill up the current bundle in order to start a
7977 subsequent asm insn in a new bundle. An asm insn is always
7978 placed in a separate bundle. */
7980 = (next_insn != NULL_RTX
7981 && INSN_CODE (insn) == CODE_FOR_insn_group_barrier
7982 && ia64_safe_type (next_insn) == TYPE_UNKNOWN);
7983 /* We may fill up the current bundle if it is the cycle end
7984 without a group barrier. */
7986 = (only_bundle_end_p || next_insn == NULL_RTX
7987 || (GET_MODE (next_insn) == TImode
7988 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier));
7989 if (type == TYPE_F || type == TYPE_B || type == TYPE_L
7991 /* We need to insert 2 nops for cases like M_MII. To
7992 guarantee issuing all insns on the same cycle for
7993 Itanium 1, we need to issue 2 nops after the first M
7994 insn (MnnMII where n is a nop insn). */
7995 || ((type == TYPE_M || type == TYPE_A)
7996 && ia64_tune == PROCESSOR_ITANIUM
7997 && !bundle_end_p && pos == 1))
7998 issue_nops_and_insn (curr_state, 2, insn, bundle_end_p,
8000 issue_nops_and_insn (curr_state, 1, insn, bundle_end_p,
8002 issue_nops_and_insn (curr_state, 0, insn, bundle_end_p,
8005 gcc_assert (index_to_bundle_states [insn_num]);
8006 for (curr_state = index_to_bundle_states [insn_num];
8008 curr_state = curr_state->next)
8009 if (verbose >= 2 && dump)
8011 /* This structure is taken from the generated code of the
8012 pipeline hazard recognizer (see file insn-attrtab.c).
8013 Please don't forget to change the structure if a new
8014 automaton is added to the .md file. */
8017 unsigned short one_automaton_state;
8018 unsigned short oneb_automaton_state;
8019 unsigned short two_automaton_state;
8020 unsigned short twob_automaton_state;
8025 "// Bundle state %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8026 curr_state->unique_num,
8027 (curr_state->originator == NULL
8028 ? -1 : curr_state->originator->unique_num),
8030 curr_state->before_nops_num, curr_state->after_nops_num,
8031 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8032 (ia64_tune == PROCESSOR_ITANIUM
8033 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8034 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8039 /* We should find a solution because the 2nd insn scheduling has
8040 found one. */
8041 gcc_assert (index_to_bundle_states [insn_num]);
8042 /* Find a state corresponding to the best insn sequence. */
8044 for (curr_state = index_to_bundle_states [insn_num];
8046 curr_state = curr_state->next)
8047 /* We are only looking at states whose last bundle is completely
8048 filled up. First we prefer insn sequences with minimal cost,
8049 then those with the fewest inserted nops, and finally those with branch insns
8050 placed in the 3rd slots. */
8051 if (curr_state->accumulated_insns_num % 3 == 0
8052 && (best_state == NULL || best_state->cost > curr_state->cost
8053 || (best_state->cost == curr_state->cost
8054 && (curr_state->accumulated_insns_num
8055 < best_state->accumulated_insns_num
8056 || (curr_state->accumulated_insns_num
8057 == best_state->accumulated_insns_num
8058 && curr_state->branch_deviation
8059 < best_state->branch_deviation)))))
8060 best_state = curr_state;
8061 /* Second (backward) pass: adding nops and templates. */
8062 insn_num = best_state->before_nops_num;
8063 template0 = template1 = -1;
8064 for (curr_state = best_state;
8065 curr_state->originator != NULL;
8066 curr_state = curr_state->originator)
8068 insn = curr_state->insn;
8069 asm_p = (GET_CODE (PATTERN (insn)) == ASM_INPUT
8070 || asm_noperands (PATTERN (insn)) >= 0);
8072 if (verbose >= 2 && dump)
8076 unsigned short one_automaton_state;
8077 unsigned short oneb_automaton_state;
8078 unsigned short two_automaton_state;
8079 unsigned short twob_automaton_state;
8084 "// Best %d (orig %d, cost %d, nops %d/%d, insns %d, branch %d, state %d) for %d\n",
8085 curr_state->unique_num,
8086 (curr_state->originator == NULL
8087 ? -1 : curr_state->originator->unique_num),
8089 curr_state->before_nops_num, curr_state->after_nops_num,
8090 curr_state->accumulated_insns_num, curr_state->branch_deviation,
8091 (ia64_tune == PROCESSOR_ITANIUM
8092 ? ((struct DFA_chip *) curr_state->dfa_state)->oneb_automaton_state
8093 : ((struct DFA_chip *) curr_state->dfa_state)->twob_automaton_state),
8096 /* Find the position in the current bundle window. The window can
8097 contain at most two bundles. A two-bundle window means that
8098 the processor will make two bundle rotations. */
8099 max_pos = get_max_pos (curr_state->dfa_state);
8101 /* The following (negative template number) means that the
8102 processor did one bundle rotation. */
8103 || (max_pos == 3 && template0 < 0))
8105 /* We are at the end of the window -- find template(s) for
8109 template0 = get_template (curr_state->dfa_state, 3);
8112 template1 = get_template (curr_state->dfa_state, 3);
8113 template0 = get_template (curr_state->dfa_state, 6);
8116 if (max_pos > 3 && template1 < 0)
8117 /* This may happen when we have a stop inside a bundle. */
8119 gcc_assert (pos <= 3);
8120 template1 = get_template (curr_state->dfa_state, 3);
8124 /* Emit nops after the current insn. */
8125 for (i = 0; i < curr_state->after_nops_num; i++)
8128 emit_insn_after (nop, insn);
8130 gcc_assert (pos >= 0);
8133 /* We are at the start of a bundle: emit the template
8134 (it should be defined). */
8135 gcc_assert (template0 >= 0);
8136 ia64_add_bundle_selector_before (template0, nop);
8137 /* If we have a two-bundle window, we make one bundle
8138 rotation. Otherwise template0 will be undefined
8139 (a negative value). */
8140 template0 = template1;
8144 /* Move the position backward in the window. A group barrier has
8145 no slot. An asm insn takes a whole bundle. */
8146 if (INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8147 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8148 && asm_noperands (PATTERN (insn)) < 0)
8150 /* A long (L) insn takes 2 slots. */
8151 if (ia64_safe_type (insn) == TYPE_L)
8153 gcc_assert (pos >= 0);
8155 && INSN_CODE (insn) != CODE_FOR_insn_group_barrier
8156 && GET_CODE (PATTERN (insn)) != ASM_INPUT
8157 && asm_noperands (PATTERN (insn)) < 0)
8159 /* The current insn is at the bundle start: emit the
8160 template. */
8161 gcc_assert (template0 >= 0);
8162 ia64_add_bundle_selector_before (template0, insn);
8163 b = PREV_INSN (insn);
8165 /* See comment above in analogous place for emitting nops
8167 template0 = template1;
8170 /* Emit nops before the current insn. */
8171 for (i = 0; i < curr_state->before_nops_num; i++)
8174 ia64_emit_insn_before (nop, insn);
8175 nop = PREV_INSN (insn);
8178 gcc_assert (pos >= 0);
8181 /* See comment above in analogous place for emitting nops
8183 gcc_assert (template0 >= 0);
8184 ia64_add_bundle_selector_before (template0, insn);
8185 b = PREV_INSN (insn);
8187 template0 = template1;
8192 if (ia64_tune == PROCESSOR_ITANIUM)
8193 /* Insert additional cycles for MM-insns (MMMUL and MMSHF).
8194 Itanium1 has a strange design: if the distance between an insn
8195 and a dependent MM-insn is less than 4 then we get an additional
8196 6-cycle stall. So we make the distance equal to 4 cycles if it
8197 is less. */
8198 for (insn = get_next_important_insn (NEXT_INSN (prev_head_insn), tail);
8202 gcc_assert (INSN_P (insn)
8203 && ia64_safe_itanium_class (insn) != ITANIUM_CLASS_IGNORE
8204 && GET_CODE (PATTERN (insn)) != USE
8205 && GET_CODE (PATTERN (insn)) != CLOBBER);
8206 next_insn = get_next_important_insn (NEXT_INSN (insn), tail);
8207 if (INSN_UID (insn) < clocks_length && add_cycles [INSN_UID (insn)])
8208 /* We found an MM-insn which needs additional cycles. */
8214 /* Now we search for the template of the bundle in
8215 which the MM-insn is placed and for the position of the
8216 insn in the bundle (0, 1, 2). We also check
8217 whether there is a stop before the insn. */
8218 last = prev_active_insn (insn);
8219 pred_stop_p = recog_memoized (last) == CODE_FOR_insn_group_barrier;
8221 last = prev_active_insn (last);
8223 for (;; last = prev_active_insn (last))
8224 if (recog_memoized (last) == CODE_FOR_bundle_selector)
8226 template0 = XINT (XVECEXP (PATTERN (last), 0, 0), 0);
8228 /* The insn is in an MLX bundle. Change the template
8229 to MFI because we will add nops before the
8230 insn. This simplifies the subsequent code a lot. */
8232 = gen_bundle_selector (const2_rtx); /* -> MFI */
8235 else if (recog_memoized (last) != CODE_FOR_insn_group_barrier
8236 && (ia64_safe_itanium_class (last)
8237 != ITANIUM_CLASS_IGNORE))
8239 /* Some correctness checks: the stop is not at the
8240 bundle start, there are no more than 3 insns in the bundle,
8241 and the MM-insn is not at the start of a bundle with
8243 gcc_assert ((!pred_stop_p || n)
8245 && (template0 != 9 || !n));
8246 /* Put nops after the insn in the bundle. */
8247 for (j = 3 - n; j > 0; j --)
8248 ia64_emit_insn_before (gen_nop (), insn);
8249 /* This takes into account that we will add N more nops
8250 before the insn later -- see the code below. */
8251 add_cycles [INSN_UID (insn)]--;
8252 if (!pred_stop_p || add_cycles [INSN_UID (insn)])
8253 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8256 add_cycles [INSN_UID (insn)]--;
8257 for (i = add_cycles [INSN_UID (insn)]; i > 0; i--)
8259 /* Insert "MII;" template. */
8260 ia64_emit_insn_before (gen_bundle_selector (const0_rtx),
8262 ia64_emit_insn_before (gen_nop (), insn);
8263 ia64_emit_insn_before (gen_nop (), insn);
8266 /* To decrease code size, we use "MI;I;"
8268 ia64_emit_insn_before
8269 (gen_insn_group_barrier (GEN_INT (3)), insn);
8272 ia64_emit_insn_before (gen_nop (), insn);
8273 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8276 /* Put the MM-insn in the same slot of a bundle with the
8277 same template as the original one. */
8278 ia64_add_bundle_selector_before (template0, insn);
8279 /* To put the insn in the same slot, add necessary number
8281 for (j = n; j > 0; j --)
8282 ia64_emit_insn_before (gen_nop (), insn);
8283 /* Put the stop if the original bundle had it. */
8285 ia64_emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8289 free (index_to_bundle_states);
8290 finish_bundle_state_table ();
8292 dfa_clean_insn_cache ();
8295 /* The following function is called at the end of scheduling BB or
8296 EBB. After reload, it inserts stop bits and does insn bundling. */
8299 ia64_sched_finish (FILE *dump, int sched_verbose)
8302 fprintf (dump, "// Finishing schedule.\n");
8303 if (!reload_completed)
8305 if (reload_completed)
8307 final_emit_insn_group_barriers (dump);
8308 bundling (dump, sched_verbose, current_sched_info->prev_head,
8309 current_sched_info->next_tail);
8310 if (sched_verbose && dump)
8311 fprintf (dump, "// finishing %d-%d\n",
8312 INSN_UID (NEXT_INSN (current_sched_info->prev_head)),
8313 INSN_UID (PREV_INSN (current_sched_info->next_tail)));
8319 /* The following function inserts stop bits in scheduled BB or EBB. */
8322 final_emit_insn_group_barriers (FILE *dump ATTRIBUTE_UNUSED)
8325 int need_barrier_p = 0;
8326 rtx prev_insn = NULL_RTX;
8328 init_insn_group_barriers ();
8330 for (insn = NEXT_INSN (current_sched_info->prev_head);
8331 insn != current_sched_info->next_tail;
8332 insn = NEXT_INSN (insn))
8334 if (GET_CODE (insn) == BARRIER)
8336 rtx last = prev_active_insn (insn);
8340 if (GET_CODE (last) == JUMP_INSN
8341 && GET_CODE (PATTERN (last)) == ADDR_DIFF_VEC)
8342 last = prev_active_insn (last);
8343 if (recog_memoized (last) != CODE_FOR_insn_group_barrier)
8344 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)), last);
8346 init_insn_group_barriers ();
8348 prev_insn = NULL_RTX;
8350 else if (INSN_P (insn))
8352 if (recog_memoized (insn) == CODE_FOR_insn_group_barrier)
8354 init_insn_group_barriers ();
8356 prev_insn = NULL_RTX;
8358 else if (need_barrier_p || group_barrier_needed (insn))
8360 if (TARGET_EARLY_STOP_BITS)
8365 last != current_sched_info->prev_head;
8366 last = PREV_INSN (last))
8367 if (INSN_P (last) && GET_MODE (last) == TImode
8368 && stops_p [INSN_UID (last)])
8370 if (last == current_sched_info->prev_head)
8372 last = prev_active_insn (last);
8374 && recog_memoized (last) != CODE_FOR_insn_group_barrier)
8375 emit_insn_after (gen_insn_group_barrier (GEN_INT (3)),
8377 init_insn_group_barriers ();
8378 for (last = NEXT_INSN (last);
8380 last = NEXT_INSN (last))
8382 group_barrier_needed (last);
8386 emit_insn_before (gen_insn_group_barrier (GEN_INT (3)),
8388 init_insn_group_barriers ();
8390 group_barrier_needed (insn);
8391 prev_insn = NULL_RTX;
8393 else if (recog_memoized (insn) >= 0)
8395 need_barrier_p = (GET_CODE (insn) == CALL_INSN
8396 || GET_CODE (PATTERN (insn)) == ASM_INPUT
8397 || asm_noperands (PATTERN (insn)) >= 0);
8404 /* If the following function returns TRUE, we will use the DFA
8408 ia64_first_cycle_multipass_dfa_lookahead (void)
8410 return (reload_completed ? 6 : 4);
8413 /* The following function initializes the variable `dfa_pre_cycle_insn'. */
8416 ia64_init_dfa_pre_cycle_insn (void)
8418 if (temp_dfa_state == NULL)
8420 dfa_state_size = state_size ();
8421 temp_dfa_state = xmalloc (dfa_state_size);
8422 prev_cycle_state = xmalloc (dfa_state_size);
8424 dfa_pre_cycle_insn = make_insn_raw (gen_pre_cycle ());
8425 PREV_INSN (dfa_pre_cycle_insn) = NEXT_INSN (dfa_pre_cycle_insn) = NULL_RTX;
8426 recog_memoized (dfa_pre_cycle_insn);
8427 dfa_stop_insn = make_insn_raw (gen_insn_group_barrier (GEN_INT (3)));
8428 PREV_INSN (dfa_stop_insn) = NEXT_INSN (dfa_stop_insn) = NULL_RTX;
8429 recog_memoized (dfa_stop_insn);
8432 /* The following function returns the pseudo insn DFA_PRE_CYCLE_INSN
8433 used by the DFA insn scheduler. */
8436 ia64_dfa_pre_cycle_insn (void)
8438 return dfa_pre_cycle_insn;
8441 /* The following function returns TRUE if PRODUCER (of type ilog or
8442 ld) produces address for CONSUMER (of type st or stf). */
8445 ia64_st_address_bypass_p (rtx producer, rtx consumer)
8449 gcc_assert (producer && consumer);
8450 dest = ia64_single_set (producer);
8452 reg = SET_DEST (dest);
8454 if (GET_CODE (reg) == SUBREG)
8455 reg = SUBREG_REG (reg);
8456 gcc_assert (GET_CODE (reg) == REG);
8458 dest = ia64_single_set (consumer);
8460 mem = SET_DEST (dest);
8461 gcc_assert (mem && GET_CODE (mem) == MEM);
8462 return reg_mentioned_p (reg, mem);
8465 /* The following function returns TRUE if PRODUCER (of type ilog or
8466 ld) produces address for CONSUMER (of type ld or fld). */
8469 ia64_ld_address_bypass_p (rtx producer, rtx consumer)
8471 rtx dest, src, reg, mem;
8473 gcc_assert (producer && consumer);
8474 dest = ia64_single_set (producer);
8476 reg = SET_DEST (dest);
8478 if (GET_CODE (reg) == SUBREG)
8479 reg = SUBREG_REG (reg);
8480 gcc_assert (GET_CODE (reg) == REG);
8482 src = ia64_single_set (consumer);
8484 mem = SET_SRC (src);
8487 if (GET_CODE (mem) == UNSPEC && XVECLEN (mem, 0) > 0)
8488 mem = XVECEXP (mem, 0, 0);
8489 else if (GET_CODE (mem) == IF_THEN_ELSE)
8490 /* ??? Is this bypass necessary for ld.c? */
8492 gcc_assert (XINT (XEXP (XEXP (mem, 0), 0), 1) == UNSPEC_LDCCLR);
8493 mem = XEXP (mem, 1);
8496 while (GET_CODE (mem) == SUBREG || GET_CODE (mem) == ZERO_EXTEND)
8497 mem = XEXP (mem, 0);
8499 if (GET_CODE (mem) == UNSPEC)
8501 int c = XINT (mem, 1);
8503 gcc_assert (c == UNSPEC_LDA || c == UNSPEC_LDS || c == UNSPEC_LDSA);
8504 mem = XVECEXP (mem, 0, 0);
8507 /* Note that LO_SUM is used for GOT loads. */
8508 gcc_assert (GET_CODE (mem) == LO_SUM || GET_CODE (mem) == MEM);
8510 return reg_mentioned_p (reg, mem);
8513 /* The following function returns TRUE if INSN produces an address for a
8514 load/store insn. We will place such insns into an M slot because this
8515 decreases the latency time. */
8518 ia64_produce_address_p (rtx insn)
8524 /* Emit pseudo-ops for the assembler to describe predicate relations.
8525 At present this assumes that we only consider predicate pairs to
8526 be mutex, and that the assembler can deduce proper values from
8527 straight-line code. */
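/* The insns emitted below (pred_rel_mutex and safe_across_calls_all /
   safe_across_calls_normal) expand to assembler annotations rather than
   real code; the exact directive syntax -- presumably the .pred.rel /
   .pred.safe_across_calls family of ia64 pseudo-ops -- is determined by
   the corresponding patterns in ia64.md.  */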
8530 emit_predicate_relation_info (void)
8534 FOR_EACH_BB_REVERSE (bb)
8537 rtx head = BB_HEAD (bb);
8539 /* We only need such notes at code labels. */
8540 if (GET_CODE (head) != CODE_LABEL)
8542 if (NOTE_INSN_BASIC_BLOCK_P (NEXT_INSN (head)))
8543 head = NEXT_INSN (head);
8545 /* Skip p0, which may be thought to be live due to (reg:DI p0)
8546 grabbing the entire block of predicate registers. */
8547 for (r = PR_REG (2); r < PR_REG (64); r += 2)
8548 if (REGNO_REG_SET_P (df_get_live_in (bb), r))
8550 rtx p = gen_rtx_REG (BImode, r);
8551 rtx n = emit_insn_after (gen_pred_rel_mutex (p), head);
8552 if (head == BB_END (bb))
8558 /* Look for conditional calls that do not return, and protect predicate
8559 relations around them. Otherwise the assembler will assume the call
8560 returns, and complain about uses of call-clobbered predicates after
8562 FOR_EACH_BB_REVERSE (bb)
8564 rtx insn = BB_HEAD (bb);
8568 if (GET_CODE (insn) == CALL_INSN
8569 && GET_CODE (PATTERN (insn)) == COND_EXEC
8570 && find_reg_note (insn, REG_NORETURN, NULL_RTX))
8572 rtx b = emit_insn_before (gen_safe_across_calls_all (), insn);
8573 rtx a = emit_insn_after (gen_safe_across_calls_normal (), insn);
8574 if (BB_HEAD (bb) == insn)
8576 if (BB_END (bb) == insn)
8580 if (insn == BB_END (bb))
8582 insn = NEXT_INSN (insn);
8587 /* Perform machine dependent operations on the rtl chain INSNS. */
8592 /* We are freeing block_for_insn in the toplev to keep compatibility
8593 with old MDEP_REORGS that are not CFG based. Recompute it now. */
8594 compute_bb_for_insn ();
8596 /* If optimizing, we'll have split before scheduling. */
8600 if (optimize && ia64_flag_schedule_insns2 && dbg_cnt (ia64_sched2))
8602 timevar_push (TV_SCHED2);
8603 ia64_final_schedule = 1;
8605 initiate_bundle_states ();
8606 ia64_nop = make_insn_raw (gen_nop ());
8607 PREV_INSN (ia64_nop) = NEXT_INSN (ia64_nop) = NULL_RTX;
8608 recog_memoized (ia64_nop);
8609 clocks_length = get_max_uid () + 1;
8610 stops_p = XCNEWVEC (char, clocks_length);
8611 if (ia64_tune == PROCESSOR_ITANIUM)
8613 clocks = XCNEWVEC (int, clocks_length);
8614 add_cycles = XCNEWVEC (int, clocks_length);
8616 if (ia64_tune == PROCESSOR_ITANIUM2)
8618 pos_1 = get_cpu_unit_code ("2_1");
8619 pos_2 = get_cpu_unit_code ("2_2");
8620 pos_3 = get_cpu_unit_code ("2_3");
8621 pos_4 = get_cpu_unit_code ("2_4");
8622 pos_5 = get_cpu_unit_code ("2_5");
8623 pos_6 = get_cpu_unit_code ("2_6");
8624 _0mii_ = get_cpu_unit_code ("2b_0mii.");
8625 _0mmi_ = get_cpu_unit_code ("2b_0mmi.");
8626 _0mfi_ = get_cpu_unit_code ("2b_0mfi.");
8627 _0mmf_ = get_cpu_unit_code ("2b_0mmf.");
8628 _0bbb_ = get_cpu_unit_code ("2b_0bbb.");
8629 _0mbb_ = get_cpu_unit_code ("2b_0mbb.");
8630 _0mib_ = get_cpu_unit_code ("2b_0mib.");
8631 _0mmb_ = get_cpu_unit_code ("2b_0mmb.");
8632 _0mfb_ = get_cpu_unit_code ("2b_0mfb.");
8633 _0mlx_ = get_cpu_unit_code ("2b_0mlx.");
8634 _1mii_ = get_cpu_unit_code ("2b_1mii.");
8635 _1mmi_ = get_cpu_unit_code ("2b_1mmi.");
8636 _1mfi_ = get_cpu_unit_code ("2b_1mfi.");
8637 _1mmf_ = get_cpu_unit_code ("2b_1mmf.");
8638 _1bbb_ = get_cpu_unit_code ("2b_1bbb.");
8639 _1mbb_ = get_cpu_unit_code ("2b_1mbb.");
8640 _1mib_ = get_cpu_unit_code ("2b_1mib.");
8641 _1mmb_ = get_cpu_unit_code ("2b_1mmb.");
8642 _1mfb_ = get_cpu_unit_code ("2b_1mfb.");
8643 _1mlx_ = get_cpu_unit_code ("2b_1mlx.");
8647 pos_1 = get_cpu_unit_code ("1_1");
8648 pos_2 = get_cpu_unit_code ("1_2");
8649 pos_3 = get_cpu_unit_code ("1_3");
8650 pos_4 = get_cpu_unit_code ("1_4");
8651 pos_5 = get_cpu_unit_code ("1_5");
8652 pos_6 = get_cpu_unit_code ("1_6");
8653 _0mii_ = get_cpu_unit_code ("1b_0mii.");
8654 _0mmi_ = get_cpu_unit_code ("1b_0mmi.");
8655 _0mfi_ = get_cpu_unit_code ("1b_0mfi.");
8656 _0mmf_ = get_cpu_unit_code ("1b_0mmf.");
8657 _0bbb_ = get_cpu_unit_code ("1b_0bbb.");
8658 _0mbb_ = get_cpu_unit_code ("1b_0mbb.");
8659 _0mib_ = get_cpu_unit_code ("1b_0mib.");
8660 _0mmb_ = get_cpu_unit_code ("1b_0mmb.");
8661 _0mfb_ = get_cpu_unit_code ("1b_0mfb.");
8662 _0mlx_ = get_cpu_unit_code ("1b_0mlx.");
8663 _1mii_ = get_cpu_unit_code ("1b_1mii.");
8664 _1mmi_ = get_cpu_unit_code ("1b_1mmi.");
8665 _1mfi_ = get_cpu_unit_code ("1b_1mfi.");
8666 _1mmf_ = get_cpu_unit_code ("1b_1mmf.");
8667 _1bbb_ = get_cpu_unit_code ("1b_1bbb.");
8668 _1mbb_ = get_cpu_unit_code ("1b_1mbb.");
8669 _1mib_ = get_cpu_unit_code ("1b_1mib.");
8670 _1mmb_ = get_cpu_unit_code ("1b_1mmb.");
8671 _1mfb_ = get_cpu_unit_code ("1b_1mfb.");
8672 _1mlx_ = get_cpu_unit_code ("1b_1mlx.");
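/* Note: the unit names queried above are assumed to come from the Itanium
   DFA descriptions (itanium1.md / itanium2.md): the "1b_"/"2b_" prefix
   selects the bundling automaton, the following 0/1 the bundle within a
   pair, and the trailing letters (mii, mmi, mfi, ...) the bundle template;
   pos_1 through pos_6 name the six instruction slots of a bundle pair.  */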
8675 /* We cannot reuse this one because it has been corrupted by the
8677 finish_bundle_states ();
8678 if (ia64_tune == PROCESSOR_ITANIUM)
8685 emit_insn_group_barriers (dump_file);
8687 ia64_final_schedule = 0;
8688 timevar_pop (TV_SCHED2);
8691 emit_all_insn_group_barriers (dump_file);
8695 /* A call must not be the last instruction in a function: otherwise the
8696 return address would fall outside the function and unwinding would not
8697 work properly.  Note that IA-64 differs from dwarf2 on this point. */
8698 if (flag_unwind_tables || (flag_exceptions && !USING_SJLJ_EXCEPTIONS))
8703 insn = get_last_insn ();
8704 if (! INSN_P (insn))
8705 insn = prev_active_insn (insn);
8706 /* Skip over insns that expand to nothing. */
8707 while (GET_CODE (insn) == INSN && get_attr_empty (insn) == EMPTY_YES)
8709 if (GET_CODE (PATTERN (insn)) == UNSPEC_VOLATILE
8710 && XINT (PATTERN (insn), 1) == UNSPECV_INSN_GROUP_BARRIER)
8712 insn = prev_active_insn (insn);
8714 if (GET_CODE (insn) == CALL_INSN)
8717 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
8718 emit_insn (gen_break_f ());
8719 emit_insn (gen_insn_group_barrier (GEN_INT (3)));
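/* The three insns just emitted place an insn group barrier (a ";;" stop
   bit), a break.f instruction (presumably "break.f 0"), and another stop
   bit after a trailing call, so the return address can never point past
   the end of the function.  */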
8723 emit_predicate_relation_info ();
8725 if (ia64_flag_var_tracking)
8727 timevar_push (TV_VAR_TRACKING);
8728 variable_tracking_main ();
8729 timevar_pop (TV_VAR_TRACKING);
8731 df_finish_pass (false);
8734 /* Return true if REGNO is used by the epilogue. */
8737 ia64_epilogue_uses (int regno)
8742 /* With a call to a function in another module, we will write a new
8743 value to "gp". After returning from such a call, we need to make
8744 sure the function restores the original gp-value, even if the
8745 function itself does not use the gp anymore. */
8746 return !(TARGET_AUTO_PIC || TARGET_NO_PIC);
8748 case IN_REG (0): case IN_REG (1): case IN_REG (2): case IN_REG (3):
8749 case IN_REG (4): case IN_REG (5): case IN_REG (6): case IN_REG (7):
8750 /* For functions defined with the syscall_linkage attribute, all
8751 input registers are marked as live at all function exits. This
8752 prevents the register allocator from using the input registers,
8753 which in turn makes it possible to restart a system call after
8754 an interrupt without having to save/restore the input registers.
8755 This also prevents kernel data from leaking to application code. */
8756 return lookup_attribute ("syscall_linkage",
8757 TYPE_ATTRIBUTES (TREE_TYPE (current_function_decl))) != NULL;
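/* Illustrative (hypothetical) use of the attribute checked above:
     long sys_foo (long arg) __attribute__ ((syscall_linkage));
   Any function defined this way keeps in0-in7 live at every exit.  */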
8760 /* Conditional return patterns can't represent the use of `b0' as
8761 the return address, so we force the value live this way. */
8765 /* Likewise for ar.pfs, which is used by br.ret. */
8773 /* Return true if REGNO is used by the frame unwinder. */
8776 ia64_eh_uses (int regno)
8778 enum ia64_frame_regs r;
8780 if (! reload_completed)
8786 for (r = reg_save_b0; r <= reg_save_ar_lc; r++)
8787 if (regno == current_frame_info.r[r]
8788 || regno == emitted_frame_related_regs[r])
8794 /* Return true if this goes in small data/bss. */
8796 /* ??? We could also support own long data here, generating movl/add/ld8
8797 instead of addl,ld8/ld8.  This makes the code bigger, but should make the
8798 code faster because there is one less load.  This also includes incomplete
8799 types which can't go in sdata/sbss.  */
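/* For example, a small object such as "static int counter;" whose size is
   below ia64_section_threshold is placed in .sbss/.sdata and can then be
   reached with the short gp-relative addl/ld8 sequence mentioned above.  */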
8802 ia64_in_small_data_p (const_tree exp)
8804 if (TARGET_NO_SDATA)
8807 /* We want to merge strings, so we never consider them small data. */
8808 if (TREE_CODE (exp) == STRING_CST)
8811 /* Functions are never small data. */
8812 if (TREE_CODE (exp) == FUNCTION_DECL)
8815 if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
8817 const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
8819 if (strcmp (section, ".sdata") == 0
8820 || strncmp (section, ".sdata.", 7) == 0
8821 || strncmp (section, ".gnu.linkonce.s.", 16) == 0
8822 || strcmp (section, ".sbss") == 0
8823 || strncmp (section, ".sbss.", 6) == 0
8824 || strncmp (section, ".gnu.linkonce.sb.", 17) == 0)
8829 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp));
8831 /* If this is an incomplete type with size 0, then we can't put it
8832 in sdata because it might be too big when completed. */
8833 if (size > 0 && size <= ia64_section_threshold)
8840 /* Output assembly directives for prologue regions. */
8842 /* True if the current basic block is the last one in the function.  */
8844 static bool last_block;
8846 /* True if we need a copy_state command at the start of the next block. */
8848 static bool need_copy_state;
8850 #ifndef MAX_ARTIFICIAL_LABEL_BYTES
8851 # define MAX_ARTIFICIAL_LABEL_BYTES 30
8852 #endif
8854 /* Emit a debugging label after a call-frame-related insn. We'd
8855 rather output the label right away, but we'd have to output it
8856 after, not before, the instruction, and the instruction has not
8857 been output yet. So we emit the label after the insn, delete it to
8858 avoid introducing basic blocks, and mark it as preserved, such that
8859 it is still output, given that it is referenced in debug info. */
8862 ia64_emit_deleted_label_after_insn (rtx insn)
8864 char label[MAX_ARTIFICIAL_LABEL_BYTES];
8865 rtx lb = gen_label_rtx ();
8866 rtx label_insn = emit_label_after (lb, insn);
8868 LABEL_PRESERVE_P (lb) = 1;
8870 delete_insn (label_insn);
8872 ASM_GENERATE_INTERNAL_LABEL (label, "L", CODE_LABEL_NUMBER (label_insn));
8874 return xstrdup (label);
8877 /* Define the CFA after INSN with the steady-state definition. */
8880 ia64_dwarf2out_def_steady_cfa (rtx insn)
8882 rtx fp = frame_pointer_needed
8883 ? hard_frame_pointer_rtx
8884 : stack_pointer_rtx;
8887 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn), REGNO (fp),
8889 ia64_initial_elimination_offset
8890 (REGNO (arg_pointer_rtx), REGNO (fp))
8891 + ARG_POINTER_CFA_OFFSET (current_function_decl));
8894 /* The generic dwarf2 frame debug info generator does not define a
8895 separate region for the very end of the epilogue, so refrain from
8896 doing so in the IA64-specific code as well. */
8898 #define IA64_CHANGE_CFA_IN_EPILOGUE 0
8900 /* The function emits unwind directives for the start of an epilogue. */
8903 process_epilogue (FILE *asm_out_file, rtx insn, bool unwind, bool frame)
8905 /* If this isn't the last block of the function, then we need to label the
8906 current state, and copy it back in at the start of the next block. */
8911 fprintf (asm_out_file, "\t.label_state %d\n",
8912 ++cfun->machine->state_num);
8913 need_copy_state = true;
8917 fprintf (asm_out_file, "\t.restore sp\n");
8918 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
8919 dwarf2out_def_cfa (ia64_emit_deleted_label_after_insn (insn),
8920 STACK_POINTER_REGNUM, INCOMING_FRAME_SP_OFFSET);
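/* Taken together with process_for_unwind_directive below: a non-final
   epilogue is preceded by ".label_state N" before its ".restore sp", and
   the next basic block re-enters that state with ".body" followed by
   ".copy_state N".  */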
8923 /* This function processes a SET pattern looking for specific patterns
8924 which result in emitting an assembly directive required for unwinding. */
8927 process_set (FILE *asm_out_file, rtx pat, rtx insn, bool unwind, bool frame)
8929 rtx src = SET_SRC (pat);
8930 rtx dest = SET_DEST (pat);
8931 int src_regno, dest_regno;
8933 /* Look for the ALLOC insn. */
8934 if (GET_CODE (src) == UNSPEC_VOLATILE
8935 && XINT (src, 1) == UNSPECV_ALLOC
8936 && GET_CODE (dest) == REG)
8938 dest_regno = REGNO (dest);
8940 /* If this is the final destination for ar.pfs, then this must
8941 be the alloc in the prologue. */
8942 if (dest_regno == current_frame_info.r[reg_save_ar_pfs])
8945 fprintf (asm_out_file, "\t.save ar.pfs, r%d\n",
8946 ia64_dbx_register_number (dest_regno));
8950 /* This must be an alloc before a sibcall. We must drop the
8951 old frame info. The easiest way to drop the old frame
8952 info is to ensure we had a ".restore sp" directive
8953 followed by a new prologue. If the procedure doesn't
8954 have a memory-stack frame, we'll issue a dummy ".restore sp" now.  */
8956 if (current_frame_info.total_size == 0 && !frame_pointer_needed)
8957 /* If we haven't done process_epilogue () yet, do it now.  */
8958 process_epilogue (asm_out_file, insn, unwind, frame);
8960 fprintf (asm_out_file, "\t.prologue\n");
8965 /* Look for SP = .... */
8966 if (GET_CODE (dest) == REG && REGNO (dest) == STACK_POINTER_REGNUM)
8968 if (GET_CODE (src) == PLUS)
8970 rtx op0 = XEXP (src, 0);
8971 rtx op1 = XEXP (src, 1);
8973 gcc_assert (op0 == dest && GET_CODE (op1) == CONST_INT);
8975 if (INTVAL (op1) < 0)
8977 gcc_assert (!frame_pointer_needed);
8979 fprintf (asm_out_file, "\t.fframe " HOST_WIDE_INT_PRINT_DEC "\n", -INTVAL (op1));
8982 ia64_dwarf2out_def_steady_cfa (insn);
8985 process_epilogue (asm_out_file, insn, unwind, frame);
8989 gcc_assert (GET_CODE (src) == REG
8990 && REGNO (src) == HARD_FRAME_POINTER_REGNUM);
8991 process_epilogue (asm_out_file, insn, unwind, frame);
8997 /* Register move we need to look at. */
8998 if (GET_CODE (dest) == REG && GET_CODE (src) == REG)
9000 src_regno = REGNO (src);
9001 dest_regno = REGNO (dest);
9006 /* Saving return address pointer. */
9007 gcc_assert (dest_regno == current_frame_info.r[reg_save_b0]);
9009 fprintf (asm_out_file, "\t.save rp, r%d\n",
9010 ia64_dbx_register_number (dest_regno));
9014 gcc_assert (dest_regno == current_frame_info.r[reg_save_pr]);
9016 fprintf (asm_out_file, "\t.save pr, r%d\n",
9017 ia64_dbx_register_number (dest_regno));
9020 case AR_UNAT_REGNUM:
9021 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_unat]);
9023 fprintf (asm_out_file, "\t.save ar.unat, r%d\n",
9024 ia64_dbx_register_number (dest_regno));
9028 gcc_assert (dest_regno == current_frame_info.r[reg_save_ar_lc]);
9030 fprintf (asm_out_file, "\t.save ar.lc, r%d\n",
9031 ia64_dbx_register_number (dest_regno));
9034 case STACK_POINTER_REGNUM:
9035 gcc_assert (dest_regno == HARD_FRAME_POINTER_REGNUM
9036 && frame_pointer_needed);
9038 fprintf (asm_out_file, "\t.vframe r%d\n",
9039 ia64_dbx_register_number (dest_regno));
9041 ia64_dwarf2out_def_steady_cfa (insn);
9045 /* Everything else should indicate being stored to memory. */
9050 /* Memory store we need to look at. */
9051 if (GET_CODE (dest) == MEM && GET_CODE (src) == REG)
9057 if (GET_CODE (XEXP (dest, 0)) == REG)
9059 base = XEXP (dest, 0);
9064 gcc_assert (GET_CODE (XEXP (dest, 0)) == PLUS
9065 && GET_CODE (XEXP (XEXP (dest, 0), 1)) == CONST_INT);
9066 base = XEXP (XEXP (dest, 0), 0);
9067 off = INTVAL (XEXP (XEXP (dest, 0), 1));
9070 if (base == hard_frame_pointer_rtx)
9072 saveop = ".savepsp";
9077 gcc_assert (base == stack_pointer_rtx);
9081 src_regno = REGNO (src);
9085 gcc_assert (!current_frame_info.r[reg_save_b0]);
9087 fprintf (asm_out_file, "\t%s rp, %ld\n", saveop, off);
9091 gcc_assert (!current_frame_info.r[reg_save_pr]);
9093 fprintf (asm_out_file, "\t%s pr, %ld\n", saveop, off);
9097 gcc_assert (!current_frame_info.r[reg_save_ar_lc]);
9099 fprintf (asm_out_file, "\t%s ar.lc, %ld\n", saveop, off);
9103 gcc_assert (!current_frame_info.r[reg_save_ar_pfs]);
9105 fprintf (asm_out_file, "\t%s ar.pfs, %ld\n", saveop, off);
9108 case AR_UNAT_REGNUM:
9109 gcc_assert (!current_frame_info.r[reg_save_ar_unat]);
9111 fprintf (asm_out_file, "\t%s ar.unat, %ld\n", saveop, off);
9119 fprintf (asm_out_file, "\t.save.g 0x%x\n",
9120 1 << (src_regno - GR_REG (4)));
9129 fprintf (asm_out_file, "\t.save.b 0x%x\n",
9130 1 << (src_regno - BR_REG (1)));
9138 fprintf (asm_out_file, "\t.save.f 0x%x\n",
9139 1 << (src_regno - FR_REG (2)));
9142 case FR_REG (16): case FR_REG (17): case FR_REG (18): case FR_REG (19):
9143 case FR_REG (20): case FR_REG (21): case FR_REG (22): case FR_REG (23):
9144 case FR_REG (24): case FR_REG (25): case FR_REG (26): case FR_REG (27):
9145 case FR_REG (28): case FR_REG (29): case FR_REG (30): case FR_REG (31):
9147 fprintf (asm_out_file, "\t.save.gf 0x0, 0x%x\n",
9148 1 << (src_regno - FR_REG (12)));
9160 /* This function looks at a single insn and emits any directives
9161 required to unwind this insn. */
9163 process_for_unwind_directive (FILE *asm_out_file, rtx insn)
9165 bool unwind = (flag_unwind_tables
9166 || (flag_exceptions && !USING_SJLJ_EXCEPTIONS));
9167 bool frame = dwarf2out_do_frame ();
9169 if (unwind || frame)
9173 if (NOTE_INSN_BASIC_BLOCK_P (insn))
9175 last_block = NOTE_BASIC_BLOCK (insn)->next_bb == EXIT_BLOCK_PTR;
9177 /* Restore unwind state from immediately before the epilogue. */
9178 if (need_copy_state)
9182 fprintf (asm_out_file, "\t.body\n");
9183 fprintf (asm_out_file, "\t.copy_state %d\n",
9184 cfun->machine->state_num);
9186 if (IA64_CHANGE_CFA_IN_EPILOGUE && frame)
9187 ia64_dwarf2out_def_steady_cfa (insn);
9188 need_copy_state = false;
9192 if (GET_CODE (insn) == NOTE || ! RTX_FRAME_RELATED_P (insn))
9195 pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
9197 pat = XEXP (pat, 0);
9199 pat = PATTERN (insn);
9201 switch (GET_CODE (pat))
9204 process_set (asm_out_file, pat, insn, unwind, frame);
9210 int limit = XVECLEN (pat, 0);
9211 for (par_index = 0; par_index < limit; par_index++)
9213 rtx x = XVECEXP (pat, 0, par_index);
9214 if (GET_CODE (x) == SET)
9215 process_set (asm_out_file, x, insn, unwind, frame);
9230 IA64_BUILTIN_FLUSHRS
9234 ia64_init_builtins (void)
9239 /* The __fpreg type. */
9240 fpreg_type = make_node (REAL_TYPE);
9241 TYPE_PRECISION (fpreg_type) = 82;
9242 layout_type (fpreg_type);
9243 (*lang_hooks.types.register_builtin_type) (fpreg_type, "__fpreg");
9245 /* The __float80 type. */
9246 float80_type = make_node (REAL_TYPE);
9247 TYPE_PRECISION (float80_type) = 80;
9248 layout_type (float80_type);
9249 (*lang_hooks.types.register_builtin_type) (float80_type, "__float80");
9251 /* The __float128 type. */
9254 tree float128_type = make_node (REAL_TYPE);
9255 TYPE_PRECISION (float128_type) = 128;
9256 layout_type (float128_type);
9257 (*lang_hooks.types.register_builtin_type) (float128_type, "__float128");
9260 /* Under HPUX, this is a synonym for "long double". */
9261 (*lang_hooks.types.register_builtin_type) (long_double_type_node, "__float128");
9264 #define def_builtin(name, type, code) \
9265 add_builtin_function ((name), (type), (code), BUILT_IN_MD, \
9268 def_builtin ("__builtin_ia64_bsp",
9269 build_function_type (ptr_type_node, void_list_node), IA64_BUILTIN_BSP);
9272 def_builtin ("__builtin_ia64_flushrs",
9273 build_function_type (void_type_node, void_list_node),
9274 IA64_BUILTIN_FLUSHRS);
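/* Illustrative usage of the builtins declared above:
     void *bsp = __builtin_ia64_bsp ();
     __builtin_ia64_flushrs ();
   reads the RSE backing store pointer and flushes the register stack;
   both are expanded in ia64_expand_builtin below.  */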
9280 if (built_in_decls [BUILT_IN_FINITE])
9281 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE],
9283 if (built_in_decls [BUILT_IN_FINITEF])
9284 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF],
9286 if (built_in_decls [BUILT_IN_FINITEL])
9287 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEL],
9293 ia64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9294 enum machine_mode mode ATTRIBUTE_UNUSED,
9295 int ignore ATTRIBUTE_UNUSED)
9297 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
9298 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
9302 case IA64_BUILTIN_BSP:
9303 if (! target || ! register_operand (target, DImode))
9304 target = gen_reg_rtx (DImode);
9305 emit_insn (gen_bsp_value (target));
9306 #ifdef POINTERS_EXTEND_UNSIGNED
9307 target = convert_memory_address (ptr_mode, target);
9311 case IA64_BUILTIN_FLUSHRS:
9312 emit_insn (gen_flushrs ());
9322 /* On HP-UX IA64, aggregate parameters are passed stored in the
9323 most significant bits of the stack slot.  */
9326 ia64_hpux_function_arg_padding (enum machine_mode mode, const_tree type)
9328 /* Exception to normal case for structures/unions/etc. */
9330 if (type && AGGREGATE_TYPE_P (type)
9331 && int_size_in_bytes (type) < UNITS_PER_WORD)
9334 /* Fall back to the default. */
9335 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
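/* In other words, an aggregate smaller than UNITS_PER_WORD is reported as
   padded toward the most significant end of its slot (presumably by
   returning upward above), while all other arguments fall back to
   DEFAULT_FUNCTION_ARG_PADDING.  */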
9338 /* Emit text to declare externally defined variables and functions, because
9339 the Intel assembler does not support undefined externals. */
9342 ia64_asm_output_external (FILE *file, tree decl, const char *name)
9344 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
9345 set in order to avoid putting out names that are never really used.  */
9347 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
9349 /* maybe_assemble_visibility will return 1 if the assembler
9350 visibility directive is output. */
9351 int need_visibility = ((*targetm.binds_local_p) (decl)
9352 && maybe_assemble_visibility (decl));
9354 /* GNU as does not need anything here, but the HP linker does
9355 need something for external functions. */
9356 if ((TARGET_HPUX_LD || !TARGET_GNU_AS)
9357 && TREE_CODE (decl) == FUNCTION_DECL)
9358 (*targetm.asm_out.globalize_decl_name) (file, decl);
9359 else if (need_visibility && !TARGET_GNU_AS)
9360 (*targetm.asm_out.globalize_label) (file, name);
9364 /* Set SImode div/mod functions; init_integral_libfuncs only initializes
9365 modes of word_mode and larger. Rename the TFmode libfuncs using the
9366 HPUX conventions. __divtf3 is used for XFmode. We need to keep it for
9367 backward compatibility. */
9370 ia64_init_libfuncs (void)
9372 set_optab_libfunc (sdiv_optab, SImode, "__divsi3");
9373 set_optab_libfunc (udiv_optab, SImode, "__udivsi3");
9374 set_optab_libfunc (smod_optab, SImode, "__modsi3");
9375 set_optab_libfunc (umod_optab, SImode, "__umodsi3");
9377 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
9378 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
9379 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
9380 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
9381 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
9383 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
9384 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
9385 set_conv_libfunc (sext_optab, TFmode, XFmode, "_U_Qfcnvff_f80_to_quad");
9386 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
9387 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
9388 set_conv_libfunc (trunc_optab, XFmode, TFmode, "_U_Qfcnvff_quad_to_f80");
9390 set_conv_libfunc (sfix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_sgl");
9391 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
9392 set_conv_libfunc (sfix_optab, TImode, TFmode, "_U_Qfcnvfxt_quad_to_quad");
9393 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxut_quad_to_sgl");
9394 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxut_quad_to_dbl");
9396 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
9397 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
9398 set_conv_libfunc (sfloat_optab, TFmode, TImode, "_U_Qfcnvxf_quad_to_quad");
9399 /* HP-UX 11.23 libc does not have a function for unsigned
9400 SImode-to-TFmode conversion. */
9401 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxuf_dbl_to_quad");
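/* With the entries above, a TFmode operation that has no inline pattern
   becomes a libcall: e.g. a TFmode addition calls _U_Qfadd and a
   TFmode-to-SImode conversion calls _U_Qfcnvfxt_quad_to_sgl.  The HP-UX
   and SysV4 initializers below start from this common set and adjust it.  */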
9404 /* Rename all the TFmode libfuncs using the HPUX conventions. */
9407 ia64_hpux_init_libfuncs (void)
9409 ia64_init_libfuncs ();
9411 /* The HP SI millicode division and mod functions expect DI arguments.
9412 By turning them off completely we avoid using both libgcc and the
9413 non-standard millicode routines and use the HP DI millicode routines instead.  */
9416 set_optab_libfunc (sdiv_optab, SImode, 0);
9417 set_optab_libfunc (udiv_optab, SImode, 0);
9418 set_optab_libfunc (smod_optab, SImode, 0);
9419 set_optab_libfunc (umod_optab, SImode, 0);
9421 set_optab_libfunc (sdiv_optab, DImode, "__milli_divI");
9422 set_optab_libfunc (udiv_optab, DImode, "__milli_divU");
9423 set_optab_libfunc (smod_optab, DImode, "__milli_remI");
9424 set_optab_libfunc (umod_optab, DImode, "__milli_remU");
9426 /* HP-UX libc has TF min/max/abs routines in it. */
9427 set_optab_libfunc (smin_optab, TFmode, "_U_Qfmin");
9428 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
9429 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
9431 /* ia64_expand_compare uses this. */
9432 cmptf_libfunc = init_one_libfunc ("_U_Qfcmp");
9434 /* These should never be used. */
9435 set_optab_libfunc (eq_optab, TFmode, 0);
9436 set_optab_libfunc (ne_optab, TFmode, 0);
9437 set_optab_libfunc (gt_optab, TFmode, 0);
9438 set_optab_libfunc (ge_optab, TFmode, 0);
9439 set_optab_libfunc (lt_optab, TFmode, 0);
9440 set_optab_libfunc (le_optab, TFmode, 0);
9443 /* Rename the division and modulus functions in VMS. */
9446 ia64_vms_init_libfuncs (void)
9448 set_optab_libfunc (sdiv_optab, SImode, "OTS$DIV_I");
9449 set_optab_libfunc (sdiv_optab, DImode, "OTS$DIV_L");
9450 set_optab_libfunc (udiv_optab, SImode, "OTS$DIV_UI");
9451 set_optab_libfunc (udiv_optab, DImode, "OTS$DIV_UL");
9452 set_optab_libfunc (smod_optab, SImode, "OTS$REM_I");
9453 set_optab_libfunc (smod_optab, DImode, "OTS$REM_L");
9454 set_optab_libfunc (umod_optab, SImode, "OTS$REM_UI");
9455 set_optab_libfunc (umod_optab, DImode, "OTS$REM_UL");
9458 /* Rename the TFmode libfuncs available from soft-fp in glibc using
9459 the HPUX conventions. */
9462 ia64_sysv4_init_libfuncs (void)
9464 ia64_init_libfuncs ();
9466 /* These functions are not part of the HPUX TFmode interface. We
9467 use them instead of _U_Qfcmp, which doesn't work the way we expect.  */
9469 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
9470 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
9471 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
9472 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
9473 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
9474 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
9476 /* We leave out _U_Qfmin, _U_Qfmax and _U_Qfabs since soft-fp in
9477 glibc doesn't have them. */
9480 /* For HPUX, it is illegal to have relocations in shared segments. */
9483 ia64_hpux_reloc_rw_mask (void)
9488 /* For others, relax this so that relocations to local data go in
9489 read-only segments, but we still cannot allow global relocations
9490 in read-only segments. */
9493 ia64_reloc_rw_mask (void)
9495 return flag_pic ? 3 : 2;
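/* As documented for this target hook, bit 0 of the mask covers relocations
   to local symbols and bit 1 those to global symbols, so the value 2 keeps
   locally resolved data in read-only sections while 3 (under -fpic) forces
   both kinds into read-write sections.  */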
9498 /* Return the section to use for X. The only special thing we do here
9499 is to honor small data. */
9502 ia64_select_rtx_section (enum machine_mode mode, rtx x,
9503 unsigned HOST_WIDE_INT align)
9505 if (GET_MODE_SIZE (mode) > 0
9506 && GET_MODE_SIZE (mode) <= ia64_section_threshold
9507 && !TARGET_NO_SDATA)
9508 return sdata_section;
9510 return default_elf_select_rtx_section (mode, x, align);
9514 ia64_section_type_flags (tree decl, const char *name, int reloc)
9516 unsigned int flags = 0;
9518 if (strcmp (name, ".sdata") == 0
9519 || strncmp (name, ".sdata.", 7) == 0
9520 || strncmp (name, ".gnu.linkonce.s.", 16) == 0
9521 || strncmp (name, ".sdata2.", 8) == 0
9522 || strncmp (name, ".gnu.linkonce.s2.", 17) == 0
9523 || strcmp (name, ".sbss") == 0
9524 || strncmp (name, ".sbss.", 6) == 0
9525 || strncmp (name, ".gnu.linkonce.sb.", 17) == 0)
9526 flags = SECTION_SMALL;
9528 flags |= default_section_type_flags (decl, name, reloc);
9532 /* Return true if FNTYPE (a FUNCTION_TYPE or a METHOD_TYPE) returns a
9533 structure type and the address of the return value should be passed
9534 in out0, rather than in r8.  */
9537 ia64_struct_retval_addr_is_first_parm_p (tree fntype)
9539 tree ret_type = TREE_TYPE (fntype);
9541 /* The Itanium C++ ABI requires that out0, rather than r8, be used
9542 as the structure return address parameter, if the return value
9543 type has a non-trivial copy constructor or destructor. It is not
9544 clear if this same convention should be used for other
9545 programming languages. Until G++ 3.4, we incorrectly used r8 for
9546 these return values. */
9547 return (abi_version_at_least (2)
9549 && TYPE_MODE (ret_type) == BLKmode
9550 && TREE_ADDRESSABLE (ret_type)
9551 && strcmp (lang_hooks.name, "GNU C++") == 0);
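/* ia64_output_mi_thunk below uses this predicate to decide whether the
   "this" pointer arrives in the first or the second input register, and
   ia64_struct_value_rtx only falls back to r8 when the predicate is
   false.  */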
9554 /* Output the assembler code for a thunk function. THUNK_DECL is the
9555 declaration for the thunk function itself, FUNCTION is the decl for
9556 the target function. DELTA is an immediate constant offset to be
9557 added to THIS. If VCALL_OFFSET is nonzero, the word at
9558 *(*this + vcall_offset) should be added to THIS. */
9561 ia64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
9562 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
9565 rtx this_rtx, insn, funexp;
9566 unsigned int this_parmno;
9567 unsigned int this_regno;
9570 reload_completed = 1;
9571 epilogue_completed = 1;
9573 /* Set things up as ia64_expand_prologue might. */
9574 last_scratch_gr_reg = 15;
9576 memset (&current_frame_info, 0, sizeof (current_frame_info));
9577 current_frame_info.spill_cfa_off = -16;
9578 current_frame_info.n_input_regs = 1;
9579 current_frame_info.need_regstk = (TARGET_REG_NAMES != 0);
9581 /* Mark the end of the (empty) prologue. */
9582 emit_note (NOTE_INSN_PROLOGUE_END);
9584 /* Figure out whether "this" will be the first parameter (the
9585 typical case) or the second parameter (as happens when the
9586 virtual function returns certain class objects). */
9588 = (ia64_struct_retval_addr_is_first_parm_p (TREE_TYPE (thunk))
9590 this_regno = IN_REG (this_parmno);
9591 if (!TARGET_REG_NAMES)
9592 reg_names[this_regno] = ia64_reg_numbers[this_parmno];
9594 this_rtx = gen_rtx_REG (Pmode, this_regno);
9596 /* Apply the constant offset, if required. */
9597 delta_rtx = GEN_INT (delta);
9600 rtx tmp = gen_rtx_REG (ptr_mode, this_regno);
9601 REG_POINTER (tmp) = 1;
9602 if (delta && satisfies_constraint_I (delta_rtx))
9604 emit_insn (gen_ptr_extend_plus_imm (this_rtx, tmp, delta_rtx));
9608 emit_insn (gen_ptr_extend (this_rtx, tmp));
9612 if (!satisfies_constraint_I (delta_rtx))
9614 rtx tmp = gen_rtx_REG (Pmode, 2);
9615 emit_move_insn (tmp, delta_rtx);
9618 emit_insn (gen_adddi3 (this_rtx, this_rtx, delta_rtx));
9621 /* Apply the offset from the vtable, if required. */
9624 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
9625 rtx tmp = gen_rtx_REG (Pmode, 2);
9629 rtx t = gen_rtx_REG (ptr_mode, 2);
9630 REG_POINTER (t) = 1;
9631 emit_move_insn (t, gen_rtx_MEM (ptr_mode, this_rtx));
9632 if (satisfies_constraint_I (vcall_offset_rtx))
9634 emit_insn (gen_ptr_extend_plus_imm (tmp, t, vcall_offset_rtx));
9638 emit_insn (gen_ptr_extend (tmp, t));
9641 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
9645 if (!satisfies_constraint_J (vcall_offset_rtx))
9647 rtx tmp2 = gen_rtx_REG (Pmode, next_scratch_gr_reg ());
9648 emit_move_insn (tmp2, vcall_offset_rtx);
9649 vcall_offset_rtx = tmp2;
9651 emit_insn (gen_adddi3 (tmp, tmp, vcall_offset_rtx));
9655 emit_insn (gen_zero_extendsidi2 (tmp, gen_rtx_MEM (ptr_mode, tmp)));
9657 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
9659 emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp));
9662 /* Generate a tail call to the target function. */
9663 if (! TREE_USED (function))
9665 assemble_external (function);
9666 TREE_USED (function) = 1;
9668 funexp = XEXP (DECL_RTL (function), 0);
9669 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
9670 ia64_expand_call (NULL_RTX, funexp, NULL_RTX, 1);
9671 insn = get_last_insn ();
9672 SIBLING_CALL_P (insn) = 1;
9674 /* Code generation for calls relies on splitting. */
9675 reload_completed = 1;
9676 epilogue_completed = 1;
9677 try_split (PATTERN (insn), insn, 0);
9681 /* Run just enough of rest_of_compilation to get the insns emitted.
9682 There's not really enough bulk here to make other passes such as
9683 instruction scheduling worthwhile.  Note that use_thunk calls
9684 assemble_start_function and assemble_end_function. */
9686 insn_locators_alloc ();
9687 emit_all_insn_group_barriers (NULL);
9688 insn = get_insns ();
9689 shorten_branches (insn);
9690 final_start_function (insn, file, 1);
9691 final (insn, file, 1);
9692 final_end_function ();
9693 free_after_compilation (cfun);
9695 reload_completed = 0;
9696 epilogue_completed = 0;
9699 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9702 ia64_struct_value_rtx (tree fntype,
9703 int incoming ATTRIBUTE_UNUSED)
9705 if (fntype && ia64_struct_retval_addr_is_first_parm_p (fntype))
9707 return gen_rtx_REG (Pmode, GR_REG (8));
9711 ia64_scalar_mode_supported_p (enum machine_mode mode)
9737 ia64_vector_mode_supported_p (enum machine_mode mode)
9754 /* Implement the FUNCTION_PROFILER macro. */
9757 ia64_output_function_profiler (FILE *file, int labelno)
9761 /* If the function needs a static chain and the static chain
9762 register is r15, we use an indirect call so as to bypass
9763 the PLT stub in case the executable is dynamically linked,
9764 because the stub clobbers r15 as per 5.3.6 of the psABI.
9765 We don't need to do that in non canonical PIC mode. */
9767 if (cfun->static_chain_decl && !TARGET_NO_PIC && !TARGET_AUTO_PIC)
9769 gcc_assert (STATIC_CHAIN_REGNUM == 15);
9770 indirect_call = true;
9773 indirect_call = false;
9776 fputs ("\t.prologue 4, r40\n", file);
9778 fputs ("\t.prologue\n\t.save ar.pfs, r40\n", file);
9779 fputs ("\talloc out0 = ar.pfs, 8, 0, 4, 0\n", file);
9781 if (NO_PROFILE_COUNTERS)
9782 fputs ("\tmov out3 = r0\n", file);
9786 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9788 if (TARGET_AUTO_PIC)
9789 fputs ("\tmovl out3 = @gprel(", file);
9791 fputs ("\taddl out3 = @ltoff(", file);
9792 assemble_name (file, buf);
9793 if (TARGET_AUTO_PIC)
9794 fputs (")\n", file);
9796 fputs ("), r1\n", file);
9800 fputs ("\taddl r14 = @ltoff(@fptr(_mcount)), r1\n", file);
9801 fputs ("\t;;\n", file);
9803 fputs ("\t.save rp, r42\n", file);
9804 fputs ("\tmov out2 = b0\n", file);
9806 fputs ("\tld8 r14 = [r14]\n\t;;\n", file);
9807 fputs ("\t.body\n", file);
9808 fputs ("\tmov out1 = r1\n", file);
9811 fputs ("\tld8 r16 = [r14], 8\n\t;;\n", file);
9812 fputs ("\tmov b6 = r16\n", file);
9813 fputs ("\tld8 r1 = [r14]\n", file);
9814 fputs ("\tbr.call.sptk.many b0 = b6\n\t;;\n", file);
9817 fputs ("\tbr.call.sptk.many b0 = _mcount\n\t;;\n", file);
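/* Summary of the sequence printed above: allocate four output registers
   with "alloc", save ar.pfs in r40 and the return branch register b0 in
   r42 (passed as out2), pass gp (r1) in out1 and the counter label (or r0
   when NO_PROFILE_COUNTERS) in out3, then call _mcount either directly or
   through b6 via its function descriptor when an indirect call is
   required.  */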
9820 static GTY(()) rtx mcount_func_rtx;
9822 gen_mcount_func_rtx (void)
9824 if (!mcount_func_rtx)
9825 mcount_func_rtx = init_one_libfunc ("_mcount");
9826 return mcount_func_rtx;
9830 ia64_profile_hook (int labelno)
9834 if (NO_PROFILE_COUNTERS)
9839 const char *label_name;
9840 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
9841 label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
9842 label = gen_rtx_SYMBOL_REF (Pmode, label_name);
9843 SYMBOL_REF_FLAGS (label) = SYMBOL_FLAG_LOCAL;
9845 ip = gen_reg_rtx (Pmode);
9846 emit_insn (gen_ip_value (ip));
9847 emit_library_call (gen_mcount_func_rtx (), LCT_NORMAL,
9849 gen_rtx_REG (Pmode, BR_REG (0)), Pmode,
9854 /* Return the mangling of TYPE if it is an extended fundamental type. */
9857 ia64_mangle_type (const_tree type)
9859 type = TYPE_MAIN_VARIANT (type);
9861 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
9862 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
9865 /* On HP-UX, "long double" is mangled as "e", so __float128 is mangled as "g".  */
9867 if (!TARGET_HPUX && TYPE_MODE (type) == TFmode)
9869 /* On HP-UX, "e" is not available as a mangling of __float80 so use
9870 an extended mangling. Elsewhere, "e" is available since long
9871 double is 80 bits. */
9872 if (TYPE_MODE (type) == XFmode)
9873 return TARGET_HPUX ? "u9__float80" : "e";
9874 if (TYPE_MODE (type) == RFmode)
9875 return "u7__fpreg";
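/* Example, assuming the standard Itanium C++ mangling scheme: a prototype
   "void f (__float80)" would mangle as _Z1fu9__float80 on HP-UX but as
   _Z1fe elsewhere, matching the returns above.  */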
9879 /* Return the diagnostic message string if conversion from FROMTYPE to
9880 TOTYPE is not allowed, NULL otherwise. */
9882 ia64_invalid_conversion (const_tree fromtype, const_tree totype)
9884 /* Reject nontrivial conversion to or from __fpreg. */
9885 if (TYPE_MODE (fromtype) == RFmode
9886 && TYPE_MODE (totype) != RFmode
9887 && TYPE_MODE (totype) != VOIDmode)
9888 return N_("invalid conversion from %<__fpreg%>");
9889 if (TYPE_MODE (totype) == RFmode
9890 && TYPE_MODE (fromtype) != RFmode)
9891 return N_("invalid conversion to %<__fpreg%>");
9895 /* Return the diagnostic message string if the unary operation OP is
9896 not permitted on TYPE, NULL otherwise. */
9898 ia64_invalid_unary_op (int op, const_tree type)
9900 /* Reject operations on __fpreg other than unary + or &. */
9901 if (TYPE_MODE (type) == RFmode
9902 && op != CONVERT_EXPR
9903 && op != ADDR_EXPR)
9904 return N_("invalid operation on %<__fpreg%>");
9908 /* Return the diagnostic message string if the binary operation OP is
9909 not permitted on TYPE1 and TYPE2, NULL otherwise. */
9911 ia64_invalid_binary_op (int op ATTRIBUTE_UNUSED, const_tree type1, const_tree type2)
9913 /* Reject operations on __fpreg. */
9914 if (TYPE_MODE (type1) == RFmode || TYPE_MODE (type2) == RFmode)
9915 return N_("invalid operation on %<__fpreg%>");
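/* For example, given "__fpreg f;", an expression such as "f + 1.0" or a
   conversion of f to double is rejected with the messages above, while
   unary plus and taking the address of f remain valid per
   ia64_invalid_unary_op.  */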
9919 /* Implement overriding of the optimization options. */
9921 ia64_optimization_options (int level ATTRIBUTE_UNUSED,
9922 int size ATTRIBUTE_UNUSED)
9924 /* Disable the second machine independent scheduling pass and use one for the
9925 IA-64. This needs to be here instead of in OVERRIDE_OPTIONS because this
9926 is done whenever the optimization is changed via #pragma GCC optimize or
9927 attribute((optimize(...))). */
9928 ia64_flag_schedule_insns2 = flag_schedule_insns_after_reload;
9929 flag_schedule_insns_after_reload = 0;
9931 /* Let the scheduler form additional regions. */
9932 set_param_value ("max-sched-extend-regions-iters", 2);
9934 /* Set the default values for cache-related parameters. */
9935 set_param_value ("simultaneous-prefetches", 6);
9936 set_param_value ("l1-cache-line-size", 32);
9940 /* HP-UX version_id attribute.
9941 For object foo, if the version_id is set to 1234 put out an alias
9942 of '.alias foo "foo{1234}"'.  We can't use "foo{1234}" in anything
9943 other than an alias statement because it is an illegal symbol name. */
9946 ia64_handle_version_id_attribute (tree *node ATTRIBUTE_UNUSED,
9947 tree name ATTRIBUTE_UNUSED,
9949 int flags ATTRIBUTE_UNUSED,
9952 tree arg = TREE_VALUE (args);
9954 if (TREE_CODE (arg) != STRING_CST)
9956 error ("version attribute is not a string");
9957 *no_add_attrs = true;
9963 /* Target hook for c_mode_for_suffix. */
9965 static enum machine_mode
9966 ia64_c_mode_for_suffix (char suffix)
9976 #include "gt-ia64.h"