/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
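/* Note added for clarity (not in the original source): the bypass is
   granted only when the producing insn and the FP store operate on
   values of the same size, e.g. an SFmode result feeding a 4-byte FP
   store; a result of a different width gets no bypass.  */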
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
                                    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
                                  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
                                 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
                                           enum machine_mode,
                                           secondary_reload_info *);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;
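/* Note added for clarity (not in the original source): the values above
   name the revision year of the UNIX standard being targeted, i.e.
   UNIX 98 when compiling for HP-UX 11.11, UNIX 95 for HP-UX 10.10, and
   UNIX 93 otherwise.  */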
/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  dash = strchr (str, '-');
  if (!dash)
    {
      warning (0, "value of -mfixed-range must have form REG1-REG2");
      return;
    }

  comma = strchr (dash + 1, ',');

  first = decode_reg_name (str);
  if (first < 0)
    {
      warning (0, "unknown register name: %s", str);
      return;
    }

  last = decode_reg_name (dash + 1);
  if (last < 0)
    {
      warning (0, "unknown register name: %s", dash + 1);
      return;
    }

  if (first > last)
    {
      warning (0, "%s-%s is an empty range", str, dash + 1);
      return;
    }

  for (i = first; i <= last; ++i)
    fixed_regs[i] = call_used_regs[i] = 1;

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mpa_risc_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_mpa_risc_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
        pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
        pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
        pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
        pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
        pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
        pa_cpu = PROCESSOR_7300;
      else
        return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX_10_10
#if TARGET_HPUX_11_11
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    warning (0, "PIC code generation is not supported in the portable runtime model");

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    warning (0, "PIC code generation is not compatible with fast indirect calls");

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif

  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
}

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}

/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
          || CONST_OK_FOR_LETTER_P (intval, 'N')
          || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
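/* Examples (added for illustration, not in the original source): the three
   constraint letters correspond to the three instructions named above, so
   0x1fff is accepted via `J' (14-bit signed immediate for ldo/ldi),
   0x55554800, whose low 11 bits are zero, via `N' (ldil), and a single
   contiguous run of ones such as 0x3ffc0 via `K' (zdepi).  */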
/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
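/* Worked example (added for illustration, not in the original source):
   for x = 0x3e0, the least significant set bit gives lsb_mask = 0x20 and
   t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1) = 0x40, a power of two, so
   0x3e0 can be formed with a single zdepi.  */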
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
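/* Worked example (added for illustration, not in the original source):
   adding the lowest set bit collapses a single contiguous run of ones
   into one carry bit, so ior_mask_p (0x0ff0) computes 0x0ff0 + 0x0010 =
   0x1000, a power of two, and returns 1, while ior_mask_p (0x0f0f)
   computes 0x0f0f + 0x0001 = 0x0f10, and 0x0f10 & 0x0f0f is nonzero, so
   it returns 0.  */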
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */
621 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
625 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
627 /* Labels need special handling. */
628 if (pic_label_operand (orig, mode))
630 /* We do not want to go through the movXX expanders here since that
631 would create recursion.
633 Nor do we really want to call a generator for a named pattern
634 since that requires multiple patterns if we want to support
637 So instead we just emit the raw set, which avoids the movXX
638 expanders completely. */
639 mark_reg_pointer (reg, BITS_PER_UNIT);
640 emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
641 current_function_uses_pic_offset_table = 1;
644 if (GET_CODE (orig) == SYMBOL_REF)
650 /* Before reload, allocate a temporary register for the intermediate
651 result. This allows the sequence to be deleted when the final
652 result is unused and the insns are trivially dead. */
653 tmp_reg = ((reload_in_progress || reload_completed)
654 ? reg : gen_reg_rtx (Pmode));
656 emit_move_insn (tmp_reg,
657 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
658 gen_rtx_HIGH (word_mode, orig)));
660 = gen_const_mem (Pmode,
661 gen_rtx_LO_SUM (Pmode, tmp_reg,
662 gen_rtx_UNSPEC (Pmode,
666 current_function_uses_pic_offset_table = 1;
667 mark_reg_pointer (reg, BITS_PER_UNIT);
668 insn = emit_move_insn (reg, pic_ref);
670 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
671 set_unique_reg_note (insn, REG_EQUAL, orig);
675 else if (GET_CODE (orig) == CONST)
679 if (GET_CODE (XEXP (orig, 0)) == PLUS
680 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
684 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
686 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
687 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
688 base == reg ? 0 : reg);
690 if (GET_CODE (orig) == CONST_INT)
692 if (INT_14_BITS (orig))
693 return plus_constant (base, INTVAL (orig));
694 orig = force_reg (Pmode, orig);
696 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
697 /* Likewise, should we set special REG_NOTEs here? */
703 static GTY(()) rtx gen_tls_tga;
706 gen_tls_get_addr (void)
709 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
714 hppa_tls_call (rtx arg)
718 ret = gen_reg_rtx (Pmode);
719 emit_library_call_value (gen_tls_get_addr (), ret,
720 LCT_CONST, Pmode, 1, arg, Pmode);
726 legitimize_tls_address (rtx addr)
728 rtx ret, insn, tmp, t1, t2, tp;
729 enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);
733 case TLS_MODEL_GLOBAL_DYNAMIC:
734 tmp = gen_reg_rtx (Pmode);
735 emit_insn (gen_tgd_load (tmp, addr));
736 ret = hppa_tls_call (tmp);
739 case TLS_MODEL_LOCAL_DYNAMIC:
740 ret = gen_reg_rtx (Pmode);
741 tmp = gen_reg_rtx (Pmode);
743 emit_insn (gen_tld_load (tmp, addr));
744 t1 = hppa_tls_call (tmp);
747 t2 = gen_reg_rtx (Pmode);
748 emit_libcall_block (insn, t2, t1,
749 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
751 emit_insn (gen_tld_offset_load (ret, addr, t2));
754 case TLS_MODEL_INITIAL_EXEC:
755 tp = gen_reg_rtx (Pmode);
756 tmp = gen_reg_rtx (Pmode);
757 ret = gen_reg_rtx (Pmode);
758 emit_insn (gen_tp_load (tp));
759 emit_insn (gen_tie_load (tmp, addr));
760 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
763 case TLS_MODEL_LOCAL_EXEC:
764 tp = gen_reg_rtx (Pmode);
765 ret = gen_reg_rtx (Pmode);
766 emit_insn (gen_tp_load (tp));
767 emit_insn (gen_tle_load (ret, addr, tp));
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

        memory(X + <large int>)

   into:

        if (<large int> & mask) >= 16
          Y = (<large int> & ~mask) + mask + 1      Round up.
        else
          Y = (<large int> & ~mask)                 Round down.
        Z = X + Y
        memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8 (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
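/* Worked example (added for illustration, not part of the original
   comment): for an integer-mode reference at X + 0x6234 the mask is
   0x3fff; the low bits 0x2234 are at least halfway to the next 0x4000
   boundary, so Y rounds up to 0x8000 and the reference is rewritten as
   memory ((X + 0x8000) + (-0x1dcc)), where -0x1dcc fits in a 14-bit
   displacement and the base X + 0x8000 can be shared by nearby
   references.  */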
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                         enum machine_mode mode)
{
834 /* We need to canonicalize the order of operands in unscaled indexed
835 addresses since the code that checks if an address is valid doesn't
836 always try both orders. */
837 if (!TARGET_NO_SPACE_REGS
838 && GET_CODE (x) == PLUS
839 && GET_MODE (x) == Pmode
840 && REG_P (XEXP (x, 0))
841 && REG_P (XEXP (x, 1))
842 && REG_POINTER (XEXP (x, 0))
843 && !REG_POINTER (XEXP (x, 1)))
844 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
846 if (PA_SYMBOL_REF_TLS_P (x))
847 return legitimize_tls_address (x);
849 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
851 /* Strip off CONST. */
852 if (GET_CODE (x) == CONST)
855 /* Special case. Get the SYMBOL_REF into a register and use indexing.
856 That should always be safe. */
857 if (GET_CODE (x) == PLUS
858 && GET_CODE (XEXP (x, 0)) == REG
859 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
861 rtx reg = force_reg (Pmode, XEXP (x, 1));
862 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
865 /* Note we must reject symbols which represent function addresses
866 since the assembler/linker can't handle arithmetic on plabels. */
867 if (GET_CODE (x) == PLUS
868 && GET_CODE (XEXP (x, 1)) == CONST_INT
869 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
870 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
871 || GET_CODE (XEXP (x, 0)) == REG))
873 rtx int_part, ptr_reg;
875 int offset = INTVAL (XEXP (x, 1));
878 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
879 ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);
881 /* Choose which way to round the offset. Round up if we
882 are >= halfway to the next boundary. */
883 if ((offset & mask) >= ((mask + 1) / 2))
884 newoffset = (offset & ~ mask) + mask + 1;
886 newoffset = (offset & ~ mask);
888 /* If the newoffset will not fit in 14 bits (ldo), then
889 handling this would take 4 or 5 instructions (2 to load
890 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
891 add the new offset and the SYMBOL_REF.) Combine can
892 not handle 4->2 or 5->2 combinations, so do not create
894 if (! VAL_14_BITS_P (newoffset)
895 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
897 rtx const_part = plus_constant (XEXP (x, 0), newoffset);
900 gen_rtx_HIGH (Pmode, const_part));
903 gen_rtx_LO_SUM (Pmode,
904 tmp_reg, const_part));
908 if (! VAL_14_BITS_P (newoffset))
909 int_part = force_reg (Pmode, GEN_INT (newoffset));
911 int_part = GEN_INT (newoffset);
913 ptr_reg = force_reg (Pmode,
915 force_reg (Pmode, XEXP (x, 0)),
918 return plus_constant (ptr_reg, offset - newoffset);
921 /* Handle (plus (mult (a) (shadd_constant)) (b)). */
923 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
924 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
925 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
926 && (OBJECT_P (XEXP (x, 1))
927 || GET_CODE (XEXP (x, 1)) == SUBREG)
928 && GET_CODE (XEXP (x, 1)) != CONST)
930 int val = INTVAL (XEXP (XEXP (x, 0), 1));
934 if (GET_CODE (reg1) != REG)
935 reg1 = force_reg (Pmode, force_operand (reg1, 0));
937 reg2 = XEXP (XEXP (x, 0), 0);
938 if (GET_CODE (reg2) != REG)
939 reg2 = force_reg (Pmode, force_operand (reg2, 0));
941 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
948 /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).
950 Only do so for floating point modes since this is more speculative
951 and we lose if it's an integer store. */
952 if (GET_CODE (x) == PLUS
953 && GET_CODE (XEXP (x, 0)) == PLUS
954 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
955 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
956 && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
957 && (mode == SFmode || mode == DFmode))
960 /* First, try and figure out what to use as a base register. */
961 rtx reg1, reg2, base, idx, orig_base;
963 reg1 = XEXP (XEXP (x, 0), 1);
968 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
969 then emit_move_sequence will turn on REG_POINTER so we'll know
970 it's a base register below. */
971 if (GET_CODE (reg1) != REG)
972 reg1 = force_reg (Pmode, force_operand (reg1, 0));
974 if (GET_CODE (reg2) != REG)
975 reg2 = force_reg (Pmode, force_operand (reg2, 0));
977 /* Figure out what the base and index are. */
979 if (GET_CODE (reg1) == REG
980 && REG_POINTER (reg1))
983 orig_base = XEXP (XEXP (x, 0), 1);
984 idx = gen_rtx_PLUS (Pmode,
986 XEXP (XEXP (XEXP (x, 0), 0), 0),
987 XEXP (XEXP (XEXP (x, 0), 0), 1)),
990 else if (GET_CODE (reg2) == REG
991 && REG_POINTER (reg2))
994 orig_base = XEXP (x, 1);
1001 /* If the index adds a large constant, try to scale the
1002 constant so that it can be loaded with only one insn. */
1003 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1004 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1005 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1006 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1008 /* Divide the CONST_INT by the scale factor, then add it to A. */
1009 int val = INTVAL (XEXP (idx, 1));
1011 val /= INTVAL (XEXP (XEXP (idx, 0), 1));
1012 reg1 = XEXP (XEXP (idx, 0), 0);
1013 if (GET_CODE (reg1) != REG)
1014 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1016 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1018 /* We can now generate a simple scaled indexed address. */
1021 (Pmode, gen_rtx_PLUS (Pmode,
1022 gen_rtx_MULT (Pmode, reg1,
1023 XEXP (XEXP (idx, 0), 1)),
1027 /* If B + C is still a valid base register, then add them. */
1028 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1029 && INTVAL (XEXP (idx, 1)) <= 4096
1030 && INTVAL (XEXP (idx, 1)) >= -4096)
1032 int val = INTVAL (XEXP (XEXP (idx, 0), 1));
1035 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1037 reg2 = XEXP (XEXP (idx, 0), 0);
1038 if (GET_CODE (reg2) != CONST_INT)
1039 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1041 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
1042 gen_rtx_MULT (Pmode,
1048 /* Get the index into a register, then add the base + index and
1049 return a register holding the result. */
1051 /* First get A into a register. */
1052 reg1 = XEXP (XEXP (idx, 0), 0);
1053 if (GET_CODE (reg1) != REG)
1054 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1056 /* And get B into a register. */
1057 reg2 = XEXP (idx, 1);
1058 if (GET_CODE (reg2) != REG)
1059 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1061 reg1 = force_reg (Pmode,
1062 gen_rtx_PLUS (Pmode,
1063 gen_rtx_MULT (Pmode, reg1,
1064 XEXP (XEXP (idx, 0), 1)),
1067 /* Add the result to our base register and return. */
1068 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */
1083 if (GET_CODE (x) == PLUS
1084 && symbolic_expression_p (XEXP (x, 1)))
1086 /* Ugly. We modify things here so that the address offset specified
1087 by the index expression is computed first, then added to x to form
1088 the entire address. */
1090 rtx regx1, regx2, regy1, regy2, y;
1092 /* Strip off any CONST. */
1094 if (GET_CODE (y) == CONST)
1097 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1099 /* See if this looks like
1100 (plus (mult (reg) (shadd_const))
1101 (const (plus (symbol_ref) (const_int))))
1103 Where const_int is small. In that case the const
1104 expression is a valid pointer for indexing.
1106 If const_int is big, but can be divided evenly by shadd_const
1107 and added to (reg). This allows more scaled indexed addresses. */
1108 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1109 && GET_CODE (XEXP (x, 0)) == MULT
1110 && GET_CODE (XEXP (y, 1)) == CONST_INT
1111 && INTVAL (XEXP (y, 1)) >= -4096
1112 && INTVAL (XEXP (y, 1)) <= 4095
1113 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1114 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1116 int val = INTVAL (XEXP (XEXP (x, 0), 1));
1120 if (GET_CODE (reg1) != REG)
1121 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1123 reg2 = XEXP (XEXP (x, 0), 0);
1124 if (GET_CODE (reg2) != REG)
1125 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1127 return force_reg (Pmode,
1128 gen_rtx_PLUS (Pmode,
1129 gen_rtx_MULT (Pmode,
1134 else if ((mode == DFmode || mode == SFmode)
1135 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1136 && GET_CODE (XEXP (x, 0)) == MULT
1137 && GET_CODE (XEXP (y, 1)) == CONST_INT
1138 && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
1139 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1140 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1143 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1144 / INTVAL (XEXP (XEXP (x, 0), 1))));
1145 regx2 = XEXP (XEXP (x, 0), 0);
1146 if (GET_CODE (regx2) != REG)
1147 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1148 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1152 gen_rtx_PLUS (Pmode,
1153 gen_rtx_MULT (Pmode, regx2,
1154 XEXP (XEXP (x, 0), 1)),
1155 force_reg (Pmode, XEXP (y, 0))));
1157 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1158 && INTVAL (XEXP (y, 1)) >= -4096
1159 && INTVAL (XEXP (y, 1)) <= 4095)
1161 /* This is safe because of the guard page at the
1162 beginning and end of the data space. Just
1163 return the original address. */
1168 /* Doesn't look like one we can optimize. */
1169 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1170 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1171 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1172 regx1 = force_reg (Pmode,
1173 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1175 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1183 /* For the HPPA, REG and REG+CONST is cost 0
1184 and addresses involving symbolic constants are cost 2.
1186 PIC addresses are very expensive.
1188 It is no coincidence that this has the same structure
1189 as GO_IF_LEGITIMATE_ADDRESS. */
1192 hppa_address_cost (rtx X)
1194 switch (GET_CODE (X))
1207 /* Compute a (partial) cost for rtx X. Return true if the complete
1208 cost has been computed, and false if subexpressions should be
1209 scanned. In either case, *TOTAL contains the cost result. */
1212 hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
1217 if (INTVAL (x) == 0)
1219 else if (INT_14_BITS (x))
1236 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1237 && outer_code != SET)
1244 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1245 *total = COSTS_N_INSNS (3);
1246 else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1247 *total = COSTS_N_INSNS (8);
1249 *total = COSTS_N_INSNS (20);
1253 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1255 *total = COSTS_N_INSNS (14);
1263 *total = COSTS_N_INSNS (60);
1266 case PLUS: /* this includes shNadd insns */
1268 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1269 *total = COSTS_N_INSNS (3);
1271 *total = COSTS_N_INSNS (1);
1277 *total = COSTS_N_INSNS (1);
1285 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1286 new rtx with the correct mode. */
1288 force_mode (enum machine_mode mode, rtx orig)
1290 if (mode == GET_MODE (orig))
1293 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1295 return gen_rtx_REG (mode, REGNO (orig));
1298 /* Return 1 if *X is a thread-local symbol. */
1301 pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1303 return PA_SYMBOL_REF_TLS_P (*x);
1306 /* Return 1 if X contains a thread-local symbol. */
1309 pa_tls_referenced_p (rtx x)
1311 if (!TARGET_HAVE_TLS)
1314 return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */
1328 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1330 register rtx operand0 = operands[0];
1331 register rtx operand1 = operands[1];
1334 /* We can only handle indexed addresses in the destination operand
1335 of floating point stores. Thus, we need to break out indexed
1336 addresses from the destination operand. */
1337 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1339 /* This is only safe up to the beginning of life analysis. */
1340 gcc_assert (!no_new_pseudos);
1342 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1343 operand0 = replace_equiv_address (operand0, tem);
  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
1353 if (!TARGET_NO_SPACE_REGS
1354 && !cse_not_expected
1355 && GET_CODE (operand1) == MEM
1356 && GET_CODE (XEXP (operand1, 0)) == PLUS
1357 && REG_P (XEXP (XEXP (operand1, 0), 0))
1358 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1360 = replace_equiv_address (operand1,
1361 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1364 && reload_in_progress && GET_CODE (operand0) == REG
1365 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1366 operand0 = reg_equiv_mem[REGNO (operand0)];
1367 else if (scratch_reg
1368 && reload_in_progress && GET_CODE (operand0) == SUBREG
1369 && GET_CODE (SUBREG_REG (operand0)) == REG
1370 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1372 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1373 the code which tracks sets/uses for delete_output_reload. */
1374 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1375 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
1376 SUBREG_BYTE (operand0));
1377 operand0 = alter_subreg (&temp);
1381 && reload_in_progress && GET_CODE (operand1) == REG
1382 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1383 operand1 = reg_equiv_mem[REGNO (operand1)];
1384 else if (scratch_reg
1385 && reload_in_progress && GET_CODE (operand1) == SUBREG
1386 && GET_CODE (SUBREG_REG (operand1)) == REG
1387 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1389 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1390 the code which tracks sets/uses for delete_output_reload. */
1391 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1392 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
1393 SUBREG_BYTE (operand1));
1394 operand1 = alter_subreg (&temp);
1397 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1398 && ((tem = find_replacement (&XEXP (operand0, 0)))
1399 != XEXP (operand0, 0)))
1400 operand0 = replace_equiv_address (operand0, tem);
1402 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1403 && ((tem = find_replacement (&XEXP (operand1, 0)))
1404 != XEXP (operand1, 0)))
1405 operand1 = replace_equiv_address (operand1, tem);
1407 /* Handle secondary reloads for loads/stores of FP registers from
1408 REG+D addresses where D does not fit in 5 or 14 bits, including
1409 (subreg (mem (addr))) cases. */
1411 && fp_reg_operand (operand0, mode)
1412 && ((GET_CODE (operand1) == MEM
1413 && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
1414 XEXP (operand1, 0)))
1415 || ((GET_CODE (operand1) == SUBREG
1416 && GET_CODE (XEXP (operand1, 0)) == MEM
1417 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1419 XEXP (XEXP (operand1, 0), 0))))))
1421 if (GET_CODE (operand1) == SUBREG)
1422 operand1 = XEXP (operand1, 0);
1424 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1425 it in WORD_MODE regardless of what mode it was originally given
1427 scratch_reg = force_mode (word_mode, scratch_reg);
1429 /* D might not fit in 14 bits either; for such cases load D into
1431 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
1433 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1434 emit_move_insn (scratch_reg,
1435 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1437 XEXP (XEXP (operand1, 0), 0),
1441 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1442 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1443 replace_equiv_address (operand1, scratch_reg)));
1446 else if (scratch_reg
1447 && fp_reg_operand (operand1, mode)
1448 && ((GET_CODE (operand0) == MEM
1449 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1451 XEXP (operand0, 0)))
1452 || ((GET_CODE (operand0) == SUBREG)
1453 && GET_CODE (XEXP (operand0, 0)) == MEM
1454 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1456 XEXP (XEXP (operand0, 0), 0)))))
1458 if (GET_CODE (operand0) == SUBREG)
1459 operand0 = XEXP (operand0, 0);
1461 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1462 it in WORD_MODE regardless of what mode it was originally given
1464 scratch_reg = force_mode (word_mode, scratch_reg);
1466 /* D might not fit in 14 bits either; for such cases load D into
1468 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
1470 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1471 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1474 XEXP (XEXP (operand0, 0),
1479 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1480 emit_insn (gen_rtx_SET (VOIDmode,
1481 replace_equiv_address (operand0, scratch_reg),
1485 /* Handle secondary reloads for loads of FP registers from constant
1486 expressions by forcing the constant into memory.
1488 Use scratch_reg to hold the address of the memory location.
1490 The proper fix is to change PREFERRED_RELOAD_CLASS to return
1491 NO_REGS when presented with a const_int and a register class
1492 containing only FP registers. Doing so unfortunately creates
1493 more problems than it solves. Fix this for 2.5. */
1494 else if (scratch_reg
1495 && CONSTANT_P (operand1)
1496 && fp_reg_operand (operand0, mode))
1498 rtx const_mem, xoperands[2];
1500 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1501 it in WORD_MODE regardless of what mode it was originally given
1503 scratch_reg = force_mode (word_mode, scratch_reg);
1505 /* Force the constant into memory and put the address of the
1506 memory location into scratch_reg. */
1507 const_mem = force_const_mem (mode, operand1);
1508 xoperands[0] = scratch_reg;
1509 xoperands[1] = XEXP (const_mem, 0);
1510 emit_move_sequence (xoperands, Pmode, 0);
1512 /* Now load the destination register. */
1513 emit_insn (gen_rtx_SET (mode, operand0,
1514 replace_equiv_address (const_mem, scratch_reg)));
1517 /* Handle secondary reloads for SAR. These occur when trying to load
1518 the SAR from memory, FP register, or with a constant. */
1519 else if (scratch_reg
1520 && GET_CODE (operand0) == REG
1521 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1522 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1523 && (GET_CODE (operand1) == MEM
1524 || GET_CODE (operand1) == CONST_INT
1525 || (GET_CODE (operand1) == REG
1526 && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
1528 /* D might not fit in 14 bits either; for such cases load D into
1530 if (GET_CODE (operand1) == MEM
1531 && !memory_address_p (Pmode, XEXP (operand1, 0)))
1533 /* We are reloading the address into the scratch register, so we
1534 want to make sure the scratch register is a full register. */
1535 scratch_reg = force_mode (word_mode, scratch_reg);
1537 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1538 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1541 XEXP (XEXP (operand1, 0),
1545 /* Now we are going to load the scratch register from memory,
1546 we want to load it in the same width as the original MEM,
1547 which must be the same as the width of the ultimate destination,
1549 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1551 emit_move_insn (scratch_reg,
1552 replace_equiv_address (operand1, scratch_reg));
1556 /* We want to load the scratch register using the same mode as
1557 the ultimate destination. */
1558 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1560 emit_move_insn (scratch_reg, operand1);
1563 /* And emit the insn to set the ultimate destination. We know that
1564 the scratch register has the same mode as the destination at this
1566 emit_move_insn (operand0, scratch_reg);
1569 /* Handle the most common case: storing into a register. */
1570 else if (register_operand (operand0, mode))
1572 if (register_operand (operand1, mode)
1573 || (GET_CODE (operand1) == CONST_INT
1574 && cint_ok_for_move (INTVAL (operand1)))
1575 || (operand1 == CONST0_RTX (mode))
1576 || (GET_CODE (operand1) == HIGH
1577 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1578 /* Only `general_operands' can come here, so MEM is ok. */
1579 || GET_CODE (operand1) == MEM)
1581 /* Various sets are created during RTL generation which don't
1582 have the REG_POINTER flag correctly set. After the CSE pass,
1583 instruction recognition can fail if we don't consistently
1584 set this flag when performing register copies. This should
1585 also improve the opportunities for creating insns that use
1586 unscaled indexing. */
1587 if (REG_P (operand0) && REG_P (operand1))
1589 if (REG_POINTER (operand1)
1590 && !REG_POINTER (operand0)
1591 && !HARD_REGISTER_P (operand0))
1592 copy_reg_pointer (operand0, operand1);
1593 else if (REG_POINTER (operand0)
1594 && !REG_POINTER (operand1)
1595 && !HARD_REGISTER_P (operand1))
1596 copy_reg_pointer (operand1, operand0);
1599 /* When MEMs are broken out, the REG_POINTER flag doesn't
1600 get set. In some cases, we can set the REG_POINTER flag
1601 from the declaration for the MEM. */
1602 if (REG_P (operand0)
1603 && GET_CODE (operand1) == MEM
1604 && !REG_POINTER (operand0))
1606 tree decl = MEM_EXPR (operand1);
1608 /* Set the register pointer flag and register alignment
1609 if the declaration for this memory reference is a
1610 pointer type. Fortran indirect argument references
1613 && !(flag_argument_noalias > 1
1614 && TREE_CODE (decl) == INDIRECT_REF
1615 && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
1619 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1621 if (TREE_CODE (decl) == COMPONENT_REF)
1622 decl = TREE_OPERAND (decl, 1);
1624 type = TREE_TYPE (decl);
1625 if (TREE_CODE (type) == ARRAY_TYPE)
1626 type = get_inner_array_type (type);
1628 if (POINTER_TYPE_P (type))
1632 type = TREE_TYPE (type);
1633 /* Using TYPE_ALIGN_OK is rather conservative as
1634 only the ada frontend actually sets it. */
1635 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1637 mark_reg_pointer (operand0, align);
1642 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1646 else if (GET_CODE (operand0) == MEM)
1648 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1649 && !(reload_in_progress || reload_completed))
1651 rtx temp = gen_reg_rtx (DFmode);
1653 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1654 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1657 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1659 /* Run this case quickly. */
1660 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1663 if (! (reload_in_progress || reload_completed))
1665 operands[0] = validize_mem (operand0);
1666 operands[1] = operand1 = force_reg (mode, operand1);
1670 /* Simplify the source if we need to.
1671 Note we do have to handle function labels here, even though we do
1672 not consider them legitimate constants. Loop optimizations can
1673 call the emit_move_xxx with one as a source. */
1674 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1675 || function_label_operand (operand1, mode)
1676 || (GET_CODE (operand1) == HIGH
1677 && symbolic_operand (XEXP (operand1, 0), mode)))
1681 if (GET_CODE (operand1) == HIGH)
1684 operand1 = XEXP (operand1, 0);
1686 if (symbolic_operand (operand1, mode))
1688 /* Argh. The assembler and linker can't handle arithmetic
1691 So we force the plabel into memory, load operand0 from
1692 the memory location, then add in the constant part. */
1693 if ((GET_CODE (operand1) == CONST
1694 && GET_CODE (XEXP (operand1, 0)) == PLUS
1695 && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
1696 || function_label_operand (operand1, mode))
1698 rtx temp, const_part;
1700 /* Figure out what (if any) scratch register to use. */
1701 if (reload_in_progress || reload_completed)
1703 scratch_reg = scratch_reg ? scratch_reg : operand0;
1704 /* SCRATCH_REG will hold an address and maybe the actual
1705 data. We want it in WORD_MODE regardless of what mode it
1706 was originally given to us. */
1707 scratch_reg = force_mode (word_mode, scratch_reg);
1710 scratch_reg = gen_reg_rtx (Pmode);
1712 if (GET_CODE (operand1) == CONST)
1714 /* Save away the constant part of the expression. */
1715 const_part = XEXP (XEXP (operand1, 0), 1);
1716 gcc_assert (GET_CODE (const_part) == CONST_INT);
1718 /* Force the function label into memory. */
1719 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1723 /* No constant part. */
1724 const_part = NULL_RTX;
1726 /* Force the function label into memory. */
1727 temp = force_const_mem (mode, operand1);
1731 /* Get the address of the memory location. PIC-ify it if
1733 temp = XEXP (temp, 0);
1735 temp = legitimize_pic_address (temp, mode, scratch_reg);
1737 /* Put the address of the memory location into our destination
1740 emit_move_sequence (operands, mode, scratch_reg);
1742 /* Now load from the memory location into our destination
1744 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1745 emit_move_sequence (operands, mode, scratch_reg);
1747 /* And add back in the constant part. */
1748 if (const_part != NULL_RTX)
1749 expand_inc (operand0, const_part);
1758 if (reload_in_progress || reload_completed)
1760 temp = scratch_reg ? scratch_reg : operand0;
1761 /* TEMP will hold an address and maybe the actual
1762 data. We want it in WORD_MODE regardless of what mode it
1763 was originally given to us. */
1764 temp = force_mode (word_mode, temp);
1767 temp = gen_reg_rtx (Pmode);
1769 /* (const (plus (symbol) (const_int))) must be forced to
1770 memory during/after reload if the const_int will not fit
1772 if (GET_CODE (operand1) == CONST
1773 && GET_CODE (XEXP (operand1, 0)) == PLUS
1774 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1775 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1776 && (reload_completed || reload_in_progress)
1779 rtx const_mem = force_const_mem (mode, operand1);
1780 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
1782 operands[1] = replace_equiv_address (const_mem, operands[1]);
1783 emit_move_sequence (operands, mode, temp);
1787 operands[1] = legitimize_pic_address (operand1, mode, temp);
1788 if (REG_P (operand0) && REG_P (operands[1]))
1789 copy_reg_pointer (operand0, operands[1]);
1790 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
1793 /* On the HPPA, references to data space are supposed to use dp,
1794 register 27, but showing it in the RTL inhibits various cse
1795 and loop optimizations. */
1800 if (reload_in_progress || reload_completed)
1802 temp = scratch_reg ? scratch_reg : operand0;
1803 /* TEMP will hold an address and maybe the actual
1804 data. We want it in WORD_MODE regardless of what mode it
1805 was originally given to us. */
1806 temp = force_mode (word_mode, temp);
1809 temp = gen_reg_rtx (mode);
1811 /* Loading a SYMBOL_REF into a register makes that register
1812 safe to be used as the base in an indexed address.
1814 Don't mark hard registers though. That loses. */
1815 if (GET_CODE (operand0) == REG
1816 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1817 mark_reg_pointer (operand0, BITS_PER_UNIT);
1818 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
1819 mark_reg_pointer (temp, BITS_PER_UNIT);
1822 set = gen_rtx_SET (mode, operand0, temp);
1824 set = gen_rtx_SET (VOIDmode,
1826 gen_rtx_LO_SUM (mode, temp, operand1));
1828 emit_insn (gen_rtx_SET (VOIDmode,
1830 gen_rtx_HIGH (mode, operand1)));
1836 else if (pa_tls_referenced_p (operand1))
1841 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
1843 addend = XEXP (XEXP (tmp, 0), 1);
1844 tmp = XEXP (XEXP (tmp, 0), 0);
1847 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
1848 tmp = legitimize_tls_address (tmp);
1851 tmp = gen_rtx_PLUS (mode, tmp, addend);
1852 tmp = force_operand (tmp, operands[0]);
1856 else if (GET_CODE (operand1) != CONST_INT
1857 || !cint_ok_for_move (INTVAL (operand1)))
1861 HOST_WIDE_INT value = 0;
1862 HOST_WIDE_INT insv = 0;
1865 if (GET_CODE (operand1) == CONST_INT)
1866 value = INTVAL (operand1);
1869 && GET_CODE (operand1) == CONST_INT
1870 && HOST_BITS_PER_WIDE_INT > 32
1871 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	  /* Extract the low order 32 bits of the value and sign extend.
	     If the new value is the same as the original value, we can
	     use the original value as-is.  If the new value is
	     different, we use it and insert the most-significant 32-bits
	     of the original value into the final result.  */
1880 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1881 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
1884 #if HOST_BITS_PER_WIDE_INT > 32
1885 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
1889 operand1 = GEN_INT (nval);
1893 if (reload_in_progress || reload_completed)
1894 temp = scratch_reg ? scratch_reg : operand0;
1896 temp = gen_reg_rtx (mode);
1898 /* We don't directly split DImode constants on 32-bit targets
1899 because PLUS uses an 11-bit immediate and the insn sequence
1900 generated is not as efficient as the one using HIGH/LO_SUM. */
1901 if (GET_CODE (operand1) == CONST_INT
1902 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1903 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
1906 /* Directly break constant into high and low parts. This
1907 provides better optimization opportunities because various
1908 passes recognize constants split with PLUS but not LO_SUM.
1909 We use a 14-bit signed low part except when the addition
1910 of 0x4000 to the high part might change the sign of the
1912 HOST_WIDE_INT low = value & 0x3fff;
1913 HOST_WIDE_INT high = value & ~ 0x3fff;
1917 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
1925 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
1926 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
1930 emit_insn (gen_rtx_SET (VOIDmode, temp,
1931 gen_rtx_HIGH (mode, operand1)));
1932 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
1935 insn = emit_move_insn (operands[0], operands[1]);
1937 /* Now insert the most significant 32 bits of the value
1938 into the register. When we don't have a second register
1939 available, it could take up to nine instructions to load
1940 a 64-bit integer constant. Prior to reload, we force
1941 constants that would take more than three instructions
1942 to load to the constant pool. During and after reload,
1943 we have to handle all possible values. */
1946 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
1947 register and the value to be inserted is outside the
1948 range that can be loaded with three depdi instructions. */
1949 if (temp != operand0 && (insv >= 16384 || insv < -16384))
1951 operand1 = GEN_INT (insv);
1953 emit_insn (gen_rtx_SET (VOIDmode, temp,
1954 gen_rtx_HIGH (mode, operand1)));
1955 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
1956 emit_insn (gen_insv (operand0, GEN_INT (32),
1961 int len = 5, pos = 27;
1963 /* Insert the bits using the depdi instruction. */
1966 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
1967 HOST_WIDE_INT sign = v5 < 0;
1969 /* Left extend the insertion. */
1970 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
1971 while (pos > 0 && (insv & 1) == sign)
1973 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
1978 emit_insn (gen_insv (operand0, GEN_INT (len),
1979 GEN_INT (pos), GEN_INT (v5)));
1981 len = pos > 0 && pos < 5 ? pos : 5;
1987 set_unique_reg_note (insn, REG_EQUAL, op1);
1992 /* Now have insn-emit do whatever it normally does. */
1996 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
1997 it will need a link/runtime reloc). */
2000 reloc_needed (tree exp)
2004 switch (TREE_CODE (exp))
2011 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2012 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2017 case NON_LVALUE_EXPR:
2018 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2024 unsigned HOST_WIDE_INT ix;
2026 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2028 reloc |= reloc_needed (value);
/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
   will be true.  */
int
read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
{
2048 if (GET_CODE (operand) == CONST)
2049 operand = XEXP (XEXP (operand, 0), 0);
2052 if (GET_CODE (operand) == SYMBOL_REF)
2053 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2057 if (GET_CODE (operand) == SYMBOL_REF)
2058 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2064 /* Return the best assembler insn template
2065 for moving operands[1] into operands[0] as a fullword. */
2067 singlemove_string (rtx *operands)
2069 HOST_WIDE_INT intval;
2071 if (GET_CODE (operands[0]) == MEM)
2072 return "stw %r1,%0";
2073 if (GET_CODE (operands[1]) == MEM)
2075 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2080 gcc_assert (GET_MODE (operands[1]) == SFmode);
2082 /* Translate the CONST_DOUBLE to a CONST_INT with the same target
2084 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2085 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2087 operands[1] = GEN_INT (i);
2088 /* Fall through to CONST_INT case. */
2090 if (GET_CODE (operands[1]) == CONST_INT)
2092 intval = INTVAL (operands[1]);
      if (VAL_14_BITS_P (intval))
	return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
2097 return "ldil L'%1,%0";
2098 else if (zdepi_cint_p (intval))
2099 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2101 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2103 return "copy %1,%0";
2107 /* Compute position (in OP[1]) and width (in OP[2])
2108 useful for copying IMM to a register using the zdepi
2109 instructions. Store the immediate value to insert in OP[0]. */
2111 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2115 /* Find the least significant set bit in IMM. */
2116 for (lsb = 0; lsb < 32; lsb++)
2123 /* Choose variants based on *sign* of the 5-bit field. */
2124 if ((imm & 0x10) == 0)
2125 len = (lsb <= 28) ? 4 : 32 - lsb;
2128 /* Find the width of the bitstring in IMM. */
2129 for (len = 5; len < 32; len++)
2131 if ((imm & (1 << len)) == 0)
2135 /* Sign extend IMM as a 5-bit value. */
2136 imm = (imm & 0xf) - 0x10;
2144 /* Compute position (in OP[1]) and width (in OP[2])
2145 useful for copying IMM to a register using the depdi,z
2146 instructions. Store the immediate value to insert in OP[0]. */
2148 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2150 HOST_WIDE_INT lsb, len;
2152 /* Find the least significant set bit in IMM. */
2153 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2160 /* Choose variants based on *sign* of the 5-bit field. */
2161 if ((imm & 0x10) == 0)
2162 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2163 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2166 /* Find the width of the bitstring in IMM. */
2167 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2169 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2173 /* Sign extend IMM as a 5-bit value. */
2174 imm = (imm & 0xf) - 0x10;
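/* Host-side sketch (hypothetical, unused helper) of the first scan in
   the two compute_zdep*_operands functions above: locate the least
   significant set bit, which becomes the deposit position handed to
   zdepi/depdi,z.  */

static inline int
example_lsb_position (unsigned HOST_WIDE_INT x)
{
  int lsb;

  for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
    if (x & ((unsigned HOST_WIDE_INT) 1 << lsb))
      break;

  return lsb;	/* HOST_BITS_PER_WIDE_INT if X is zero.  */
}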
2182 /* Output assembler code to perform a doubleword move insn
2183 with operands OPERANDS. */
2186 output_move_double (rtx *operands)
2188 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2190 rtx addreg0 = 0, addreg1 = 0;
2192 /* First classify both operands. */
2194 if (REG_P (operands[0]))
2196 else if (offsettable_memref_p (operands[0]))
2198 else if (GET_CODE (operands[0]) == MEM)
2203 if (REG_P (operands[1]))
2205 else if (CONSTANT_P (operands[1]))
2207 else if (offsettable_memref_p (operands[1]))
2209 else if (GET_CODE (operands[1]) == MEM)
2214 /* Check for the cases that the operand constraints are not
2215 supposed to allow to happen. */
2216 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2218 /* Handle copies between general and floating registers. */
2220 if (optype0 == REGOP && optype1 == REGOP
2221 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2223 if (FP_REG_P (operands[0]))
2225 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2226 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2227 return "{fldds|fldd} -16(%%sp),%0";
2231 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2232 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2233 return "{ldws|ldw} -12(%%sp),%R0";
2237 /* Handle auto decrementing and incrementing loads and stores
2238 specifically, since the structure of the function doesn't work
2239 for them without major modification.  Do this better when this
2240 port learns about the general inc/dec addressing of the PA.
2241 (This was written by tege. Chide him if it doesn't work.) */
2243 if (optype0 == MEMOP)
2245 /* We have to output the address syntax ourselves, since print_operand
2246 doesn't deal with the addresses we want to use. Fix this later. */
2248 rtx addr = XEXP (operands[0], 0);
2249 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2251 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2253 operands[0] = XEXP (addr, 0);
2254 gcc_assert (GET_CODE (operands[1]) == REG
2255 && GET_CODE (operands[0]) == REG);
2257 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2259 /* No overlap between high target register and address
2260 register. (We do this in a non-obvious way to
2261 save a register file writeback) */
2262 if (GET_CODE (addr) == POST_INC)
2263 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2264 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2266 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2268 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2270 operands[0] = XEXP (addr, 0);
2271 gcc_assert (GET_CODE (operands[1]) == REG
2272 && GET_CODE (operands[0]) == REG);
2274 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2275 /* No overlap between high target register and address
2276 register. (We do this in a non-obvious way to save a
2277 register file writeback) */
2278 if (GET_CODE (addr) == PRE_INC)
2279 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2280 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2283 if (optype1 == MEMOP)
2285 /* We have to output the address syntax ourselves, since print_operand
2286 doesn't deal with the addresses we want to use. Fix this later. */
2288 rtx addr = XEXP (operands[1], 0);
2289 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2291 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2293 operands[1] = XEXP (addr, 0);
2294 gcc_assert (GET_CODE (operands[0]) == REG
2295 && GET_CODE (operands[1]) == REG);
2297 if (!reg_overlap_mentioned_p (high_reg, addr))
2299 /* No overlap between high target register and address
2300 register. (We do this in a non-obvious way to
2301 save a register file writeback) */
2302 if (GET_CODE (addr) == POST_INC)
2303 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2304 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2308 /* This is an undefined situation. We should load into the
2309 address register *and* update that register. Probably
2310 we don't need to handle this at all. */
2311 if (GET_CODE (addr) == POST_INC)
2312 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2313 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2316 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2318 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2320 operands[1] = XEXP (addr, 0);
2321 gcc_assert (GET_CODE (operands[0]) == REG
2322 && GET_CODE (operands[1]) == REG);
2324 if (!reg_overlap_mentioned_p (high_reg, addr))
2326 /* No overlap between high target register and address
2327 register. (We do this in a non-obvious way to
2328 save a register file writeback) */
2329 if (GET_CODE (addr) == PRE_INC)
2330 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2331 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2335 /* This is an undefined situation. We should load into the
2336 address register *and* update that register. Probably
2337 we don't need to handle this at all. */
2338 if (GET_CODE (addr) == PRE_INC)
2339 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2340 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2343 else if (GET_CODE (addr) == PLUS
2344 && GET_CODE (XEXP (addr, 0)) == MULT)
2347 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2349 if (!reg_overlap_mentioned_p (high_reg, addr))
2351 xoperands[0] = high_reg;
2352 xoperands[1] = XEXP (addr, 1);
2353 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2354 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2355 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2357 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2361 xoperands[0] = high_reg;
2362 xoperands[1] = XEXP (addr, 1);
2363 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2364 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2365 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2367 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2372 /* If an operand is an unoffsettable memory ref, find a register
2373 we can increment temporarily to make it refer to the second word. */
2375 if (optype0 == MEMOP)
2376 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2378 if (optype1 == MEMOP)
2379 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2381 /* Ok, we can do one word at a time.
2382 Normally we do the low-numbered word first.
2384 In either case, set up in LATEHALF the operands to use
2385 for the high-numbered word and in some cases alter the
2386 operands in OPERANDS to be suitable for the low-numbered word. */
2388 if (optype0 == REGOP)
2389 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2390 else if (optype0 == OFFSOP)
2391 latehalf[0] = adjust_address (operands[0], SImode, 4);
2393 latehalf[0] = operands[0];
2395 if (optype1 == REGOP)
2396 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2397 else if (optype1 == OFFSOP)
2398 latehalf[1] = adjust_address (operands[1], SImode, 4);
2399 else if (optype1 == CNSTOP)
2400 split_double (operands[1], &operands[1], &latehalf[1]);
2402 latehalf[1] = operands[1];
2404 /* If the first move would clobber the source of the second one,
2405 do them in the other order.
2407 This can happen in two cases:
2409 mem -> register where the first half of the destination register
2410 is the same register used in the memory's address. Reload
2411 can create such insns.
2413 mem in this case will be either register indirect or register
2414 indirect plus a valid offset.
2416 register -> register move where REGNO(dst) == REGNO(src + 1)
2417 someone (Tim/Tege?) claimed this can happen for parameter loads.
2419 Handle mem -> register case first. */
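/* For example, a doubleword load from 0(%r4) into %r4/%r5: loading the
   low word first would overwrite %r4 and destroy the base address, so
   4(%r4) is loaded into %r5 before 0(%r4) is loaded into %r4.  */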
2420 if (optype0 == REGOP
2421 && (optype1 == MEMOP || optype1 == OFFSOP)
2422 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2425 /* Do the late half first. */
2427 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2428 output_asm_insn (singlemove_string (latehalf), latehalf);
2432 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2433 return singlemove_string (operands);
2436 /* Now handle register -> register case. */
2437 if (optype0 == REGOP && optype1 == REGOP
2438 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2440 output_asm_insn (singlemove_string (latehalf), latehalf);
2441 return singlemove_string (operands);
2444 /* Normal case: do the two words, low-numbered first. */
2446 output_asm_insn (singlemove_string (operands), operands);
2448 /* Make any unoffsettable addresses point at high-numbered word. */
2450 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2452 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2455 output_asm_insn (singlemove_string (latehalf), latehalf);
2457 /* Undo the adds we just did. */
2459 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2461 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2467 output_fp_move_double (rtx *operands)
2469 if (FP_REG_P (operands[0]))
2471 if (FP_REG_P (operands[1])
2472 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2473 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2475 output_asm_insn ("fldd%F1 %1,%0", operands);
2477 else if (FP_REG_P (operands[1]))
2479 output_asm_insn ("fstd%F0 %1,%0", operands);
2485 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2487 /* This is a pain. You have to be prepared to deal with an
2488 arbitrary address here including pre/post increment/decrement.
2490 so avoid this in the MD. */
2491 gcc_assert (GET_CODE (operands[0]) == REG);
2493 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2494 xoperands[0] = operands[0];
2495 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2500 /* Return a REG that occurs in ADDR with coefficient 1.
2501 ADDR can be effectively incremented by incrementing REG. */
2504 find_addr_reg (rtx addr)
2506 while (GET_CODE (addr) == PLUS)
2508 if (GET_CODE (XEXP (addr, 0)) == REG)
2509 addr = XEXP (addr, 0);
2510 else if (GET_CODE (XEXP (addr, 1)) == REG)
2511 addr = XEXP (addr, 1);
2512 else if (CONSTANT_P (XEXP (addr, 0)))
2513 addr = XEXP (addr, 1);
2514 else if (CONSTANT_P (XEXP (addr, 1)))
2515 addr = XEXP (addr, 0);
2519 gcc_assert (GET_CODE (addr) == REG);
2523 /* Emit code to perform a block move.
2525 OPERANDS[0] is the destination pointer as a REG, clobbered.
2526 OPERANDS[1] is the source pointer as a REG, clobbered.
2527 OPERANDS[2] is a register for temporary storage.
2528 OPERANDS[3] is a register for temporary storage.
2529 OPERANDS[4] is the size as a CONST_INT
2530 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2531 OPERANDS[6] is another temporary register. */
2534 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2536 int align = INTVAL (operands[5]);
2537 unsigned long n_bytes = INTVAL (operands[4]);
2539 /* We can't move more than a word at a time because the PA
2540 has no integer move insns longer than a word.  (Could use fp mem ops?)  */
2541 if (align > (TARGET_64BIT ? 8 : 4))
2542 align = (TARGET_64BIT ? 8 : 4);
2544 /* Note that we know each loop below will execute at least twice
2545 (else we would have open-coded the copy). */
2549 /* Pre-adjust the loop counter. */
2550 operands[4] = GEN_INT (n_bytes - 16);
2551 output_asm_insn ("ldi %4,%2", operands);
2554 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2555 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2556 output_asm_insn ("std,ma %3,8(%0)", operands);
2557 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2558 output_asm_insn ("std,ma %6,8(%0)", operands);
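/* The five instructions above form the copy loop: the addib,>= drops
   the byte count by 16 and branches back 12 bytes to the first ldd,
   while the second std executes in the addib's delay slot.  */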
2560 /* Handle the residual. There could be up to 7 bytes of
2561 residual to copy! */
2562 if (n_bytes % 16 != 0)
2564 operands[4] = GEN_INT (n_bytes % 8);
2565 if (n_bytes % 16 >= 8)
2566 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2567 if (n_bytes % 8 != 0)
2568 output_asm_insn ("ldd 0(%1),%6", operands);
2569 if (n_bytes % 16 >= 8)
2570 output_asm_insn ("std,ma %3,8(%0)", operands);
2571 if (n_bytes % 8 != 0)
2572 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2577 /* Pre-adjust the loop counter. */
2578 operands[4] = GEN_INT (n_bytes - 8);
2579 output_asm_insn ("ldi %4,%2", operands);
2582 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2583 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2584 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2585 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2586 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2588 /* Handle the residual. There could be up to 7 bytes of
2589 residual to copy! */
2590 if (n_bytes % 8 != 0)
2592 operands[4] = GEN_INT (n_bytes % 4);
2593 if (n_bytes % 8 >= 4)
2594 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2595 if (n_bytes % 4 != 0)
2596 output_asm_insn ("ldw 0(%1),%6", operands);
2597 if (n_bytes % 8 >= 4)
2598 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2599 if (n_bytes % 4 != 0)
2600 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2605 /* Pre-adjust the loop counter. */
2606 operands[4] = GEN_INT (n_bytes - 4);
2607 output_asm_insn ("ldi %4,%2", operands);
2610 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2611 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2612 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2613 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2614 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2616 /* Handle the residual. */
2617 if (n_bytes % 4 != 0)
2619 if (n_bytes % 4 >= 2)
2620 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2621 if (n_bytes % 2 != 0)
2622 output_asm_insn ("ldb 0(%1),%6", operands);
2623 if (n_bytes % 4 >= 2)
2624 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2625 if (n_bytes % 2 != 0)
2626 output_asm_insn ("stb %6,0(%0)", operands);
2631 /* Pre-adjust the loop counter. */
2632 operands[4] = GEN_INT (n_bytes - 2);
2633 output_asm_insn ("ldi %4,%2", operands);
2636 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2637 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2638 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2639 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2640 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2642 /* Handle the residual. */
2643 if (n_bytes % 2 != 0)
2645 output_asm_insn ("ldb 0(%1),%3", operands);
2646 output_asm_insn ("stb %3,0(%0)", operands);
2655 /* Count the number of insns necessary to handle this block move.
2657 Basic structure is the same as output_block_move, except that we
2658 count insns rather than emit them. */
2661 compute_movmem_length (rtx insn)
2663 rtx pat = PATTERN (insn);
2664 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2665 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2666 unsigned int n_insns = 0;
2668 /* We can't move more than a word at a time because the PA
2669 has no integer move insns longer than a word.  (Could use fp mem ops?)  */
2670 if (align > (TARGET_64BIT ? 8 : 4))
2671 align = (TARGET_64BIT ? 8 : 4);
2673 /* The basic copying loop. */
2677 if (n_bytes % (2 * align) != 0)
2679 if ((n_bytes % (2 * align)) >= align)
2682 if ((n_bytes % align) != 0)
2686 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2690 /* Emit code to perform a block clear.
2692 OPERANDS[0] is the destination pointer as a REG, clobbered.
2693 OPERANDS[1] is a register for temporary storage.
2694 OPERANDS[2] is the size as a CONST_INT
2695 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2698 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2700 int align = INTVAL (operands[3]);
2701 unsigned long n_bytes = INTVAL (operands[2]);
2703 /* We can't clear more than a word at a time because the PA
2704 has no integer move insns longer than a word.  */
2705 if (align > (TARGET_64BIT ? 8 : 4))
2706 align = (TARGET_64BIT ? 8 : 4);
2708 /* Note that we know each loop below will execute at least twice
2709 (else we would have open-coded the copy). */
2713 /* Pre-adjust the loop counter. */
2714 operands[2] = GEN_INT (n_bytes - 16);
2715 output_asm_insn ("ldi %2,%1", operands);
2718 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2719 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2720 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2722 /* Handle the residual. There could be up to 7 bytes of
2723 residual to copy! */
2724 if (n_bytes % 16 != 0)
2726 operands[2] = GEN_INT (n_bytes % 8);
2727 if (n_bytes % 16 >= 8)
2728 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2729 if (n_bytes % 8 != 0)
2730 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2735 /* Pre-adjust the loop counter. */
2736 operands[2] = GEN_INT (n_bytes - 8);
2737 output_asm_insn ("ldi %2,%1", operands);
2740 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2741 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2742 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2744 /* Handle the residual. There could be up to 7 bytes of
2745 residual to copy! */
2746 if (n_bytes % 8 != 0)
2748 operands[2] = GEN_INT (n_bytes % 4);
2749 if (n_bytes % 8 >= 4)
2750 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2751 if (n_bytes % 4 != 0)
2752 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2757 /* Pre-adjust the loop counter. */
2758 operands[2] = GEN_INT (n_bytes - 4);
2759 output_asm_insn ("ldi %2,%1", operands);
2762 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2763 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2764 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2766 /* Handle the residual. */
2767 if (n_bytes % 4 != 0)
2769 if (n_bytes % 4 >= 2)
2770 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2771 if (n_bytes % 2 != 0)
2772 output_asm_insn ("stb %%r0,0(%0)", operands);
2777 /* Pre-adjust the loop counter. */
2778 operands[2] = GEN_INT (n_bytes - 2);
2779 output_asm_insn ("ldi %2,%1", operands);
2782 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2783 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2784 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2786 /* Handle the residual. */
2787 if (n_bytes % 2 != 0)
2788 output_asm_insn ("stb %%r0,0(%0)", operands);
2797 /* Count the number of insns necessary to handle this block clear.
2799 Basic structure is the same as output_block_clear, except that we
2800 count insns rather than emit them. */
2803 compute_clrmem_length (rtx insn)
2805 rtx pat = PATTERN (insn);
2806 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2807 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2808 unsigned int n_insns = 0;
2810 /* We can't clear more than a word at a time because the PA
2811 has no integer move insns longer than a word.  */
2812 if (align > (TARGET_64BIT ? 8 : 4))
2813 align = (TARGET_64BIT ? 8 : 4);
2815 /* The basic loop. */
2819 if (n_bytes % (2 * align) != 0)
2821 if ((n_bytes % (2 * align)) >= align)
2824 if ((n_bytes % align) != 0)
2828 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2834 output_and (rtx *operands)
2836 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2838 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2839 int ls0, ls1, ms0, p, len;
2841 for (ls0 = 0; ls0 < 32; ls0++)
2842 if ((mask & (1 << ls0)) == 0)
2845 for (ls1 = ls0; ls1 < 32; ls1++)
2846 if ((mask & (1 << ls1)) != 0)
2849 for (ms0 = ls1; ms0 < 32; ms0++)
2850 if ((mask & (1 << ms0)) == 0)
2853 gcc_assert (ms0 == 32);
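/* So the mask is ones, then a single hole of zeros, then ones again
   (either run possibly empty).  For example, 0x000000ff gives ls0 = 8
   and ls1 = 32 and is handled below with a single extrw,u of the low
   bits, while 0xffff00ff gives ls0 = 8, ls1 = 16, ms0 = 32 and the
   hole is cleared in place with depwi 0.  */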
2861 operands[2] = GEN_INT (len);
2862 return "{extru|extrw,u} %1,31,%2,%0";
2866 /* We could use this `depi' for the case above as well, but `depi'
2867 requires one more register file access than an `extru'. */
2872 operands[2] = GEN_INT (p);
2873 operands[3] = GEN_INT (len);
2874 return "{depi|depwi} 0,%2,%3,%0";
2878 return "and %1,%2,%0";
2881 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2882 storing the result in operands[0]. */
2884 output_64bit_and (rtx *operands)
2886 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2888 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2889 int ls0, ls1, ms0, p, len;
2891 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2892 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2895 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2896 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2899 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2900 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2903 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2905 if (ls1 == HOST_BITS_PER_WIDE_INT)
2911 operands[2] = GEN_INT (len);
2912 return "extrd,u %1,63,%2,%0";
2916 /* We could use this `depi' for the case above as well, but `depi'
2917 requires one more register file access than an `extru'. */
2922 operands[2] = GEN_INT (p);
2923 operands[3] = GEN_INT (len);
2924 return "depdi 0,%2,%3,%0";
2928 return "and %1,%2,%0";
2932 output_ior (rtx *operands)
2934 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2935 int bs0, bs1, p, len;
2937 if (INTVAL (operands[2]) == 0)
2938 return "copy %1,%0";
2940 for (bs0 = 0; bs0 < 32; bs0++)
2941 if ((mask & (1 << bs0)) != 0)
2944 for (bs1 = bs0; bs1 < 32; bs1++)
2945 if ((mask & (1 << bs1)) == 0)
2948 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
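/* The mask is therefore a single contiguous run of ones.  For example,
   0x00ff0000 gives bs0 = 16 and bs1 = 24, and that run is set with one
   depwi -1 below.  */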
2953 operands[2] = GEN_INT (p);
2954 operands[3] = GEN_INT (len);
2955 return "{depi|depwi} -1,%2,%3,%0";
2958 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
2959 storing the result in operands[0]. */
2961 output_64bit_ior (rtx *operands)
2963 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2964 int bs0, bs1, p, len;
2966 if (INTVAL (operands[2]) == 0)
2967 return "copy %1,%0";
2969 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
2970 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
2973 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
2974 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
2977 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
2978 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2983 operands[2] = GEN_INT (p);
2984 operands[3] = GEN_INT (len);
2985 return "depdi -1,%2,%3,%0";
2988 /* Target hook for assembling integer objects. This code handles
2989 aligned SI and DI integers specially since function references
2990 must be preceded by P%. */
2993 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
2995 if (size == UNITS_PER_WORD
2997 && function_label_operand (x, VOIDmode))
2999 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3000 output_addr_const (asm_out_file, x);
3001 fputc ('\n', asm_out_file);
3004 return default_assemble_integer (x, size, aligned_p);
3007 /* Output an ascii string. */
3009 output_ascii (FILE *file, const char *p, int size)
3013 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3015 /* The HP assembler can only take strings of 256 characters at one
3016 time. This is a limitation on input line length, *not* the
3017 length of the string. Sigh. Even worse, it seems that the
3018 restriction is in number of input characters (see \xnn &
3019 \whatever). So we have to do this very carefully. */
3021 fputs ("\t.STRING \"", file);
3024 for (i = 0; i < size; i += 4)
3028 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3030 register unsigned int c = (unsigned char) p[i + io];
3032 if (c == '\"' || c == '\\')
3033 partial_output[co++] = '\\';
3034 if (c >= ' ' && c < 0177)
3035 partial_output[co++] = c;
3039 partial_output[co++] = '\\';
3040 partial_output[co++] = 'x';
3041 hexd = c / 16 - 0 + '0';
3043 hexd -= '9' - 'a' + 1;
3044 partial_output[co++] = hexd;
3045 hexd = c % 16 - 0 + '0';
3047 hexd -= '9' - 'a' + 1;
3048 partial_output[co++] = hexd;
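/* c / 16 and c % 16 are the two nibbles; values above 9 are slid
   from the digits up into 'a'..'f', so e.g. 0x1f is emitted as
   the escape \x1f.  */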
3051 if (chars_output + co > 243)
3053 fputs ("\"\n\t.STRING \"", file);
3056 fwrite (partial_output, 1, (size_t) co, file);
3060 fputs ("\"\n", file);
3063 /* Try to rewrite floating point comparisons & branches to avoid
3064 useless add,tr insns.
3066 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3067 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3068 first attempt to remove useless add,tr insns. It is zero
3069 for the second pass as reorg sometimes leaves bogus REG_DEAD notes.
3072 When CHECK_NOTES is zero we can only eliminate add,tr insns
3073 when there's a 1:1 correspondence between fcmp and ftest/fbranch insns.
3076 remove_useless_addtr_insns (int check_notes)
3079 static int pass = 0;
3081 /* This is fairly cheap, so always run it when optimizing. */
3085 int fbranch_count = 0;
3087 /* Walk all the insns in this function looking for fcmp & fbranch
3088 instructions. Keep track of how many of each we find. */
3089 for (insn = get_insns (); insn; insn = next_insn (insn))
3093 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3094 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3097 tmp = PATTERN (insn);
3099 /* It must be a set. */
3100 if (GET_CODE (tmp) != SET)
3103 /* If the destination is CCFP, then we've found an fcmp insn. */
3104 tmp = SET_DEST (tmp);
3105 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3111 tmp = PATTERN (insn);
3112 /* If this is an fbranch instruction, bump the fbranch counter. */
3113 if (GET_CODE (tmp) == SET
3114 && SET_DEST (tmp) == pc_rtx
3115 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3116 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3117 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3118 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3126 /* Find all floating point compare + branch insns. If possible,
3127 reverse the comparison & the branch to avoid add,tr insns. */
3128 for (insn = get_insns (); insn; insn = next_insn (insn))
3132 /* Ignore anything that isn't an INSN. */
3133 if (GET_CODE (insn) != INSN)
3136 tmp = PATTERN (insn);
3138 /* It must be a set. */
3139 if (GET_CODE (tmp) != SET)
3142 /* The destination must be CCFP, which is register zero. */
3143 tmp = SET_DEST (tmp);
3144 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3147 /* INSN should be a set of CCFP.
3149 See if the result of this insn is used in a reversed FP
3150 conditional branch. If so, reverse our condition and
3151 the branch. Doing so avoids useless add,tr insns. */
3152 next = next_insn (insn);
3155 /* Jumps, calls and labels stop our search. */
3156 if (GET_CODE (next) == JUMP_INSN
3157 || GET_CODE (next) == CALL_INSN
3158 || GET_CODE (next) == CODE_LABEL)
3161 /* As does another fcmp insn. */
3162 if (GET_CODE (next) == INSN
3163 && GET_CODE (PATTERN (next)) == SET
3164 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3165 && REGNO (SET_DEST (PATTERN (next))) == 0)
3168 next = next_insn (next);
3171 /* Is NEXT_INSN a branch? */
3173 && GET_CODE (next) == JUMP_INSN)
3175 rtx pattern = PATTERN (next);
3177 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3178 and CCFP dies, then reverse our conditional and the branch
3179 to avoid the add,tr. */
3180 if (GET_CODE (pattern) == SET
3181 && SET_DEST (pattern) == pc_rtx
3182 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3183 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3184 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3185 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3186 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3187 && (fcmp_count == fbranch_count
3189 && find_regno_note (next, REG_DEAD, 0))))
3191 /* Reverse the branch. */
3192 tmp = XEXP (SET_SRC (pattern), 1);
3193 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3194 XEXP (SET_SRC (pattern), 2) = tmp;
3195 INSN_CODE (next) = -1;
3197 /* Reverse our condition. */
3198 tmp = PATTERN (insn);
3199 PUT_CODE (XEXP (tmp, 1),
3200 (reverse_condition_maybe_unordered
3201 (GET_CODE (XEXP (tmp, 1)))));
3211 /* You may have trouble believing this, but this is the 32 bit HP-PA
3216 Variable arguments (optional; any number may be allocated)
3218 SP-(4*(N+9)) arg word N
3223 Fixed arguments (must be allocated; may remain unused)
3232 SP-32 External Data Pointer (DP)
3234 SP-24 External/stub RP (RP')
3238 SP-8 Calling Stub RP (RP'')
3243 SP-0 Stack Pointer (points to next available address)
3247 /* This function saves registers as follows. Registers marked with ' are
3248 this function's registers (as opposed to the previous function's).
3249 If a frame_pointer isn't needed, r4 is saved as a general register;
3250 the space for the frame pointer is still allocated, though, to keep
3256 SP (FP') Previous FP
3257 SP + 4 Alignment filler (sigh)
3258 SP + 8 Space for locals reserved here.
3262 SP + n All call saved register used.
3266 SP + o All call saved fp registers used.
3270 SP + p (SP') points to next available address.
3274 /* Global variables set by output_function_prologue(). */
3275 /* Size of frame. Need to know this to emit return insns from
3277 static HOST_WIDE_INT actual_fsize, local_fsize;
3278 static int save_fregs;
3280 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3281 Handle case where DISP > 8k by using the add_high_const patterns.
3283 Note in DISP > 8k case, we will leave the high part of the address
3284 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this.*/
3287 store_reg (int reg, HOST_WIDE_INT disp, int base)
3289 rtx insn, dest, src, basereg;
3291 src = gen_rtx_REG (word_mode, reg);
3292 basereg = gen_rtx_REG (Pmode, base);
3293 if (VAL_14_BITS_P (disp))
3295 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3296 insn = emit_move_insn (dest, src);
3298 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3300 rtx delta = GEN_INT (disp);
3301 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3303 emit_move_insn (tmpreg, delta);
3304 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3308 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3309 gen_rtx_SET (VOIDmode, tmpreg,
3310 gen_rtx_PLUS (Pmode, basereg, delta)),
3312 RTX_FRAME_RELATED_P (insn) = 1;
3314 dest = gen_rtx_MEM (word_mode, tmpreg);
3315 insn = emit_move_insn (dest, src);
3319 rtx delta = GEN_INT (disp);
3320 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3321 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3323 emit_move_insn (tmpreg, high);
3324 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3325 insn = emit_move_insn (dest, src);
3329 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3330 gen_rtx_SET (VOIDmode,
3331 gen_rtx_MEM (word_mode,
3332 gen_rtx_PLUS (word_mode, basereg,
3340 RTX_FRAME_RELATED_P (insn) = 1;
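/* Host-side sketch (hypothetical, unused helper) of the split used
   above when DISP does not fit in 14 bits: choose a low part that fits
   a signed BITS-bit displacement and put the remainder in the high
   part that is loaded into %r1 first.  The real L'/R' field selection
   is the assembler's business; this only shows the arithmetic idea.  */

static inline void
example_split_disp (HOST_WIDE_INT disp, int bits,
                    HOST_WIDE_INT *hi, HOST_WIDE_INT *lo)
{
  HOST_WIDE_INT half = (HOST_WIDE_INT) 1 << (bits - 1);

  *lo = ((disp & (2 * half - 1)) ^ half) - half;  /* Signed low field.  */
  *hi = disp - *lo;                               /* Multiple of 1 << BITS.  */
}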
3343 /* Emit RTL to store REG at the memory location specified by BASE and then
3344 add MOD to BASE. MOD must be <= 8k. */
3347 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3349 rtx insn, basereg, srcreg, delta;
3351 gcc_assert (VAL_14_BITS_P (mod));
3353 basereg = gen_rtx_REG (Pmode, base);
3354 srcreg = gen_rtx_REG (word_mode, reg);
3355 delta = GEN_INT (mod);
3357 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3360 RTX_FRAME_RELATED_P (insn) = 1;
3362 /* RTX_FRAME_RELATED_P must be set on each frame related set
3363 in a parallel with more than one element. */
3364 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3365 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3369 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3370 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3371 whether to add a frame note or not.
3373 In the DISP > 8k case, we leave the high part of the address in %r1.
3374 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3377 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3381 if (VAL_14_BITS_P (disp))
3383 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3384 plus_constant (gen_rtx_REG (Pmode, base), disp));
3386 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3388 rtx basereg = gen_rtx_REG (Pmode, base);
3389 rtx delta = GEN_INT (disp);
3390 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3392 emit_move_insn (tmpreg, delta);
3393 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3394 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3397 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3398 gen_rtx_SET (VOIDmode, tmpreg,
3399 gen_rtx_PLUS (Pmode, basereg, delta)),
3404 rtx basereg = gen_rtx_REG (Pmode, base);
3405 rtx delta = GEN_INT (disp);
3406 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3408 emit_move_insn (tmpreg,
3409 gen_rtx_PLUS (Pmode, basereg,
3410 gen_rtx_HIGH (Pmode, delta)));
3411 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3412 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3415 if (DO_FRAME_NOTES && note)
3416 RTX_FRAME_RELATED_P (insn) = 1;
3420 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3425 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3426 be consistent with the rounding and size calculation done here.
3427 Change them at the same time. */
3429 /* We do our own stack alignment. First, round the size of the
3430 stack locals up to a word boundary. */
3431 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3433 /* Space for previous frame pointer + filler. If any frame is
3434 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3435 waste some space here for the sake of HP compatibility. The
3436 first slot is only used when the frame pointer is needed. */
3437 if (size || frame_pointer_needed)
3438 size += STARTING_FRAME_OFFSET;
3440 /* If the current function calls __builtin_eh_return, then we need
3441 to allocate stack space for registers that will hold data for
3442 the exception handler. */
3443 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3447 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3449 size += i * UNITS_PER_WORD;
3452 /* Account for space used by the callee general register saves. */
3453 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3454 if (regs_ever_live[i])
3455 size += UNITS_PER_WORD;
3457 /* Account for space used by the callee floating point register saves. */
3458 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3459 if (regs_ever_live[i]
3460 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3464 /* We always save both halves of the FP register, so always
3465 increment the frame size by 8 bytes. */
3469 /* If any of the floating registers are saved, account for the
3470 alignment needed for the floating point register save block. */
3473 size = (size + 7) & ~7;
3478 /* The various ABIs include space for the outgoing parameters in the
3479 size of the current function's stack frame. We don't need to align
3480 for the outgoing arguments as their alignment is set by the final
3481 rounding for the frame as a whole. */
3482 size += current_function_outgoing_args_size;
3484 /* Allocate space for the fixed frame marker. This space must be
3485 allocated for any function that makes calls or allocates stack space.
3487 if (!current_function_is_leaf || size)
3488 size += TARGET_64BIT ? 48 : 32;
3490 /* Finally, round to the preferred stack boundary. */
3491 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3492 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
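/* The word, doubleword and stack-boundary rounding above all use the
   usual power-of-two round-up idiom; a tiny sketch (hypothetical,
   unused), valid only when N is a power of two:  */

static inline HOST_WIDE_INT
example_round_up (HOST_WIDE_INT x, HOST_WIDE_INT n)
{
  return (x + n - 1) & ~(n - 1);	/* e.g. (10, 8) -> 16, (16, 8) -> 16.  */
}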
3495 /* Generate the assembly code for function entry. FILE is a stdio
3496 stream to output the code to. SIZE is an int: how many units of
3497 temporary storage to allocate.
3499 Refer to the array `regs_ever_live' to determine which registers to
3500 save; `regs_ever_live[I]' is nonzero if register number I is ever
3501 used in the function. This function is responsible for knowing
3502 which registers should not be saved even if used. */
3504 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3505 of memory. If any fpu reg is used in the function, we allocate
3506 such a block here, at the bottom of the frame, just in case it's needed.
3508 If this function is a leaf procedure, then we may choose not
3509 to do a "save" insn. The decision about whether or not
3510 to do this is made in regclass.c. */
3513 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3515 /* The function's label and associated .PROC must never be
3516 separated and must be output *after* any profiling declarations
3517 to avoid changing spaces/subspaces within a procedure. */
3518 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3519 fputs ("\t.PROC\n", file);
3521 /* hppa_expand_prologue does the dirty work now. We just need
3522 to output the assembler directives which denote the start of a function.
3524 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3525 if (regs_ever_live[2])
3526 fputs (",CALLS,SAVE_RP", file);
3528 fputs (",NO_CALLS", file);
3530 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3531 at the beginning of the frame and that it is used as the frame
3532 pointer for the frame. We do this because our current frame
3533 layout doesn't conform to that specified in the HP runtime
3534 documentation and we need a way to indicate to programs such as
3535 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3536 isn't used by HP compilers but is supported by the assembler.
3537 However, SAVE_SP is supposed to indicate that the previous stack
3538 pointer has been saved in the frame marker. */
3539 if (frame_pointer_needed)
3540 fputs (",SAVE_SP", file);
3542 /* Pass on information about the number of callee register saves
3543 performed in the prologue.
3545 The compiler is supposed to pass the highest register number
3546 saved, the assembler then has to adjust that number before
3547 entering it into the unwind descriptor (to account for any
3548 caller saved registers with lower register numbers than the
3549 first callee saved register). */
3551 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3554 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3556 fputs ("\n\t.ENTRY\n", file);
3558 remove_useless_addtr_insns (0);
3562 hppa_expand_prologue (void)
3564 int merge_sp_adjust_with_store = 0;
3565 HOST_WIDE_INT size = get_frame_size ();
3566 HOST_WIDE_INT offset;
3574 /* Compute total size for frame pointer, filler, locals and rounding to
3575 the next word boundary. Similar code appears in compute_frame_size
3576 and must be changed in tandem with this code. */
3577 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3578 if (local_fsize || frame_pointer_needed)
3579 local_fsize += STARTING_FRAME_OFFSET;
3581 actual_fsize = compute_frame_size (size, &save_fregs);
3583 /* Compute a few things we will use often. */
3584 tmpreg = gen_rtx_REG (word_mode, 1);
3586 /* Save RP first. The calling conventions manual states RP will
3587 always be stored into the caller's frame at sp - 20 or sp - 16
3588 depending on which ABI is in use. */
3589 if (regs_ever_live[2] || current_function_calls_eh_return)
3590 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3592 /* Allocate the local frame and set up the frame pointer if needed. */
3593 if (actual_fsize != 0)
3595 if (frame_pointer_needed)
3597 /* Copy the old frame pointer temporarily into %r1. Set up the
3598 new stack pointer, then store away the saved old frame pointer
3599 into the stack at sp and at the same time update the stack
3600 pointer by actual_fsize bytes. Two versions, first
3601 handles small (<8k) frames. The second handles large (>=8k)
3603 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3605 RTX_FRAME_RELATED_P (insn) = 1;
3607 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3609 RTX_FRAME_RELATED_P (insn) = 1;
3611 if (VAL_14_BITS_P (actual_fsize))
3612 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3615 /* It is incorrect to store the saved frame pointer at *sp,
3616 then increment sp (writes beyond the current stack boundary).
3618 So instead use stwm to store at *sp and post-increment the
3619 stack pointer as an atomic operation. Then increment sp to
3620 finish allocating the new frame. */
3621 HOST_WIDE_INT adjust1 = 8192 - 64;
3622 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
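/* For example, with actual_fsize == 20000 the post-modify store
   below saves the old frame pointer and bumps %sp by
   adjust1 == 8128 in a single instruction, and the remaining
   adjust2 == 11872 is added separately.  */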
3624 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3625 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3629 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3630 we need to store the previous stack pointer (frame pointer)
3631 into the frame marker on targets that use the HP unwind
3632 library. This allows the HP unwind library to be used to
3633 unwind GCC frames. However, we are not fully compatible
3634 with the HP library because our frame layout differs from
3635 that specified in the HP runtime specification.
3637 We don't want a frame note on this instruction as the frame
3638 marker moves during dynamic stack allocation.
3640 This instruction also serves as a blockage to prevent
3641 register spills from being scheduled before the stack
3642 pointer is raised. This is necessary as we store
3643 registers using the frame pointer as a base register,
3644 and the frame pointer is set before sp is raised. */
3645 if (TARGET_HPUX_UNWIND_LIBRARY)
3647 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3648 GEN_INT (TARGET_64BIT ? -8 : -4));
3650 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3654 emit_insn (gen_blockage ());
3656 /* no frame pointer needed. */
3659 /* In some cases we can perform the first callee register save
3660 and allocating the stack frame at the same time. If so, just
3661 make a note of it and defer allocating the frame until saving
3662 the callee registers. */
3663 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3664 merge_sp_adjust_with_store = 1;
3665 /* Can not optimize. Adjust the stack frame by actual_fsize
3668 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3673 /* Normal register save.
3675 Do not save the frame pointer in the frame_pointer_needed case. It
3676 was done earlier. */
3677 if (frame_pointer_needed)
3679 offset = local_fsize;
3681 /* Saving the EH return data registers in the frame is the simplest
3682 way to get the frame unwind information emitted. We put them
3683 just before the general registers. */
3684 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3686 unsigned int i, regno;
3690 regno = EH_RETURN_DATA_REGNO (i);
3691 if (regno == INVALID_REGNUM)
3694 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3695 offset += UNITS_PER_WORD;
3699 for (i = 18; i >= 4; i--)
3700 if (regs_ever_live[i] && ! call_used_regs[i])
3702 store_reg (i, offset, FRAME_POINTER_REGNUM);
3703 offset += UNITS_PER_WORD;
3706 /* Account for %r3 which is saved in a special place. */
3709 /* No frame pointer needed. */
3712 offset = local_fsize - actual_fsize;
3714 /* Saving the EH return data registers in the frame is the simplest
3715 way to get the frame unwind information emitted. */
3716 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3718 unsigned int i, regno;
3722 regno = EH_RETURN_DATA_REGNO (i);
3723 if (regno == INVALID_REGNUM)
3726 /* If merge_sp_adjust_with_store is nonzero, then we can
3727 optimize the first save. */
3728 if (merge_sp_adjust_with_store)
3730 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3731 merge_sp_adjust_with_store = 0;
3734 store_reg (regno, offset, STACK_POINTER_REGNUM);
3735 offset += UNITS_PER_WORD;
3739 for (i = 18; i >= 3; i--)
3740 if (regs_ever_live[i] && ! call_used_regs[i])
3742 /* If merge_sp_adjust_with_store is nonzero, then we can
3743 optimize the first GR save. */
3744 if (merge_sp_adjust_with_store)
3746 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3747 merge_sp_adjust_with_store = 0;
3750 store_reg (i, offset, STACK_POINTER_REGNUM);
3751 offset += UNITS_PER_WORD;
3755 /* If we wanted to merge the SP adjustment with a GR save, but we never
3756 did any GR saves, then just emit the adjustment here. */
3757 if (merge_sp_adjust_with_store)
3758 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3762 /* The hppa calling conventions say that %r19, the pic offset
3763 register, is saved at sp - 32 (in this function's frame)
3764 when generating PIC code. FIXME: What is the correct thing
3765 to do for functions which make no calls and allocate no
3766 frame? Do we need to allocate a frame, or can we just omit
3767 the save? For now we'll just omit the save.
3769 We don't want a note on this insn as the frame marker can
3770 move if there is a dynamic stack allocation. */
3771 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3773 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3775 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3779 /* Align pointer properly (doubleword boundary). */
3780 offset = (offset + 7) & ~7;
3782 /* Floating point register store. */
3787 /* First get the frame or stack pointer to the start of the FP register
3789 if (frame_pointer_needed)
3791 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3792 base = frame_pointer_rtx;
3796 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3797 base = stack_pointer_rtx;
3800 /* Now actually save the FP registers. */
3801 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3803 if (regs_ever_live[i]
3804 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3806 rtx addr, insn, reg;
3807 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3808 reg = gen_rtx_REG (DFmode, i);
3809 insn = emit_move_insn (addr, reg);
3812 RTX_FRAME_RELATED_P (insn) = 1;
3815 rtx mem = gen_rtx_MEM (DFmode,
3816 plus_constant (base, offset));
3818 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3819 gen_rtx_SET (VOIDmode, mem, reg),
3824 rtx meml = gen_rtx_MEM (SFmode,
3825 plus_constant (base, offset));
3826 rtx memr = gen_rtx_MEM (SFmode,
3827 plus_constant (base, offset + 4));
3828 rtx regl = gen_rtx_REG (SFmode, i);
3829 rtx regr = gen_rtx_REG (SFmode, i + 1);
3830 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3831 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3834 RTX_FRAME_RELATED_P (setl) = 1;
3835 RTX_FRAME_RELATED_P (setr) = 1;
3836 vec = gen_rtvec (2, setl, setr);
3838 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3839 gen_rtx_SEQUENCE (VOIDmode, vec),
3843 offset += GET_MODE_SIZE (DFmode);
3850 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3851 Handle case where DISP > 8k by using the add_high_const patterns. */
3854 load_reg (int reg, HOST_WIDE_INT disp, int base)
3856 rtx dest = gen_rtx_REG (word_mode, reg);
3857 rtx basereg = gen_rtx_REG (Pmode, base);
3860 if (VAL_14_BITS_P (disp))
3861 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3862 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3864 rtx delta = GEN_INT (disp);
3865 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3867 emit_move_insn (tmpreg, delta);
3868 if (TARGET_DISABLE_INDEXING)
3870 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3871 src = gen_rtx_MEM (word_mode, tmpreg);
3874 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3878 rtx delta = GEN_INT (disp);
3879 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3880 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3882 emit_move_insn (tmpreg, high);
3883 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3886 emit_move_insn (dest, src);
3889 /* Update the total code bytes output to the text section. */
3892 update_total_code_bytes (int nbytes)
3894 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3895 && !IN_NAMED_SECTION_P (cfun->decl))
3897 if (INSN_ADDRESSES_SET_P ())
3899 unsigned long old_total = total_code_bytes;
3901 total_code_bytes += nbytes;
3903 /* Be prepared to handle overflows. */
3904 if (old_total > total_code_bytes)
3905 total_code_bytes = -1;
3908 total_code_bytes = -1;
3912 /* This function generates the assembly code for function exit.
3913 Args are as for output_function_prologue ().
3915 The function epilogue should not depend on the current stack
3916 pointer! It should use the frame pointer only. This is mandatory
3917 because of alloca; we also take advantage of it to omit stack
3918 adjustments before returning. */
3921 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3923 rtx insn = get_last_insn ();
3927 /* hppa_expand_epilogue does the dirty work now. We just need
3928 to output the assembler directives which denote the end of a function.
3931 To make debuggers happy, emit a nop if the epilogue was completely
3932 eliminated due to a volatile call as the last insn in the
3933 current function. That way the return address (in %r2) will
3934 always point to a valid instruction in the current function. */
3936 /* Get the last real insn. */
3937 if (GET_CODE (insn) == NOTE)
3938 insn = prev_real_insn (insn);
3940 /* If it is a sequence, then look inside. */
3941 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3942 insn = XVECEXP (PATTERN (insn), 0, 0);
3944 /* If insn is a CALL_INSN, then it must be a call to a volatile
3945 function (otherwise there would be epilogue insns). */
3946 if (insn && GET_CODE (insn) == CALL_INSN)
3948 fputs ("\tnop\n", file);
3952 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3954 if (TARGET_SOM && TARGET_GAS)
3956 /* We're done with this subspace except possibly for some additional
3957 debug information. Forget that we are in this subspace to ensure
3958 that the next function is output in its own subspace. */
3960 cfun->machine->in_nsubspa = 2;
3963 if (INSN_ADDRESSES_SET_P ())
3965 insn = get_last_nonnote_insn ();
3966 last_address += INSN_ADDRESSES (INSN_UID (insn));
3968 last_address += insn_default_length (insn);
3969 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3970 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
3973 /* Finally, update the total number of code bytes output so far. */
3974 update_total_code_bytes (last_address);
3978 hppa_expand_epilogue (void)
3981 HOST_WIDE_INT offset;
3982 HOST_WIDE_INT ret_off = 0;
3984 int merge_sp_adjust_with_load = 0;
3986 /* We will use this often. */
3987 tmpreg = gen_rtx_REG (word_mode, 1);
3989 /* Try to restore RP early to avoid load/use interlocks when
3990 RP gets used in the return (bv) instruction. This appears to still
3991 be necessary even when we schedule the prologue and epilogue. */
3992 if (regs_ever_live [2] || current_function_calls_eh_return)
3994 ret_off = TARGET_64BIT ? -16 : -20;
3995 if (frame_pointer_needed)
3997 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4002 /* No frame pointer, and stack is smaller than 8k. */
4003 if (VAL_14_BITS_P (ret_off - actual_fsize))
4005 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4011 /* General register restores. */
4012 if (frame_pointer_needed)
4014 offset = local_fsize;
4016 /* If the current function calls __builtin_eh_return, then we need
4017 to restore the saved EH data registers. */
4018 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4020 unsigned int i, regno;
4024 regno = EH_RETURN_DATA_REGNO (i);
4025 if (regno == INVALID_REGNUM)
4028 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4029 offset += UNITS_PER_WORD;
4033 for (i = 18; i >= 4; i--)
4034 if (regs_ever_live[i] && ! call_used_regs[i])
4036 load_reg (i, offset, FRAME_POINTER_REGNUM);
4037 offset += UNITS_PER_WORD;
4042 offset = local_fsize - actual_fsize;
4044 /* If the current function calls __builtin_eh_return, then we need
4045 to restore the saved EH data registers. */
4046 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4048 unsigned int i, regno;
4052 regno = EH_RETURN_DATA_REGNO (i);
4053 if (regno == INVALID_REGNUM)
4056 /* Only for the first load.
4057 merge_sp_adjust_with_load holds the register load
4058 with which we will merge the sp adjustment. */
4059 if (merge_sp_adjust_with_load == 0
4061 && VAL_14_BITS_P (-actual_fsize))
4062 merge_sp_adjust_with_load = regno;
4064 load_reg (regno, offset, STACK_POINTER_REGNUM);
4065 offset += UNITS_PER_WORD;
4069 for (i = 18; i >= 3; i--)
4071 if (regs_ever_live[i] && ! call_used_regs[i])
4073 /* Only for the first load.
4074 merge_sp_adjust_with_load holds the register load
4075 with which we will merge the sp adjustment. */
4076 if (merge_sp_adjust_with_load == 0
4078 && VAL_14_BITS_P (-actual_fsize))
4079 merge_sp_adjust_with_load = i;
4081 load_reg (i, offset, STACK_POINTER_REGNUM);
4082 offset += UNITS_PER_WORD;
4087 /* Align pointer properly (doubleword boundary). */
4088 offset = (offset + 7) & ~7;
4090 /* FP register restores. */
4093 /* Adjust the register to index off of. */
4094 if (frame_pointer_needed)
4095 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4097 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4099 /* Actually do the restores now. */
4100 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4101 if (regs_ever_live[i]
4102 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4104 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4105 rtx dest = gen_rtx_REG (DFmode, i);
4106 emit_move_insn (dest, src);
4110 /* Emit a blockage insn here to keep these insns from being moved to
4111 an earlier spot in the epilogue, or into the main instruction stream.
4113 This is necessary as we must not cut the stack back before all the
4114 restores are finished. */
4115 emit_insn (gen_blockage ());
4117 /* Reset stack pointer (and possibly frame pointer). The stack
4118 pointer is initially set to fp + 64 to avoid a race condition. */
4119 if (frame_pointer_needed)
4121 rtx delta = GEN_INT (-64);
4123 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4124 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4126 /* If we were deferring a callee register restore, do it now. */
4127 else if (merge_sp_adjust_with_load)
4129 rtx delta = GEN_INT (-actual_fsize);
4130 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4132 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4134 else if (actual_fsize != 0)
4135 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4138 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4139 frame greater than 8k), do so now. */
4141 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4143 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4145 rtx sa = EH_RETURN_STACKADJ_RTX;
4147 emit_insn (gen_blockage ());
4148 emit_insn (TARGET_64BIT
4149 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4150 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4155 hppa_pic_save_rtx (void)
4157 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4160 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4161 #define NO_DEFERRED_PROFILE_COUNTERS 0
4165 /* Vector of funcdef numbers. */
4166 static VEC(int,heap) *funcdef_nos;
4168 /* Output deferred profile counters. */
4170 output_deferred_profile_counters (void)
4175 if (VEC_empty (int, funcdef_nos))
4178 switch_to_section (data_section);
4179 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4180 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
4182 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4184 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4185 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4188 VEC_free (int, heap, funcdef_nos);
4192 hppa_profile_hook (int label_no)
4194 /* We use SImode for the address of the function in both 32 and
4195 64-bit code to avoid having to provide DImode versions of the
4196 lcla2 and load_offset_label_address insn patterns. */
4197 rtx reg = gen_reg_rtx (SImode);
4198 rtx label_rtx = gen_label_rtx ();
4199 rtx begin_label_rtx, call_insn;
4200 char begin_label_name[16];
4202 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4204 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4207 emit_move_insn (arg_pointer_rtx,
4208 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4211 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4213 /* The address of the function is loaded into %r25 with an instruction-
4214 relative sequence that avoids the use of relocations. The sequence
4215 is split so that the load_offset_label_address instruction can
4216 occupy the delay slot of the call to _mcount. */
4218 emit_insn (gen_lcla2 (reg, label_rtx));
4220 emit_insn (gen_lcla1 (reg, label_rtx));
4222 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4223 reg, begin_label_rtx, label_rtx));
4225 #if !NO_DEFERRED_PROFILE_COUNTERS
4227 rtx count_label_rtx, addr, r24;
4228 char count_label_name[16];
4230 VEC_safe_push (int, heap, funcdef_nos, label_no);
4231 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4232 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4234 addr = force_reg (Pmode, count_label_rtx);
4235 r24 = gen_rtx_REG (Pmode, 24);
4236 emit_move_insn (r24, addr);
4239 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4240 gen_rtx_SYMBOL_REF (Pmode,
4242 GEN_INT (TARGET_64BIT ? 24 : 12)));
4244 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4249 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4250 gen_rtx_SYMBOL_REF (Pmode,
4252 GEN_INT (TARGET_64BIT ? 16 : 8)));
4256 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4257 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4259 /* Indicate the _mcount call cannot throw, nor will it execute a
4261 REG_NOTES (call_insn)
4262 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4265 /* Fetch the return address for the frame COUNT steps up from
4266 the current frame, after the prologue. FRAMEADDR is the
4267 frame pointer of the COUNT frame.
4269 We want to ignore any export stub remnants here. To handle this,
4270 we examine the code at the return address, and if it is an export
4271 stub, we return a memory rtx for the stub return address stored
4274 The value returned is used in two different ways:
4276 1. To find a function's caller.
4278 2. To change the return address for a function.
4280 This function handles most instances of case 1; however, it will
4281 fail if there are two levels of stubs to execute on the return
4282 path. The only way I believe that can happen is if the return value
4283 needs a parameter relocation, which never happens for C code.
4285 This function handles most instances of case 2; however, it will
4286 fail if we did not originally have stub code on the return path
4287 but will need stub code on the new return path. This can happen if
4288 the caller & callee are both in the main program, but the new
4289 return location is in a shared library. */
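/* Illustrative sketch, not part of the port (names are hypothetical): the
   export-stub test performed in the function below, written as plain C.
   The privilege level occupies the two low-order bits of the return
   pointer, so it is masked off before the four instruction words are
   compared against the stub pattern.  */
#if 0
#include <stdint.h>

static const uint32_t *
strip_privilege_level (uintptr_t return_address)
{
  return (const uint32_t *) (return_address & ~(uintptr_t) 3);
}

static int
looks_like_export_stub (const uint32_t *ins)
{
  return (ins[0] == 0x4bc23fd1		/* ldw -18(sr0,sp),rp */
	  && ins[1] == 0x004010a1	/* ldsid (sr0,rp),r1 */
	  && ins[2] == 0x00011820	/* mtsp r1,sr0 */
	  && ins[3] == 0xe0400002);	/* be,n 0(sr0,rp) */
}
#endif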
4292 return_addr_rtx (int count, rtx frameaddr)
4302 rp = get_hard_reg_initial_val (Pmode, 2);
4304 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4307 saved_rp = gen_reg_rtx (Pmode);
4308 emit_move_insn (saved_rp, rp);
4310 /* Get pointer to the instruction stream. We have to mask out the
4311 privilege level from the two low order bits of the return address
4312 pointer here so that ins will point to the start of the first
4313 instruction that would have been executed if we returned. */
4314 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4315 label = gen_label_rtx ();
4317 /* Check the instruction stream at the normal return address for the
4320 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4321 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4322 0x00011820 | stub+16: mtsp r1,sr0
4323 0xe0400002 | stub+20: be,n 0(sr0,rp)
4325 If it is an export stub, then our return address is really in
4328 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4329 NULL_RTX, SImode, 1);
4330 emit_jump_insn (gen_bne (label));
4332 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4333 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4334 emit_jump_insn (gen_bne (label));
4336 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4337 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4338 emit_jump_insn (gen_bne (label));
4340 /* 0xe0400002 must be specified as -532676606 so that it won't be
4341 rejected as an invalid immediate operand on 64-bit hosts. */
4342 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4343 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
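/* Illustrative sketch, not part of the port: why the branch word above is
   written as a negative decimal.  On a 64-bit host, 0xe0400002 is a
   positive value that fails the sign-extension check applied to SImode
   immediates, whereas the equivalent signed 32-bit value passes.  */
#if 0
#include <assert.h>
#include <stdint.h>

static void
check_stub_branch_word (void)
{
  assert ((int32_t) UINT32_C (0xe0400002) == -532676606);
}
#endif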
4345 /* If there is no export stub then just use the value saved from
4346 the return pointer register. */
4348 emit_jump_insn (gen_bne (label));
4350 /* Here we know that our return address points to an export
4351 stub. We don't want to return the address of the export stub,
4352 but rather the return address of the export stub. That return
4353 address is stored at -24[frameaddr]. */
4355 emit_move_insn (saved_rp,
4357 memory_address (Pmode,
4358 plus_constant (frameaddr,
4365 /* This is only valid once reload has completed because it depends on
4366 knowing exactly how much (if any) frame there is and...
4368 It's only valid if there is no frame marker to de-allocate and...
4370 It's only valid if %r2 hasn't been saved into the caller's frame
4371 (we're not profiling and %r2 isn't live anywhere). */
4373 hppa_can_use_return_insn_p (void)
4375 return (reload_completed
4376 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4377 && ! regs_ever_live[2]
4378 && ! frame_pointer_needed);
4382 emit_bcond_fp (enum rtx_code code, rtx operand0)
4384 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4385 gen_rtx_IF_THEN_ELSE (VOIDmode,
4386 gen_rtx_fmt_ee (code,
4388 gen_rtx_REG (CCFPmode, 0),
4390 gen_rtx_LABEL_REF (VOIDmode, operand0),
4396 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4398 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4399 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4402 /* Adjust the cost of a scheduling dependency. Return the new cost of
4403 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4406 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4408 enum attr_type attr_type;
4410 /* Don't adjust costs for a pa8000 chip; also do not adjust any
4411 true dependencies, as they are described with bypasses now. */
4412 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4415 if (! recog_memoized (insn))
4418 attr_type = get_attr_type (insn);
4420 switch (REG_NOTE_KIND (link))
4423 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4426 if (attr_type == TYPE_FPLOAD)
4428 rtx pat = PATTERN (insn);
4429 rtx dep_pat = PATTERN (dep_insn);
4430 if (GET_CODE (pat) == PARALLEL)
4432 /* This happens for the fldXs,mb patterns. */
4433 pat = XVECEXP (pat, 0, 0);
4435 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4436 /* If this happens, we have to extend this to schedule
4437 optimally. Return 0 for now. */
4440 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4442 if (! recog_memoized (dep_insn))
4444 switch (get_attr_type (dep_insn))
4451 case TYPE_FPSQRTSGL:
4452 case TYPE_FPSQRTDBL:
4453 /* A fpload can't be issued until one cycle before a
4454 preceding arithmetic operation has finished if
4455 the target of the fpload is any of the sources
4456 (or destination) of the arithmetic operation. */
4457 return insn_default_latency (dep_insn) - 1;
4464 else if (attr_type == TYPE_FPALU)
4466 rtx pat = PATTERN (insn);
4467 rtx dep_pat = PATTERN (dep_insn);
4468 if (GET_CODE (pat) == PARALLEL)
4470 /* This happens for the fldXs,mb patterns. */
4471 pat = XVECEXP (pat, 0, 0);
4473 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4474 /* If this happens, we have to extend this to schedule
4475 optimally. Return 0 for now. */
4478 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4480 if (! recog_memoized (dep_insn))
4482 switch (get_attr_type (dep_insn))
4486 case TYPE_FPSQRTSGL:
4487 case TYPE_FPSQRTDBL:
4488 /* An ALU flop can't be issued until two cycles before a
4489 preceding divide or sqrt operation has finished if
4490 the target of the ALU flop is any of the sources
4491 (or destination) of the divide or sqrt operation. */
4492 return insn_default_latency (dep_insn) - 2;
4500 /* For other anti dependencies, the cost is 0. */
4503 case REG_DEP_OUTPUT:
4504 /* Output dependency; DEP_INSN writes a register that INSN writes some
4506 if (attr_type == TYPE_FPLOAD)
4508 rtx pat = PATTERN (insn);
4509 rtx dep_pat = PATTERN (dep_insn);
4510 if (GET_CODE (pat) == PARALLEL)
4512 /* This happens for the fldXs,mb patterns. */
4513 pat = XVECEXP (pat, 0, 0);
4515 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4516 /* If this happens, we have to extend this to schedule
4517 optimally. Return 0 for now. */
4520 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4522 if (! recog_memoized (dep_insn))
4524 switch (get_attr_type (dep_insn))
4531 case TYPE_FPSQRTSGL:
4532 case TYPE_FPSQRTDBL:
4533 /* A fpload can't be issued until one cycle before a
4534 preceding arithmetic operation has finished if
4535 the target of the fpload is the destination of the
4536 arithmetic operation.
4538 Exception: For PA7100LC, PA7200 and PA7300, the cost
4539 is 3 cycles, unless they bundle together. We also
4540 pay the penalty if the second insn is a fpload. */
4541 return insn_default_latency (dep_insn) - 1;
4548 else if (attr_type == TYPE_FPALU)
4550 rtx pat = PATTERN (insn);
4551 rtx dep_pat = PATTERN (dep_insn);
4552 if (GET_CODE (pat) == PARALLEL)
4554 /* This happens for the fldXs,mb patterns. */
4555 pat = XVECEXP (pat, 0, 0);
4557 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4558 /* If this happens, we have to extend this to schedule
4559 optimally. Return 0 for now. */
4562 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4564 if (! recog_memoized (dep_insn))
4566 switch (get_attr_type (dep_insn))
4570 case TYPE_FPSQRTSGL:
4571 case TYPE_FPSQRTDBL:
4572 /* An ALU flop can't be issued until two cycles before a
4573 preceding divide or sqrt operation has finished if
4574 the target of the ALU flop is also the target of
4575 the divide or sqrt operation. */
4576 return insn_default_latency (dep_insn) - 2;
4584 /* For other output dependencies, the cost is 0. */
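/* Illustrative sketch, not part of the port (helper name is hypothetical):
   the latency adjustments applied in the cases above.  An fpload whose
   target register conflicts with a preceding FP arithmetic op may issue
   one cycle before that op finishes; an ALU flop conflicting with a
   preceding divide or sqrt may issue two cycles early.  */
#if 0
static int
sketch_adjusted_cost (int dep_latency, int consumer_is_fpload)
{
  return dep_latency - (consumer_is_fpload ? 1 : 2);
}
#endif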
4592 /* Adjust scheduling priorities. We use this to try to keep addil
4593 and the next use of %r1 close together. */
4595 pa_adjust_priority (rtx insn, int priority)
4597 rtx set = single_set (insn);
4601 src = SET_SRC (set);
4602 dest = SET_DEST (set);
4603 if (GET_CODE (src) == LO_SUM
4604 && symbolic_operand (XEXP (src, 1), VOIDmode)
4605 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4608 else if (GET_CODE (src) == MEM
4609 && GET_CODE (XEXP (src, 0)) == LO_SUM
4610 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4611 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4614 else if (GET_CODE (dest) == MEM
4615 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4616 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4617 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4623 /* The 700 can only issue a single insn at a time.
4624 The 7XXX processors can issue two insns at a time.
4625 The 8000 can issue 4 insns at a time. */
4627 pa_issue_rate (void)
4631 case PROCESSOR_700: return 1;
4632 case PROCESSOR_7100: return 2;
4633 case PROCESSOR_7100LC: return 2;
4634 case PROCESSOR_7200: return 2;
4635 case PROCESSOR_7300: return 2;
4636 case PROCESSOR_8000: return 4;
4645 /* Return any length adjustment needed by INSN which already has its length
4646 computed as LENGTH. Return zero if no adjustment is necessary.
4648 For the PA: function calls, millicode calls, and backwards short
4649 conditional branches with unfilled delay slots need an adjustment by +1
4650 (to account for the NOP which will be inserted into the instruction stream).
4652 Also compute the length of an inline block move here as it is too
4653 complicated to express as a length attribute in pa.md. */
4655 pa_adjust_insn_length (rtx insn, int length)
4657 rtx pat = PATTERN (insn);
4659 /* Jumps inside switch tables which have unfilled delay slots need
4661 if (GET_CODE (insn) == JUMP_INSN
4662 && GET_CODE (pat) == PARALLEL
4663 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4665 /* Millicode insn with an unfilled delay slot. */
4666 else if (GET_CODE (insn) == INSN
4667 && GET_CODE (pat) != SEQUENCE
4668 && GET_CODE (pat) != USE
4669 && GET_CODE (pat) != CLOBBER
4670 && get_attr_type (insn) == TYPE_MILLI)
4672 /* Block move pattern. */
4673 else if (GET_CODE (insn) == INSN
4674 && GET_CODE (pat) == PARALLEL
4675 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4676 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4677 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4678 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4679 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4680 return compute_movmem_length (insn) - 4;
4681 /* Block clear pattern. */
4682 else if (GET_CODE (insn) == INSN
4683 && GET_CODE (pat) == PARALLEL
4684 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4685 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4686 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4687 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4688 return compute_clrmem_length (insn) - 4;
4689 /* Conditional branch with an unfilled delay slot. */
4690 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4692 /* Adjust a short backwards conditional with an unfilled delay slot. */
4693 if (GET_CODE (pat) == SET
4695 && ! forward_branch_p (insn))
4697 else if (GET_CODE (pat) == PARALLEL
4698 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4701 /* Adjust dbra insn with short backwards conditional branch with
4702 unfilled delay slot -- only for the case where the counter is in a
4703 general register. */
4704 else if (GET_CODE (pat) == PARALLEL
4705 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4706 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4707 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4709 && ! forward_branch_p (insn))
4717 /* Print operand X (an rtx) in assembler syntax to file FILE.
4718 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4719 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4722 print_operand (FILE *file, rtx x, int code)
4727 /* Output a 'nop' if there's nothing for the delay slot. */
4728 if (dbr_sequence_length () == 0)
4729 fputs ("\n\tnop", file);
4732 /* Output a nullification completer if there's nothing for the
4733 delay slot or nullification is requested. */
4734 if (dbr_sequence_length () == 0 ||
4736 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4740 /* Print out the second register name of a register pair.
4741 I.e., R (6) => 7. */
4742 fputs (reg_names[REGNO (x) + 1], file);
4745 /* A register or zero. */
4747 || (x == CONST0_RTX (DFmode))
4748 || (x == CONST0_RTX (SFmode)))
4750 fputs ("%r0", file);
4756 /* A register or zero (floating point). */
4758 || (x == CONST0_RTX (DFmode))
4759 || (x == CONST0_RTX (SFmode)))
4761 fputs ("%fr0", file);
4770 xoperands[0] = XEXP (XEXP (x, 0), 0);
4771 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4772 output_global_address (file, xoperands[1], 0);
4773 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4777 case 'C': /* Plain (C)ondition */
4779 switch (GET_CODE (x))
4782 fputs ("=", file); break;
4784 fputs ("<>", file); break;
4786 fputs (">", file); break;
4788 fputs (">=", file); break;
4790 fputs (">>=", file); break;
4792 fputs (">>", file); break;
4794 fputs ("<", file); break;
4796 fputs ("<=", file); break;
4798 fputs ("<<=", file); break;
4800 fputs ("<<", file); break;
4805 case 'N': /* Condition, (N)egated */
4806 switch (GET_CODE (x))
4809 fputs ("<>", file); break;
4811 fputs ("=", file); break;
4813 fputs ("<=", file); break;
4815 fputs ("<", file); break;
4817 fputs ("<<", file); break;
4819 fputs ("<<=", file); break;
4821 fputs (">=", file); break;
4823 fputs (">", file); break;
4825 fputs (">>", file); break;
4827 fputs (">>=", file); break;
4832 /* For floating point comparisons. Note that the output
4833 predicates are the complement of the desired mode. The
4834 conditions for GT, GE, LT, LE and LTGT cause an invalid
4835 operation exception if the result is unordered and this
4836 exception is enabled in the floating-point status register. */
4838 switch (GET_CODE (x))
4841 fputs ("!=", file); break;
4843 fputs ("=", file); break;
4845 fputs ("!>", file); break;
4847 fputs ("!>=", file); break;
4849 fputs ("!<", file); break;
4851 fputs ("!<=", file); break;
4853 fputs ("!<>", file); break;
4855 fputs ("!?<=", file); break;
4857 fputs ("!?<", file); break;
4859 fputs ("!?>=", file); break;
4861 fputs ("!?>", file); break;
4863 fputs ("!?=", file); break;
4865 fputs ("!?", file); break;
4867 fputs ("?", file); break;
4872 case 'S': /* Condition, operands are (S)wapped. */
4873 switch (GET_CODE (x))
4876 fputs ("=", file); break;
4878 fputs ("<>", file); break;
4880 fputs ("<", file); break;
4882 fputs ("<=", file); break;
4884 fputs ("<<=", file); break;
4886 fputs ("<<", file); break;
4888 fputs (">", file); break;
4890 fputs (">=", file); break;
4892 fputs (">>=", file); break;
4894 fputs (">>", file); break;
4899 case 'B': /* Condition, (B)oth swapped and negate. */
4900 switch (GET_CODE (x))
4903 fputs ("<>", file); break;
4905 fputs ("=", file); break;
4907 fputs (">=", file); break;
4909 fputs (">", file); break;
4911 fputs (">>", file); break;
4913 fputs (">>=", file); break;
4915 fputs ("<=", file); break;
4917 fputs ("<", file); break;
4919 fputs ("<<", file); break;
4921 fputs ("<<=", file); break;
4927 gcc_assert (GET_CODE (x) == CONST_INT);
4928 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4931 gcc_assert (GET_CODE (x) == CONST_INT);
4932 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4935 gcc_assert (GET_CODE (x) == CONST_INT);
4936 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4939 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4940 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4943 gcc_assert (GET_CODE (x) == CONST_INT);
4944 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4947 gcc_assert (GET_CODE (x) == CONST_INT);
4948 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4951 if (GET_CODE (x) == CONST_INT)
4956 switch (GET_CODE (XEXP (x, 0)))
4960 if (ASSEMBLER_DIALECT == 0)
4961 fputs ("s,mb", file);
4963 fputs (",mb", file);
4967 if (ASSEMBLER_DIALECT == 0)
4968 fputs ("s,ma", file);
4970 fputs (",ma", file);
4973 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4974 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4976 if (ASSEMBLER_DIALECT == 0)
4979 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4980 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4982 if (ASSEMBLER_DIALECT == 0)
4983 fputs ("x,s", file);
4987 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4991 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4997 output_global_address (file, x, 0);
5000 output_global_address (file, x, 1);
5002 case 0: /* Don't do anything special */
5007 compute_zdepwi_operands (INTVAL (x), op);
5008 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5014 compute_zdepdi_operands (INTVAL (x), op);
5015 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5019 /* We can get here from a .vtable_inherit due to our
5020 CONSTANT_ADDRESS_P rejecting perfectly good constant
5026 if (GET_CODE (x) == REG)
5028 fputs (reg_names [REGNO (x)], file);
5029 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5035 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5036 && (REGNO (x) & 1) == 0)
5039 else if (GET_CODE (x) == MEM)
5041 int size = GET_MODE_SIZE (GET_MODE (x));
5042 rtx base = NULL_RTX;
5043 switch (GET_CODE (XEXP (x, 0)))
5047 base = XEXP (XEXP (x, 0), 0);
5048 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5052 base = XEXP (XEXP (x, 0), 0);
5053 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5056 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5057 fprintf (file, "%s(%s)",
5058 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5059 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5060 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5061 fprintf (file, "%s(%s)",
5062 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5063 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5064 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5065 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5067 /* Because the REG_POINTER flag can get lost during reload,
5068 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5069 index and base registers in the combined move patterns. */
5070 rtx base = XEXP (XEXP (x, 0), 1);
5071 rtx index = XEXP (XEXP (x, 0), 0);
5073 fprintf (file, "%s(%s)",
5074 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5077 output_address (XEXP (x, 0));
5080 output_address (XEXP (x, 0));
5085 output_addr_const (file, x);
5088 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5091 output_global_address (FILE *file, rtx x, int round_constant)
5094 /* Imagine (high (const (plus ...))). */
5095 if (GET_CODE (x) == HIGH)
5098 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5099 output_addr_const (file, x);
5100 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5102 output_addr_const (file, x);
5103 fputs ("-$global$", file);
5105 else if (GET_CODE (x) == CONST)
5107 const char *sep = "";
5108 int offset = 0; /* assembler wants -$global$ at end */
5109 rtx base = NULL_RTX;
5111 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5114 base = XEXP (XEXP (x, 0), 0);
5115 output_addr_const (file, base);
5118 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5124 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5127 base = XEXP (XEXP (x, 0), 1);
5128 output_addr_const (file, base);
5131 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5137 /* How bogus. The compiler is apparently responsible for
5138 rounding the constant if it uses an LR field selector.
5140 The linker and/or assembler seem a better place since
5141 they have to do this kind of thing already.
5143 If we fail to do this, HP's optimizing linker may eliminate
5144 an addil, but not update the ldw/stw/ldo instruction that
5145 uses the result of the addil. */
5147 offset = ((offset + 0x1000) & ~0x1fff);
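/* Illustrative sketch, not part of the port: the effect of the rounding
   above.  The constant is rounded to the nearest multiple of 0x2000, which
   keeps the low part the assembler folds into the dependent instruction
   within [-0x1000, 0xfff].  */
#if 0
static void
split_rounded_offset (int offset, int *left, int *right)
{
  *left = (offset + 0x1000) & ~0x1fff;
  *right = offset - *left;	/* always in [-0x1000, 0xfff] */
}
#endif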
5149 switch (GET_CODE (XEXP (x, 0)))
5162 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5170 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5171 fputs ("-$global$", file);
5173 fprintf (file, "%s%d", sep, offset);
5176 output_addr_const (file, x);
5179 /* Output boilerplate text to appear at the beginning of the file.
5180 There are several possible versions. */
5181 #define aputs(x) fputs(x, asm_out_file)
5183 pa_file_start_level (void)
5186 aputs ("\t.LEVEL 2.0w\n");
5187 else if (TARGET_PA_20)
5188 aputs ("\t.LEVEL 2.0\n");
5189 else if (TARGET_PA_11)
5190 aputs ("\t.LEVEL 1.1\n");
5192 aputs ("\t.LEVEL 1.0\n");
5196 pa_file_start_space (int sortspace)
5198 aputs ("\t.SPACE $PRIVATE$");
5201 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5202 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5203 "\n\t.SPACE $TEXT$");
5206 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5207 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5211 pa_file_start_file (int want_version)
5213 if (write_symbols != NO_DEBUG)
5215 output_file_directive (asm_out_file, main_input_filename);
5217 aputs ("\t.version\t\"01.01\"\n");
5222 pa_file_start_mcount (const char *aswhat)
5225 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5229 pa_elf_file_start (void)
5231 pa_file_start_level ();
5232 pa_file_start_mcount ("ENTRY");
5233 pa_file_start_file (0);
5237 pa_som_file_start (void)
5239 pa_file_start_level ();
5240 pa_file_start_space (0);
5241 aputs ("\t.IMPORT $global$,DATA\n"
5242 "\t.IMPORT $$dyncall,MILLICODE\n");
5243 pa_file_start_mcount ("CODE");
5244 pa_file_start_file (0);
5248 pa_linux_file_start (void)
5250 pa_file_start_file (1);
5251 pa_file_start_level ();
5252 pa_file_start_mcount ("CODE");
5256 pa_hpux64_gas_file_start (void)
5258 pa_file_start_level ();
5259 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5261 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5263 pa_file_start_file (1);
5267 pa_hpux64_hpas_file_start (void)
5269 pa_file_start_level ();
5270 pa_file_start_space (1);
5271 pa_file_start_mcount ("CODE");
5272 pa_file_start_file (0);
5276 /* Search the deferred plabel list for SYMBOL and return its internal
5277 label. If an entry for SYMBOL is not found, a new entry is created. */
5280 get_deferred_plabel (rtx symbol)
5282 const char *fname = XSTR (symbol, 0);
5285 /* See if we have already put this function on the list of deferred
5286 plabels. This list is generally small, so a linear search is not
5287 too ugly. If it proves too slow replace it with something faster. */
5288 for (i = 0; i < n_deferred_plabels; i++)
5289 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5292 /* If the deferred plabel list is empty, or this entry was not found
5293 on the list, create a new entry on the list. */
5294 if (deferred_plabels == NULL || i == n_deferred_plabels)
5298 if (deferred_plabels == 0)
5299 deferred_plabels = (struct deferred_plabel *)
5300 ggc_alloc (sizeof (struct deferred_plabel));
5302 deferred_plabels = (struct deferred_plabel *)
5303 ggc_realloc (deferred_plabels,
5304 ((n_deferred_plabels + 1)
5305 * sizeof (struct deferred_plabel)));
5307 i = n_deferred_plabels++;
5308 deferred_plabels[i].internal_label = gen_label_rtx ();
5309 deferred_plabels[i].symbol = symbol;
5311 /* Gross. We have just implicitly taken the address of this
5312 function. Mark it in the same manner as assemble_name. */
5313 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5315 mark_referenced (id);
5318 return deferred_plabels[i].internal_label;
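/* Illustrative sketch, not part of the port (types and names are
   hypothetical): the lookup-or-append scheme used above, i.e. a linear
   scan of a small array that grows by one element when the name is not
   already present.  Error handling is omitted for brevity.  */
#if 0
#include <stdlib.h>
#include <string.h>

struct entry { const char *name; };

static struct entry *entries;
static size_t n_entries;

static struct entry *
lookup_or_add (const char *name)
{
  size_t i;

  for (i = 0; i < n_entries; i++)
    if (strcmp (name, entries[i].name) == 0)
      return &entries[i];

  entries = realloc (entries, (n_entries + 1) * sizeof *entries);
  entries[n_entries].name = name;
  return &entries[n_entries++];
}
#endif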
5322 output_deferred_plabels (void)
5326 /* If we have some deferred plabels, then we need to switch into the
5327 data or readonly data section, and align it to a 4 byte boundary
5328 before outputting the deferred plabels. */
5329 if (n_deferred_plabels)
5331 switch_to_section (flag_pic ? data_section : readonly_data_section);
5332 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5335 /* Now output the deferred plabels. */
5336 for (i = 0; i < n_deferred_plabels; i++)
5338 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5339 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5340 assemble_integer (deferred_plabels[i].symbol,
5341 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5345 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5346 /* Initialize optabs to point to HPUX long double emulation routines. */
5348 pa_hpux_init_libfuncs (void)
5350 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5351 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5352 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5353 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5354 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5355 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5356 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5357 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5358 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5360 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5361 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5362 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5363 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5364 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5365 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5366 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5368 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5369 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5370 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5371 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5373 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5374 ? "__U_Qfcnvfxt_quad_to_sgl"
5375 : "_U_Qfcnvfxt_quad_to_sgl");
5376 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5377 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5378 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5380 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5381 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5382 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5383 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5387 /* HP's millicode routines mean something special to the assembler.
5388 Keep track of which ones we have used. */
5390 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5391 static void import_milli (enum millicodes);
5392 static char imported[(int) end1000];
5393 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5394 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5395 #define MILLI_START 10
5398 import_milli (enum millicodes code)
5400 char str[sizeof (import_string)];
5402 if (!imported[(int) code])
5404 imported[(int) code] = 1;
5405 strcpy (str, import_string);
5406 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5407 output_asm_insn (str, 0);
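/* Illustrative sketch, not part of the port: how the import template is
   patched.  MILLI_START (10) is the index of the first '.' placeholder in
   ".IMPORT $$....,MILLICODE", and every millicode name is exactly four
   characters, so strncpy overwrites the placeholder in place.  */
#if 0
#include <stdio.h>
#include <string.h>

static void
demo_import_string (void)
{
  char str[] = ".IMPORT $$....,MILLICODE";

  strncpy (str + 10, "mulI", 4);
  puts (str);			/* prints ".IMPORT $$mulI,MILLICODE" */
}
#endif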
5411 /* The register constraints have put the operands and return value in
5412 the proper registers. */
5415 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5417 import_milli (mulI);
5418 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5421 /* Emit the rtl for doing a division by a constant. */
5423 /* Do magic division millicodes exist for this value? */
5424 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5426 /* We'll use an array to keep track of the magic millicodes and
5427 whether or not we've used them already. [n][0] is signed, [n][1] is
5430 static int div_milli[16][2];
5433 emit_hpdiv_const (rtx *operands, int unsignedp)
5435 if (GET_CODE (operands[2]) == CONST_INT
5436 && INTVAL (operands[2]) > 0
5437 && INTVAL (operands[2]) < 16
5438 && magic_milli[INTVAL (operands[2])])
5440 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5442 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5446 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5447 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5449 gen_rtx_REG (SImode, 26),
5451 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5452 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5453 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5454 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5455 gen_rtx_CLOBBER (VOIDmode, ret))));
5456 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5463 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5467 /* If the divisor is a constant, try to use one of the special
5469 if (GET_CODE (operands[0]) == CONST_INT)
5471 static char buf[100];
5472 divisor = INTVAL (operands[0]);
5473 if (!div_milli[divisor][unsignedp])
5475 div_milli[divisor][unsignedp] = 1;
5477 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5479 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5483 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5484 INTVAL (operands[0]));
5485 return output_millicode_call (insn,
5486 gen_rtx_SYMBOL_REF (SImode, buf));
5490 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5491 INTVAL (operands[0]));
5492 return output_millicode_call (insn,
5493 gen_rtx_SYMBOL_REF (SImode, buf));
5496 /* Divisor isn't a special constant. */
5501 import_milli (divU);
5502 return output_millicode_call (insn,
5503 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5507 import_milli (divI);
5508 return output_millicode_call (insn,
5509 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5514 /* Output a $$rem millicode to do mod. */
5517 output_mod_insn (int unsignedp, rtx insn)
5521 import_milli (remU);
5522 return output_millicode_call (insn,
5523 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5527 import_milli (remI);
5528 return output_millicode_call (insn,
5529 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5534 output_arg_descriptor (rtx call_insn)
5536 const char *arg_regs[4];
5537 enum machine_mode arg_mode;
5539 int i, output_flag = 0;
5542 /* We neither need nor want argument location descriptors for the
5543 64bit runtime environment or the ELF32 environment. */
5544 if (TARGET_64BIT || TARGET_ELF32)
5547 for (i = 0; i < 4; i++)
5550 /* Specify explicitly that no argument relocations should take place
5551 if using the portable runtime calling conventions. */
5552 if (TARGET_PORTABLE_RUNTIME)
5554 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5559 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5560 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5561 link; link = XEXP (link, 1))
5563 rtx use = XEXP (link, 0);
5565 if (! (GET_CODE (use) == USE
5566 && GET_CODE (XEXP (use, 0)) == REG
5567 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5570 arg_mode = GET_MODE (XEXP (use, 0));
5571 regno = REGNO (XEXP (use, 0));
5572 if (regno >= 23 && regno <= 26)
5574 arg_regs[26 - regno] = "GR";
5575 if (arg_mode == DImode)
5576 arg_regs[25 - regno] = "GR";
5578 else if (regno >= 32 && regno <= 39)
5580 if (arg_mode == SFmode)
5581 arg_regs[(regno - 32) / 2] = "FR";
5584 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5585 arg_regs[(regno - 34) / 2] = "FR";
5586 arg_regs[(regno - 34) / 2 + 1] = "FU";
5588 arg_regs[(regno - 34) / 2] = "FU";
5589 arg_regs[(regno - 34) / 2 + 1] = "FR";
5594 fputs ("\t.CALL ", asm_out_file);
5595 for (i = 0; i < 4; i++)
5600 fputc (',', asm_out_file);
5601 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5604 fputc ('\n', asm_out_file);
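/* Illustrative sketch, not part of the port (helper name is hypothetical):
   the register-to-ARGW mapping used above.  General argument registers
   %r26..%r23 occupy ARGW0..ARGW3, and single-precision FP arguments use
   register numbers starting at 32, two numbers per FP register, so they
   land in slot (regno - 32) / 2.  */
#if 0
static int
argw_slot (int regno, int is_fp_single)
{
  if (is_fp_single)
    return (regno - 32) / 2;	/* FP arg registers, two regnos each */
  return 26 - regno;		/* %r26 -> ARGW0 ... %r23 -> ARGW3 */
}
#endif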
5607 static enum reg_class
5608 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5609 enum machine_mode mode, secondary_reload_info *sri)
5611 int is_symbolic, regno;
5613 /* Handle the easy stuff first. */
5614 if (class == R1_REGS)
5620 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5626 /* If we have something like (mem (mem (...))), we can safely assume the
5627 inner MEM will end up in a general register after reloading, so there's
5628 no need for a secondary reload. */
5629 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5632 /* Trying to load a constant into a FP register during PIC code
5633 generation requires %r1 as a scratch register. */
5635 && (mode == SImode || mode == DImode)
5636 && FP_REG_CLASS_P (class)
5637 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5639 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5640 : CODE_FOR_reload_indi_r1);
5644 /* Profiling showed the PA port spends about 1.3% of its compilation
5645 time in true_regnum from calls inside pa_secondary_reload_class. */
5646 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5647 regno = true_regnum (x);
5649 /* Handle out of range displacement for integer mode loads/stores of
5651 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5652 && GET_MODE_CLASS (mode) == MODE_INT
5653 && FP_REG_CLASS_P (class))
5654 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5656 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5660 /* A SAR<->FP register copy requires a secondary register (GPR) as
5661 well as secondary memory. */
5662 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5663 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5664 || (class == SHIFT_REGS
5665 && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5667 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5671 /* Secondary reloads of symbolic operands require %r1 as a scratch
5672 register when we're generating PIC code and the operand isn't
5674 if (GET_CODE (x) == HIGH)
5677 /* Profiling has shown that GCC spends about 2.6% of its compilation
5678 time in symbolic_operand from calls inside pa_secondary_reload_class.
5679 So, we use an inline copy to avoid useless work. */
5680 switch (GET_CODE (x))
5685 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5692 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5693 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5694 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5695 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5702 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5704 gcc_assert (mode == SImode || mode == DImode);
5705 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5706 : CODE_FOR_reload_indi_r1);
5712 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5713 by invisible reference. As a GCC extension, we also pass anything
5714 with a zero or variable size by reference.
5716 The 64-bit runtime does not describe passing any types by invisible
5717 reference. The internals of GCC can't currently handle passing
5718 empty structures, and zero or variable length arrays when they are
5719 not passed entirely on the stack or by reference. Thus, as a GCC
5720 extension, we pass these types by reference. The HP compiler doesn't
5721 support these types, so hopefully there shouldn't be any compatibility
5722 issues. This may have to be revisited when HP releases a C99 compiler
5723 or updates the ABI. */
5726 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5727 enum machine_mode mode, tree type,
5728 bool named ATTRIBUTE_UNUSED)
5733 size = int_size_in_bytes (type);
5735 size = GET_MODE_SIZE (mode);
5740 return size <= 0 || size > 8;
5744 function_arg_padding (enum machine_mode mode, tree type)
5747 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5749 /* Return none if justification is not required. */
5751 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5752 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5755 /* The directions set here are ignored when a BLKmode argument larger
5756 than a word is placed in a register. Different code is used for
5757 the stack and registers. This makes it difficult to have a
5758 consistent data representation for both the stack and registers.
5759 For both runtimes, the justification and padding for arguments on
5760 the stack and in registers should be identical. */
5762 /* The 64-bit runtime specifies left justification for aggregates. */
5765 /* The 32-bit runtime architecture specifies right justification.
5766 When the argument is passed on the stack, the argument is padded
5767 with garbage on the left. The HP compiler pads with zeros. */
5771 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5778 /* Do what is necessary for `va_start'. We look at the current function
5779 to determine if stdargs or varargs is used and fill in an initial
5780 va_list. A pointer to this constructor is returned. */
5783 hppa_builtin_saveregs (void)
5786 tree fntype = TREE_TYPE (current_function_decl);
5787 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5788 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5789 != void_type_node)))
5790 ? UNITS_PER_WORD : 0);
5793 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5795 offset = current_function_arg_offset_rtx;
5801 /* Adjust for varargs/stdarg differences. */
5803 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5805 offset = current_function_arg_offset_rtx;
5807 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5808 from the incoming arg pointer and growing to larger addresses. */
5809 for (i = 26, off = -64; i >= 19; i--, off += 8)
5810 emit_move_insn (gen_rtx_MEM (word_mode,
5811 plus_constant (arg_pointer_rtx, off)),
5812 gen_rtx_REG (word_mode, i));
5814 /* The incoming args pointer points just beyond the flushback area;
5815 normally this is not a serious concern. However, when we are doing
5816 varargs/stdargs we want to make the arg pointer point to the start
5817 of the incoming argument area. */
5818 emit_move_insn (virtual_incoming_args_rtx,
5819 plus_constant (arg_pointer_rtx, -64));
5821 /* Now return a pointer to the first anonymous argument. */
5822 return copy_to_reg (expand_binop (Pmode, add_optab,
5823 virtual_incoming_args_rtx,
5824 offset, 0, 0, OPTAB_LIB_WIDEN));
5827 /* Store general registers on the stack. */
5828 dest = gen_rtx_MEM (BLKmode,
5829 plus_constant (current_function_internal_arg_pointer,
5831 set_mem_alias_set (dest, get_varargs_alias_set ());
5832 set_mem_align (dest, BITS_PER_WORD);
5833 move_block_from_reg (23, dest, 4);
5835 /* move_block_from_reg will emit code to store the argument registers
5836 individually as scalar stores.
5838 However, other insns may later load from the same addresses for
5839 a structure load (passing a struct to a varargs routine).
5841 The alias code assumes that such aliasing can never happen, so we
5842 have to keep memory referencing insns from moving up beyond the
5843 last argument register store. So we emit a blockage insn here. */
5844 emit_insn (gen_blockage ());
5846 return copy_to_reg (expand_binop (Pmode, add_optab,
5847 current_function_internal_arg_pointer,
5848 offset, 0, 0, OPTAB_LIB_WIDEN));
5852 hppa_va_start (tree valist, rtx nextarg)
5854 nextarg = expand_builtin_saveregs ();
5855 std_expand_builtin_va_start (valist, nextarg);
5859 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5863 /* Args grow upward. We can use the generic routines. */
5864 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5866 else /* !TARGET_64BIT */
5868 tree ptr = build_pointer_type (type);
5871 unsigned int size, ofs;
5874 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5878 ptr = build_pointer_type (type);
5880 size = int_size_in_bytes (type);
5881 valist_type = TREE_TYPE (valist);
5883 /* Args grow down. Not handled by generic routines. */
5885 u = fold_convert (valist_type, size_in_bytes (type));
5886 t = build2 (MINUS_EXPR, valist_type, valist, u);
5888 /* Copied from va-pa.h, but we probably don't need to align to
5889 word size, since we generate and preserve that invariant. */
5890 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5891 t = build2 (BIT_AND_EXPR, valist_type, t, u);
5893 t = build2 (MODIFY_EXPR, valist_type, valist, t);
5895 ofs = (8 - size) % 4;
5898 u = fold_convert (valist_type, size_int (ofs));
5899 t = build2 (PLUS_EXPR, valist_type, t, u);
5902 t = fold_convert (ptr, t);
5903 t = build_va_arg_indirect_ref (t);
5906 t = build_va_arg_indirect_ref (t);
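/* Illustrative sketch, not part of the port (helper name is hypothetical):
   the 32-bit va_arg address computation built in tree form above.  The
   pointer is decremented by the argument size, rounded down to a 4-byte
   (8-byte for arguments larger than four bytes) boundary, and sub-word
   arguments are then nudged to their right-justified position.  */
#if 0
static char *
sketch_va_arg_addr (char *valist, unsigned int size)
{
  unsigned long t = (unsigned long) valist - size;

  t &= (size > 4 ? ~7UL : ~3UL);
  t += (8 - size) % 4;		/* size 1 -> +3, size 2 -> +2, size 4 -> +0 */
  return (char *) t;
}
#endif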
5912 /* True if MODE is valid for the target. By "valid", we mean able to
5913 be manipulated in non-trivial ways. In particular, this means all
5914 the arithmetic is supported.
5916 Currently, TImode is not valid as the HP 64-bit runtime documentation
5917 doesn't document the alignment and calling conventions for this type.
5918 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5919 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
5922 pa_scalar_mode_supported_p (enum machine_mode mode)
5924 int precision = GET_MODE_PRECISION (mode);
5926 switch (GET_MODE_CLASS (mode))
5928 case MODE_PARTIAL_INT:
5930 if (precision == CHAR_TYPE_SIZE)
5932 if (precision == SHORT_TYPE_SIZE)
5934 if (precision == INT_TYPE_SIZE)
5936 if (precision == LONG_TYPE_SIZE)
5938 if (precision == LONG_LONG_TYPE_SIZE)
5943 if (precision == FLOAT_TYPE_SIZE)
5945 if (precision == DOUBLE_TYPE_SIZE)
5947 if (precision == LONG_DOUBLE_TYPE_SIZE)
5951 case MODE_DECIMAL_FLOAT:
5959 /* This routine handles all the normal conditional branch sequences we
5960 might need to generate. It handles compare immediate vs compare
5961 register, nullification of delay slots, varying length branches,
5962 negated branches, and all combinations of the above. It returns the
5963 output appropriate to emit the branch corresponding to all given
5967 output_cbranch (rtx *operands, int negated, rtx insn)
5969 static char buf[100];
5971 int nullify = INSN_ANNULLED_BRANCH_P (insn);
5972 int length = get_attr_length (insn);
5975 /* A conditional branch to the following instruction (e.g. the delay slot)
5976 is asking for a disaster. This can happen when not optimizing and
5977 when jump optimization fails.
5979 While it is usually safe to emit nothing, this can fail if the
5980 preceding instruction is a nullified branch with an empty delay
5981 slot and the same branch target as this branch. We could check
5982 for this but jump optimization should eliminate nop jumps. It
5983 is always safe to emit a nop. */
5984 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5987 /* The doubleword form of the cmpib instruction doesn't have the LEU
5988 and GTU conditions while the cmpb instruction does. Since we accept
5989 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5990 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5991 operands[2] = gen_rtx_REG (DImode, 0);
5992 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
5993 operands[1] = gen_rtx_REG (DImode, 0);
5995 /* If this is a long branch with its delay slot unfilled, set `nullify'
5996 as it can nullify the delay slot and save a nop. */
5997 if (length == 8 && dbr_sequence_length () == 0)
6000 /* If this is a short forward conditional branch which did not get
6001 its delay slot filled, the delay slot can still be nullified. */
6002 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6003 nullify = forward_branch_p (insn);
6005 /* A forward branch over a single nullified insn can be done with a
6006 comclr instruction. This avoids a single cycle penalty due to
6007 mis-predicted branch if we fall through (branch not taken). */
6009 && next_real_insn (insn) != 0
6010 && get_attr_length (next_real_insn (insn)) == 4
6011 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6017 /* All short conditional branches except backwards with an unfilled
6021 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6023 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6024 if (GET_MODE (operands[1]) == DImode)
6027 strcat (buf, "%B3");
6029 strcat (buf, "%S3");
6031 strcat (buf, " %2,%r1,%%r0");
6033 strcat (buf, ",n %2,%r1,%0");
6035 strcat (buf, " %2,%r1,%0");
6038 /* All long conditionals. Note a short backward branch with an
6039 unfilled delay slot is treated just like a long backward branch
6040 with an unfilled delay slot. */
6042 /* Handle weird backwards branch with a filled delay slot
6043 which is nullified. */
6044 if (dbr_sequence_length () != 0
6045 && ! forward_branch_p (insn)
6048 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6049 if (GET_MODE (operands[1]) == DImode)
6052 strcat (buf, "%S3");
6054 strcat (buf, "%B3");
6055 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6057 /* Handle short backwards branch with an unfilled delay slot.
6058 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6059 taken and untaken branches. */
6060 else if (dbr_sequence_length () == 0
6061 && ! forward_branch_p (insn)
6062 && INSN_ADDRESSES_SET_P ()
6063 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6064 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6066 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6067 if (GET_MODE (operands[1]) == DImode)
6070 strcat (buf, "%B3 %2,%r1,%0%#");
6072 strcat (buf, "%S3 %2,%r1,%0%#");
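/* Illustrative sketch, not part of the port (the real test is the
   VAL_14_BITS_P macro used in the condition above): a short backward
   conditional branch is only usable when the byte offset to the target
   fits in a signed 14-bit displacement.  */
#if 0
static int
fits_signed_14_bits (long disp)
{
  return disp >= -8192 && disp <= 8191;
}
#endif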
6076 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6077 if (GET_MODE (operands[1]) == DImode)
6080 strcat (buf, "%S3");
6082 strcat (buf, "%B3");
6084 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6086 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6091 /* The reversed conditional branch must branch over one additional
6092 instruction if the delay slot is filled and needs to be extracted
6093 by output_lbranch. If the delay slot is empty or this is a
6094 nullified forward branch, the instruction after the reversed
6095 conditional branch must be nullified. */
6096 if (dbr_sequence_length () == 0
6097 || (nullify && forward_branch_p (insn)))
6101 operands[4] = GEN_INT (length);
6106 operands[4] = GEN_INT (length + 4);
6109 /* Create a reversed conditional branch which branches around
6110 the following insns. */
6111 if (GET_MODE (operands[1]) != DImode)
6117 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6120 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6126 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6129 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6138 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6141 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6147 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6150 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6154 output_asm_insn (buf, operands);
6155 return output_lbranch (operands[0], insn, xdelay);
6160 /* This routine handles output of long unconditional branches that
6161 exceed the maximum range of a simple branch instruction. Since
6162 we don't have a register available for the branch, we save register
6163 %r1 in the frame marker, load the branch destination DEST into %r1,
6164 execute the branch, and restore %r1 in the delay slot of the branch.
6166 Since long branches may have an insn in the delay slot and the
6167 delay slot is used to restore %r1, we in general need to extract
6168 this insn and execute it before the branch. However, to facilitate
6169 use of this function by conditional branches, we also provide an
6170 option to not extract the delay insn so that it will be emitted
6171 after the long branch. So, if there is an insn in the delay slot,
6172 it is extracted if XDELAY is nonzero.
6174 The lengths of the various long-branch sequences are 20, 16 and 24
6175 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6178 output_lbranch (rtx dest, rtx insn, int xdelay)
6182 xoperands[0] = dest;
6184 /* First, free up the delay slot. */
6185 if (xdelay && dbr_sequence_length () != 0)
6187 /* We can't handle a jump in the delay slot. */
6188 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6190 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6193 /* Now delete the delay insn. */
6194 PUT_CODE (NEXT_INSN (insn), NOTE);
6195 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6196 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6199 /* Output an insn to save %r1. The runtime documentation doesn't
6200 specify whether the "Clean Up" slot in the caller's frame can
6201 be clobbered by the callee. It isn't copied by HP's builtin
6202 alloca, so this suggests that it can be clobbered if necessary.
6203 The "Static Link" location is copied by HP builtin alloca, so
6204 we avoid using it. Using the cleanup slot might be a problem
6205 if we have to interoperate with languages that pass cleanup
6206 information. However, it should be possible to handle these
6207 situations with GCC's asm feature.
6209 The "Current RP" slot is reserved for the called procedure, so
6210 we try to use it when we don't have a frame of our own. It's
6211 rather unlikely that we won't have a frame when we need to emit
6214 Really the way to go long term is a register scavenger; go to
6215 the target of the jump and find a register which we can use
6216 as a scratch to hold the value in %r1. Then, we wouldn't have
6217 to free up the delay slot or clobber a slot that may be needed
6218 for other purposes. */
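/* Illustrative sketch, not part of the port (helper name is hypothetical):
   the frame-marker slot choice made below.  With no frame of our own and
   %r2 dead, the "Current RP" slot is available; otherwise a slot that HP's
   builtin alloca does not copy (64-bit) or the "Clean Up" slot (32-bit) is
   used.  */
#if 0
static int
r1_save_slot_offset (int have_frame_or_live_r2, int is_64bit)
{
  if (!have_frame_or_live_r2)
    return is_64bit ? -16 : -20;	/* "Current RP" slot */
  return is_64bit ? -40 : -12;		/* alloca-safe / "Clean Up" slot */
}
#endif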
6221 if (actual_fsize == 0 && !regs_ever_live[2])
6222 /* Use the return pointer slot in the frame marker. */
6223 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6225 /* Use the slot at -40 in the frame marker since HP builtin
6226 alloca doesn't copy it. */
6227 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6231 if (actual_fsize == 0 && !regs_ever_live[2])
6232 /* Use the return pointer slot in the frame marker. */
6233 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6235 /* Use the "Clean Up" slot in the frame marker. In GCC,
6236 the only other use of this location is for copying a
6237 floating point double argument from a floating-point
6238 register to two general registers. The copy is done
6239 as an "atomic" operation when outputting a call, so it
6240 won't interfere with our using the location here. */
6241 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6244 if (TARGET_PORTABLE_RUNTIME)
6246 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6247 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6248 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6252 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6253 if (TARGET_SOM || !TARGET_GAS)
6255 xoperands[1] = gen_label_rtx ();
6256 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6257 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6258 CODE_LABEL_NUMBER (xoperands[1]));
6259 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6263 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6264 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6266 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6269 /* Now output a very long branch to the original target. */
6270 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6272 /* Now restore the value of %r1 in the delay slot. */
6275 if (actual_fsize == 0 && !regs_ever_live[2])
6276 return "ldd -16(%%r30),%%r1";
6278 return "ldd -40(%%r30),%%r1";
6282 if (actual_fsize == 0 && !regs_ever_live[2])
6283 return "ldw -20(%%r30),%%r1";
6285 return "ldw -12(%%r30),%%r1";
6289 /* This routine handles all the branch-on-bit conditional branch sequences we
6290 might need to generate. It handles nullification of delay slots,
6291 varying length branches, negated branches and all combinations of the
6292 above. It returns the appropriate output template to emit the branch. */
6295 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6297 static char buf[100];
6299 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6300 int length = get_attr_length (insn);
6303 /* A conditional branch to the following instruction (e.g. the delay slot) is
6304 asking for a disaster. I do not think this can happen as this pattern
6305 is only used when optimizing; jump optimization should eliminate the
6306 jump. But be prepared just in case. */
6308 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6311 /* If this is a long branch with its delay slot unfilled, set `nullify'
6312 as it can nullify the delay slot and save a nop. */
6313 if (length == 8 && dbr_sequence_length () == 0)
6316 /* If this is a short forward conditional branch which did not get
6317 its delay slot filled, the delay slot can still be nullified. */
6318 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6319 nullify = forward_branch_p (insn);
6321 /* A forward branch over a single nullified insn can be done with an
6322 extrs instruction. This avoids a single cycle penalty due to
6323 mis-predicted branch if we fall through (branch not taken). */
6326 && next_real_insn (insn) != 0
6327 && get_attr_length (next_real_insn (insn)) == 4
6328 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6335 /* All short conditional branches except backwards with an unfilled
6339 strcpy (buf, "{extrs,|extrw,s,}");
6341 strcpy (buf, "bb,");
6342 if (useskip && GET_MODE (operands[0]) == DImode)
6343 strcpy (buf, "extrd,s,*");
6344 else if (GET_MODE (operands[0]) == DImode)
6345 strcpy (buf, "bb,*");
6346 if ((which == 0 && negated)
6347 || (which == 1 && ! negated))
6352 strcat (buf, " %0,%1,1,%%r0");
6353 else if (nullify && negated)
6354 strcat (buf, ",n %0,%1,%3");
6355 else if (nullify && ! negated)
6356 strcat (buf, ",n %0,%1,%2");
6357 else if (! nullify && negated)
6358 strcat (buf, "%0,%1,%3");
6359 else if (! nullify && ! negated)
6360 strcat (buf, " %0,%1,%2");
6363 /* All long conditionals. Note a short backward branch with an
6364 unfilled delay slot is treated just like a long backward branch
6365 with an unfilled delay slot. */
6367 /* Handle weird backwards branch with a filled delay slot
6368 which is nullified. */
6369 if (dbr_sequence_length () != 0
6370 && ! forward_branch_p (insn)
6373 strcpy (buf, "bb,");
6374 if (GET_MODE (operands[0]) == DImode)
6376 if ((which == 0 && negated)
6377 || (which == 1 && ! negated))
6382 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6384 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6386 /* Handle short backwards branch with an unfilled delay slot.
6387 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6388 taken and untaken branches. */
6389 else if (dbr_sequence_length () == 0
6390 && ! forward_branch_p (insn)
6391 && INSN_ADDRESSES_SET_P ()
6392 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6393 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6395 strcpy (buf, "bb,");
6396 if (GET_MODE (operands[0]) == DImode)
6398 if ((which == 0 && negated)
6399 || (which == 1 && ! negated))
6404 strcat (buf, " %0,%1,%3%#");
6406 strcat (buf, " %0,%1,%2%#");
6410 if (GET_MODE (operands[0]) == DImode)
6411 strcpy (buf, "extrd,s,*");
6413 strcpy (buf, "{extrs,|extrw,s,}");
6414 if ((which == 0 && negated)
6415 || (which == 1 && ! negated))
6419 if (nullify && negated)
6420 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6421 else if (nullify && ! negated)
6422 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6424 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6426 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6431 /* The reversed conditional branch must branch over one additional
6432 instruction if the delay slot is filled and needs to be extracted
6433 by output_lbranch. If the delay slot is empty or this is a
6434 nullified forward branch, the instruction after the reversed
6435 condition branch must be nullified. */
6436 if (dbr_sequence_length () == 0
6437 || (nullify && forward_branch_p (insn)))
6441 operands[4] = GEN_INT (length);
6446 operands[4] = GEN_INT (length + 4);
6449 if (GET_MODE (operands[0]) == DImode)
6450 strcpy (buf, "bb,*");
6452 strcpy (buf, "bb,");
6453 if ((which == 0 && negated)
6454 || (which == 1 && !negated))
6459 strcat (buf, ",n %0,%1,.+%4");
6461 strcat (buf, " %0,%1,.+%4");
6462 output_asm_insn (buf, operands);
6463 return output_lbranch (negated ? operands[3] : operands[2],
6469 /* This routine handles all the branch-on-variable-bit conditional branch
6470 sequences we might need to generate. It handles nullification of delay
6471 slots, varying length branches, negated branches and all combinations
6472 of the above.  It returns the appropriate output template to emit the branch.  */
6476 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6478 static char buf[100];
6480 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6481 int length = get_attr_length (insn);
6484 /* A conditional branch to the following instruction (e.g. the delay slot) is
6485 asking for a disaster. I do not think this can happen as this pattern
6486 is only used when optimizing; jump optimization should eliminate the
6487 jump. But be prepared just in case. */
6489 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6492 /* If this is a long branch with its delay slot unfilled, set `nullify'
6493 as it can nullify the delay slot and save a nop. */
6494 if (length == 8 && dbr_sequence_length () == 0)
6497 /* If this is a short forward conditional branch which did not get
6498 its delay slot filled, the delay slot can still be nullified. */
6499 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6500 nullify = forward_branch_p (insn);
6502 /* A forward branch over a single nullified insn can be done with an
6503 extrs instruction.  This avoids a single cycle penalty due to a
6504 mispredicted branch if we fall through (branch not taken).  */
6507 && next_real_insn (insn) != 0
6508 && get_attr_length (next_real_insn (insn)) == 4
6509 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6516 /* All short conditional branches except backwards with an unfilled delay slot.  */
6520 strcpy (buf, "{vextrs,|extrw,s,}");
6522 strcpy (buf, "{bvb,|bb,}");
6523 if (useskip && GET_MODE (operands[0]) == DImode)
6524 strcpy (buf, "extrd,s,*");
6525 else if (GET_MODE (operands[0]) == DImode)
6526 strcpy (buf, "bb,*");
6527 if ((which == 0 && negated)
6528 || (which == 1 && ! negated))
6533 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6534 else if (nullify && negated)
6535 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6536 else if (nullify && ! negated)
6537 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6538 else if (! nullify && negated)
6539 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6540 else if (! nullify && ! negated)
6541 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6544 /* All long conditionals. Note a short backward branch with an
6545 unfilled delay slot is treated just like a long backward branch
6546 with an unfilled delay slot. */
6548 /* Handle weird backwards branch with a filled delay slot
6549 which is nullified. */
6550 if (dbr_sequence_length () != 0
6551 && ! forward_branch_p (insn)
6554 strcpy (buf, "{bvb,|bb,}");
6555 if (GET_MODE (operands[0]) == DImode)
6557 if ((which == 0 && negated)
6558 || (which == 1 && ! negated))
6563 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6565 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6567 /* Handle short backwards branch with an unfilled delay slot.
6568 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6569 taken and untaken branches. */
6570 else if (dbr_sequence_length () == 0
6571 && ! forward_branch_p (insn)
6572 && INSN_ADDRESSES_SET_P ()
6573 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6574 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6576 strcpy (buf, "{bvb,|bb,}");
6577 if (GET_MODE (operands[0]) == DImode)
6579 if ((which == 0 && negated)
6580 || (which == 1 && ! negated))
6585 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6587 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6591 strcpy (buf, "{vextrs,|extrw,s,}");
6592 if (GET_MODE (operands[0]) == DImode)
6593 strcpy (buf, "extrd,s,*");
6594 if ((which == 0 && negated)
6595 || (which == 1 && ! negated))
6599 if (nullify && negated)
6600 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6601 else if (nullify && ! negated)
6602 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6604 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6606 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6611 /* The reversed conditional branch must branch over one additional
6612 instruction if the delay slot is filled and needs to be extracted
6613 by output_lbranch. If the delay slot is empty or this is a
6614 nullified forward branch, the instruction after the reversed
6615 condition branch must be nullified. */
6616 if (dbr_sequence_length () == 0
6617 || (nullify && forward_branch_p (insn)))
6621 operands[4] = GEN_INT (length);
6626 operands[4] = GEN_INT (length + 4);
6629 if (GET_MODE (operands[0]) == DImode)
6630 strcpy (buf, "bb,*");
6632 strcpy (buf, "{bvb,|bb,}");
6633 if ((which == 0 && negated)
6634 || (which == 1 && !negated))
6639 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6641 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6642 output_asm_insn (buf, operands);
6643 return output_lbranch (negated ? operands[3] : operands[2],
6649 /* Return the output template for emitting a dbra type insn.
6651 Note it may perform some output operations on its own before
6652 returning the final output string. */
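/* Illustrative note (added; not in the original sources): the "addib"
   templates below add the immediate %1 to register %0 and branch to %3
   when condition %C2 holds on the result, i.e. the classic
   decrement-and-branch loop; e.g. "addib,> -1,%r26,L$0042" (operand
   values here are hypothetical).  */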
6654 output_dbra (rtx *operands, rtx insn, int which_alternative)
6656 int length = get_attr_length (insn);
6658 /* A conditional branch to the following instruction (e.g. the delay slot) is
6659 asking for a disaster. Be prepared! */
6661 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6663 if (which_alternative == 0)
6664 return "ldo %1(%0),%0";
6665 else if (which_alternative == 1)
6667 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6668 output_asm_insn ("ldw -16(%%r30),%4", operands);
6669 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6670 return "{fldws|fldw} -16(%%r30),%0";
6674 output_asm_insn ("ldw %0,%4", operands);
6675 return "ldo %1(%4),%4\n\tstw %4,%0";
6679 if (which_alternative == 0)
6681 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6684 /* If this is a long branch with its delay slot unfilled, set `nullify'
6685 as it can nullify the delay slot and save a nop. */
6686 if (length == 8 && dbr_sequence_length () == 0)
6689 /* If this is a short forward conditional branch which did not get
6690 its delay slot filled, the delay slot can still be nullified. */
6691 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6692 nullify = forward_branch_p (insn);
6698 return "addib,%C2,n %1,%0,%3";
6700 return "addib,%C2 %1,%0,%3";
6703 /* Handle weird backwards branch with a filled delay slot
6704 which is nullified. */
6705 if (dbr_sequence_length () != 0
6706 && ! forward_branch_p (insn)
6708 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6709 /* Handle short backwards branch with an unfilled delay slot.
6710 Using an addb;nop rather than addi;bl saves 1 cycle for both
6711 taken and untaken branches. */
6712 else if (dbr_sequence_length () == 0
6713 && ! forward_branch_p (insn)
6714 && INSN_ADDRESSES_SET_P ()
6715 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6716 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6717 return "addib,%C2 %1,%0,%3%#";
6719 /* Handle normal cases. */
6721 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6723 return "addi,%N2 %1,%0,%0\n\tb %3";
6726 /* The reversed conditional branch must branch over one additional
6727 instruction if the delay slot is filled and needs to be extracted
6728 by output_lbranch. If the delay slot is empty or this is a
6729 nullified forward branch, the instruction after the reversed
6730 condition branch must be nullified. */
6731 if (dbr_sequence_length () == 0
6732 || (nullify && forward_branch_p (insn)))
6736 operands[4] = GEN_INT (length);
6741 operands[4] = GEN_INT (length + 4);
6745 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6747 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6749 return output_lbranch (operands[3], insn, xdelay);
6753 /* Deal with gross reload from FP register case. */
6754 else if (which_alternative == 1)
6756 /* Move loop counter from FP register to MEM then into a GR,
6757 increment the GR, store the GR into MEM, and finally reload
6758 the FP register from MEM from within the branch's delay slot. */
6759 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6761 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6763 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6764 else if (length == 28)
6765 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6768 operands[5] = GEN_INT (length - 16);
6769 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6770 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6771 return output_lbranch (operands[3], insn, 0);
6774 /* Deal with gross reload from memory case. */
6777 /* Reload loop counter from memory, the store back to memory
6778 happens in the branch's delay slot. */
6779 output_asm_insn ("ldw %0,%4", operands);
6781 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6782 else if (length == 16)
6783 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6786 operands[5] = GEN_INT (length - 4);
6787 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6788 return output_lbranch (operands[3], insn, 0);
6793 /* Return the output template for emitting a movb type insn.
6795 Note it may perform some output operations on its own before
6796 returning the final output string. */
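/* Illustrative note (added; not in the original sources): "movb" copies
   %1 into %0 and branches to %3 when condition %C2 holds on the copied
   value; the reload alternatives below instead test against %%r0 with
   comb/cmpb when the destination is an FP register, memory or %%sar.  */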
6798 output_movb (rtx *operands, rtx insn, int which_alternative,
6799 int reverse_comparison)
6801 int length = get_attr_length (insn);
6803 /* A conditional branch to the following instruction (e.g. the delay slot) is
6804 asking for a disaster. Be prepared! */
6806 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6808 if (which_alternative == 0)
6809 return "copy %1,%0";
6810 else if (which_alternative == 1)
6812 output_asm_insn ("stw %1,-16(%%r30)", operands);
6813 return "{fldws|fldw} -16(%%r30),%0";
6815 else if (which_alternative == 2)
6821 /* Support the second variant. */
6822 if (reverse_comparison)
6823 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6825 if (which_alternative == 0)
6827 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6830 /* If this is a long branch with its delay slot unfilled, set `nullify'
6831 as it can nullify the delay slot and save a nop. */
6832 if (length == 8 && dbr_sequence_length () == 0)
6835 /* If this is a short forward conditional branch which did not get
6836 its delay slot filled, the delay slot can still be nullified. */
6837 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6838 nullify = forward_branch_p (insn);
6844 return "movb,%C2,n %1,%0,%3";
6846 return "movb,%C2 %1,%0,%3";
6849 /* Handle weird backwards branch with a filled delay slot
6850 which is nullified. */
6851 if (dbr_sequence_length () != 0
6852 && ! forward_branch_p (insn)
6854 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6856 /* Handle short backwards branch with an unfilled delay slot.
6857 Using a movb;nop rather than or;bl saves 1 cycle for both
6858 taken and untaken branches. */
6859 else if (dbr_sequence_length () == 0
6860 && ! forward_branch_p (insn)
6861 && INSN_ADDRESSES_SET_P ()
6862 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6863 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6864 return "movb,%C2 %1,%0,%3%#";
6865 /* Handle normal cases. */
6867 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6869 return "or,%N2 %1,%%r0,%0\n\tb %3";
6872 /* The reversed conditional branch must branch over one additional
6873 instruction if the delay slot is filled and needs to be extracted
6874 by output_lbranch. If the delay slot is empty or this is a
6875 nullified forward branch, the instruction after the reversed
6876 condition branch must be nullified. */
6877 if (dbr_sequence_length () == 0
6878 || (nullify && forward_branch_p (insn)))
6882 operands[4] = GEN_INT (length);
6887 operands[4] = GEN_INT (length + 4);
6891 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
6893 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
6895 return output_lbranch (operands[3], insn, xdelay);
6898 /* Deal with gross reload for FP destination register case. */
6899 else if (which_alternative == 1)
6901 /* Move source register to MEM, perform the branch test, then
6902 finally load the FP register from MEM from within the branch's delay slot.  */
6904 output_asm_insn ("stw %1,-16(%%r30)", operands);
6906 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6907 else if (length == 16)
6908 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6911 operands[4] = GEN_INT (length - 4);
6912 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
6913 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6914 return output_lbranch (operands[3], insn, 0);
6917 /* Deal with gross reload from memory case. */
6918 else if (which_alternative == 2)
6920 /* Reload loop counter from memory, the store back to memory
6921 happens in the branch's delay slot. */
6923 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6924 else if (length == 12)
6925 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6928 operands[4] = GEN_INT (length);
6929 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
6931 return output_lbranch (operands[3], insn, 0);
6934 /* Handle SAR as a destination. */
6938 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6939 else if (length == 12)
6940 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6943 operands[4] = GEN_INT (length);
6944 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
6946 return output_lbranch (operands[3], insn, 0);
6951 /* Copy any FP arguments in INSN into integer registers. */
6953 copy_fp_args (rtx insn)
6958 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6960 int arg_mode, regno;
6961 rtx use = XEXP (link, 0);
6963 if (! (GET_CODE (use) == USE
6964 && GET_CODE (XEXP (use, 0)) == REG
6965 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6968 arg_mode = GET_MODE (XEXP (use, 0));
6969 regno = REGNO (XEXP (use, 0));
6971 /* Is it a floating point register? */
6972 if (regno >= 32 && regno <= 39)
6974 /* Copy the FP register into an integer register via memory. */
6975 if (arg_mode == SFmode)
6977 xoperands[0] = XEXP (use, 0);
6978 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6979 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6980 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6984 xoperands[0] = XEXP (use, 0);
6985 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6986 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6987 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6988 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6994 /* Compute length of the FP argument copy sequence for INSN. */
6996 length_fp_args (rtx insn)
7001 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7003 int arg_mode, regno;
7004 rtx use = XEXP (link, 0);
7006 if (! (GET_CODE (use) == USE
7007 && GET_CODE (XEXP (use, 0)) == REG
7008 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7011 arg_mode = GET_MODE (XEXP (use, 0));
7012 regno = REGNO (XEXP (use, 0));
7014 /* Is it a floating point register? */
7015 if (regno >= 32 && regno <= 39)
7017 if (arg_mode == SFmode)
7027 /* Return the attribute length for the millicode call instruction INSN.
7028 The length must match the code generated by output_millicode_call.
7029 We include the delay slot in the returned length as it is better to
7030 overestimate the length than to underestimate it.  */
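/* Illustrative note (added; not in the original sources): the 240000
   and 7600000 byte thresholds used here appear to be conservative
   approximations of the reach of the 17-bit (roughly +-256KB) and
   22-bit (roughly +-8MB) pc-relative branch displacements, leaving
   slack for stubs and length estimation error.  */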
7033 attr_length_millicode_call (rtx insn)
7035 unsigned long distance = -1;
7036 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7038 if (INSN_ADDRESSES_SET_P ())
7040 distance = (total + insn_current_reference_address (insn));
7041 if (distance < total)
7047 if (!TARGET_LONG_CALLS && distance < 7600000)
7052 else if (TARGET_PORTABLE_RUNTIME)
7056 if (!TARGET_LONG_CALLS && distance < 240000)
7059 if (TARGET_LONG_ABS_CALL && !flag_pic)
7066 /* INSN is a function call.  It may have an unconditional jump in its delay slot.
7069 CALL_DEST is the routine we are calling. */
7072 output_millicode_call (rtx insn, rtx call_dest)
7074 int attr_length = get_attr_length (insn);
7075 int seq_length = dbr_sequence_length ();
7080 xoperands[0] = call_dest;
7081 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7083 /* Handle the common case where we are sure that the branch will
7084 reach the beginning of the $CODE$ subspace. The within reach
7085 form of the $$sh_func_adrs call has a length of 28. Because
7086 it has an attribute type of multi, it never has a nonzero
7087 sequence length. The length of the $$sh_func_adrs is the same
7088 as certain out of reach PIC calls to other routines. */
7089 if (!TARGET_LONG_CALLS
7090 && ((seq_length == 0
7091 && (attr_length == 12
7092 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7093 || (seq_length != 0 && attr_length == 8)))
7095 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7101 /* It might seem that one insn could be saved by accessing
7102 the millicode function using the linkage table. However,
7103 this doesn't work in shared libraries and other dynamically
7104 loaded objects. Using a pc-relative sequence also avoids
7105 problems related to the implicit use of the gp register. */
7106 output_asm_insn ("b,l .+8,%%r1", xoperands);
7110 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7111 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7115 xoperands[1] = gen_label_rtx ();
7116 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7117 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7118 CODE_LABEL_NUMBER (xoperands[1]));
7119 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7122 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7124 else if (TARGET_PORTABLE_RUNTIME)
7126 /* Pure portable runtime doesn't allow be/ble; we also don't
7127 have PIC support in the assembler/linker, so this sequence is safe.  */
7130 /* Get the address of our target into %r1. */
7131 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7132 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7134 /* Get our return address into %r31. */
7135 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7136 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7138 /* Jump to our target address in %r1. */
7139 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7143 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7145 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7147 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7151 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7152 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7154 if (TARGET_SOM || !TARGET_GAS)
7156 /* The HP assembler can generate relocations for the
7157 difference of two symbols. GAS can do this for a
7158 millicode symbol but not an arbitrary external
7159 symbol when generating SOM output. */
7160 xoperands[1] = gen_label_rtx ();
7161 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7162 CODE_LABEL_NUMBER (xoperands[1]));
7163 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7164 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7168 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7169 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7173 /* Jump to our target address in %r1. */
7174 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7178 if (seq_length == 0)
7179 output_asm_insn ("nop", xoperands);
7181 /* We are done if there isn't a jump in the delay slot. */
7182 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7185 /* This call has an unconditional jump in its delay slot. */
7186 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7188 /* See if the return address can be adjusted. Use the containing
7189 sequence insn's address. */
7190 if (INSN_ADDRESSES_SET_P ())
7192 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7193 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7194 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7196 if (VAL_14_BITS_P (distance))
7198 xoperands[1] = gen_label_rtx ();
7199 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7200 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7201 CODE_LABEL_NUMBER (xoperands[1]));
7204 /* ??? This branch may not reach its target. */
7205 output_asm_insn ("nop\n\tb,n %0", xoperands);
7208 /* ??? This branch may not reach its target. */
7209 output_asm_insn ("nop\n\tb,n %0", xoperands);
7211 /* Delete the jump. */
7212 PUT_CODE (NEXT_INSN (insn), NOTE);
7213 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7214 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7219 /* Return the attribute length of the call instruction INSN. The SIBCALL
7220 flag indicates whether INSN is a regular call or a sibling call. The
7221 length returned must be longer than the code actually generated by
7222 output_call. Since branch shortening is done before delay branch
7223 sequencing, there is no way to determine whether or not the delay
7224 slot will be filled during branch shortening. Even when the delay
7225 slot is filled, we may have to add a nop if the delay slot contains
7226 a branch that can't reach its target. Thus, we always have to include
7227 the delay slot in the length estimate. This used to be done in
7228 pa_adjust_insn_length but we do it here now as some sequences always
7229 fill the delay slot and we can save four bytes in the estimate for these sequences.  */
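/* Illustrative note (added; not in the original sources): the common
   reachable pc-relative case comes out as length 8, i.e. the
   "{bl|b,l}" plus its delay slot, which is the value output_call
   checks for below before emitting the short form.  */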
7233 attr_length_call (rtx insn, int sibcall)
7239 rtx pat = PATTERN (insn);
7240 unsigned long distance = -1;
7242 if (INSN_ADDRESSES_SET_P ())
7244 unsigned long total;
7246 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7247 distance = (total + insn_current_reference_address (insn));
7248 if (distance < total)
7252 /* Determine if this is a local call. */
7253 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7254 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7256 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7258 call_decl = SYMBOL_REF_DECL (call_dest);
7259 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7261 /* pc-relative branch. */
7262 if (!TARGET_LONG_CALLS
7263 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7264 || distance < 240000))
7267 /* 64-bit plabel sequence. */
7268 else if (TARGET_64BIT && !local_call)
7269 length += sibcall ? 28 : 24;
7271 /* non-pic long absolute branch sequence. */
7272 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7275 /* long pc-relative branch sequence. */
7276 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7277 || (TARGET_64BIT && !TARGET_GAS)
7278 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7282 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7286 /* 32-bit plabel sequence. */
7292 length += length_fp_args (insn);
7302 if (!TARGET_NO_SPACE_REGS)
7310 /* INSN is a function call.  It may have an unconditional jump in its delay slot.
7313 CALL_DEST is the routine we are calling. */
7316 output_call (rtx insn, rtx call_dest, int sibcall)
7318 int delay_insn_deleted = 0;
7319 int delay_slot_filled = 0;
7320 int seq_length = dbr_sequence_length ();
7321 tree call_decl = SYMBOL_REF_DECL (call_dest);
7322 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7325 xoperands[0] = call_dest;
7327 /* Handle the common case where we're sure that the branch will reach
7328 the beginning of the "$CODE$" subspace. This is the beginning of
7329 the current function if we are in a named section. */
7330 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7332 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7333 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7337 if (TARGET_64BIT && !local_call)
7339 /* ??? As far as I can tell, the HP linker doesn't support the
7340 long pc-relative sequence described in the 64-bit runtime
7341 architecture. So, we use a slightly longer indirect call. */
7342 xoperands[0] = get_deferred_plabel (call_dest);
7343 xoperands[1] = gen_label_rtx ();
7345 /* If this isn't a sibcall, we put the load of %r27 into the
7346 delay slot. We can't do this in a sibcall as we don't
7347 have a second call-clobbered scratch register available. */
7349 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7352 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7355 /* Now delete the delay insn. */
7356 PUT_CODE (NEXT_INSN (insn), NOTE);
7357 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7358 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7359 delay_insn_deleted = 1;
7362 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7363 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7364 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7368 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7369 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7370 output_asm_insn ("bve (%%r1)", xoperands);
7374 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7375 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7376 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7377 delay_slot_filled = 1;
7382 int indirect_call = 0;
7384 /* Emit a long call. There are several different sequences
7385 of increasing length and complexity. In most cases,
7386 they don't allow an instruction in the delay slot. */
7387 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7388 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7389 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7394 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7396 && (!TARGET_PA_20 || indirect_call))
7398 /* A non-jump insn in the delay slot. By definition we can
7399 emit this insn before the call (and in fact before argument relocating).  */
7401 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7404 /* Now delete the delay insn. */
7405 PUT_CODE (NEXT_INSN (insn), NOTE);
7406 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7407 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7408 delay_insn_deleted = 1;
7411 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7413 /* This is the best sequence for making long calls in
7414 non-pic code. Unfortunately, GNU ld doesn't provide
7415 the stub needed for external calls, and GAS's support
7416 for this with the SOM linker is buggy. It is safe
7417 to use this for local calls. */
7418 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7420 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7424 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7427 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7429 output_asm_insn ("copy %%r31,%%r2", xoperands);
7430 delay_slot_filled = 1;
7435 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7436 || (TARGET_64BIT && !TARGET_GAS))
7438 /* The HP assembler and linker can handle relocations
7439 for the difference of two symbols. GAS and the HP
7440 linker can't do this when one of the symbols is external.  */
7442 xoperands[1] = gen_label_rtx ();
7443 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7444 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7445 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7446 CODE_LABEL_NUMBER (xoperands[1]));
7447 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7449 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7451 /* GAS currently can't generate the relocations that
7452 are needed for the SOM linker under HP-UX using this
7453 sequence. The GNU linker doesn't generate the stubs
7454 that are needed for external calls on TARGET_ELF32
7455 with this sequence. For now, we have to use a
7456 longer plabel sequence when using GAS. */
7457 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7458 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7460 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7465 /* Emit a long plabel-based call sequence. This is
7466 essentially an inline implementation of $$dyncall.
7467 We don't actually try to call $$dyncall as this is
7468 as difficult as calling the function itself. */
7469 xoperands[0] = get_deferred_plabel (call_dest);
7470 xoperands[1] = gen_label_rtx ();
7472 /* Since the call is indirect, FP arguments in registers
7473 need to be copied to the general registers. Then, the
7474 argument relocation stub will copy them back. */
7476 copy_fp_args (insn);
7480 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7481 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7482 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7486 output_asm_insn ("addil LR'%0-$global$,%%r27",
7488 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7492 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7493 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7494 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7495 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7497 if (!sibcall && !TARGET_PA_20)
7499 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7500 if (TARGET_NO_SPACE_REGS)
7501 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7503 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7510 output_asm_insn ("bve (%%r1)", xoperands);
7515 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7516 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7517 delay_slot_filled = 1;
7520 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7525 if (!TARGET_NO_SPACE_REGS)
7526 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7531 if (TARGET_NO_SPACE_REGS)
7532 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7534 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7538 if (TARGET_NO_SPACE_REGS)
7539 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7541 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7544 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7546 output_asm_insn ("copy %%r31,%%r2", xoperands);
7547 delay_slot_filled = 1;
7554 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7555 output_asm_insn ("nop", xoperands);
7557 /* We are done if there isn't a jump in the delay slot. */
7559 || delay_insn_deleted
7560 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7563 /* A sibcall should never have a branch in the delay slot. */
7564 gcc_assert (!sibcall);
7566 /* This call has an unconditional jump in its delay slot. */
7567 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7569 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7571 /* See if the return address can be adjusted. Use the containing
7572 sequence insn's address. */
7573 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7574 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7575 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7577 if (VAL_14_BITS_P (distance))
7579 xoperands[1] = gen_label_rtx ();
7580 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7581 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7582 CODE_LABEL_NUMBER (xoperands[1]));
7585 output_asm_insn ("nop\n\tb,n %0", xoperands);
7588 output_asm_insn ("b,n %0", xoperands);
7590 /* Delete the jump. */
7591 PUT_CODE (NEXT_INSN (insn), NOTE);
7592 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7593 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7598 /* Return the attribute length of the indirect call instruction INSN.
7599 The length must match the code generated by output_indirect_call.
7600 The returned length includes the delay slot. Currently, the delay
7601 slot of an indirect call sequence is not exposed and it is used by
7602 the sequence itself. */
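/* Illustrative note (added; not in the original sources): the lengths
   computed here correspond to the sequences emitted by
   output_indirect_call below: 8 for a direct bl/b,l to $$dyncall, 12
   for the ldil/ble absolute form, 20 for the portable runtime form,
   and larger values for the long PIC sequence.  */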
7605 attr_length_indirect_call (rtx insn)
7607 unsigned long distance = -1;
7608 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7610 if (INSN_ADDRESSES_SET_P ())
7612 distance = (total + insn_current_reference_address (insn));
7613 if (distance < total)
7620 if (TARGET_FAST_INDIRECT_CALLS
7621 || (!TARGET_PORTABLE_RUNTIME
7622 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7623 || distance < 240000)))
7629 if (TARGET_PORTABLE_RUNTIME)
7632 /* Out of reach, can use ble. */
7637 output_indirect_call (rtx insn, rtx call_dest)
7643 xoperands[0] = call_dest;
7644 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7645 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7649 /* First the special case for kernels, level 0 systems, etc. */
7650 if (TARGET_FAST_INDIRECT_CALLS)
7651 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7653 /* Now the normal case -- we can reach $$dyncall directly or
7654 we're sure that we can get there via a long-branch stub.
7656 No need to check target flags as the length uniquely identifies
7657 the remaining cases. */
7658 if (attr_length_indirect_call (insn) == 8)
7660 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7661 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7662 variant of the B,L instruction can't be used on the SOM target. */
7663 if (TARGET_PA_20 && !TARGET_SOM)
7664 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7666 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7669 /* Long millicode call, but we are not generating PIC or portable runtime code.  */
7671 if (attr_length_indirect_call (insn) == 12)
7672 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7674 /* Long millicode call for portable runtime. */
7675 if (attr_length_indirect_call (insn) == 20)
7676 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7678 /* We need a long PIC call to $$dyncall. */
7679 xoperands[0] = NULL_RTX;
7680 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7681 if (TARGET_SOM || !TARGET_GAS)
7683 xoperands[0] = gen_label_rtx ();
7684 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7685 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7686 CODE_LABEL_NUMBER (xoperands[0]));
7687 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7691 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7692 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7695 output_asm_insn ("blr %%r0,%%r2", xoperands);
7696 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7700 /* Return the total length of the save and restore instructions needed for
7701 the data linkage table pointer (i.e., the PIC register) across the call
7702 instruction INSN. No-return calls do not require a save and restore.
7703 In addition, we may be able to avoid the save and restore for calls
7704 within the same translation unit. */
7707 attr_length_save_restore_dltp (rtx insn)
7709 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7715 /* In HPUX 8.0's shared library scheme, special relocations are needed
7716 for function labels if they might be passed to a function
7717 in a shared library (because shared libraries don't live in code
7718 space), and special magic is needed to construct their address. */
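/* For example (added; not in the original sources), a function symbol
   "foo" is encoded as "@foo"; pa_strip_name_encoding below undoes this
   by skipping a leading '@' (and any '*' prefix).  */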
7721 hppa_encode_label (rtx sym)
7723 const char *str = XSTR (sym, 0);
7724 int len = strlen (str) + 1;
7727 p = newstr = alloca (len + 1);
7731 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7735 pa_encode_section_info (tree decl, rtx rtl, int first)
7737 default_encode_section_info (decl, rtl, first);
7739 if (first && TEXT_SPACE_P (decl))
7741 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7742 if (TREE_CODE (decl) == FUNCTION_DECL)
7743 hppa_encode_label (XEXP (rtl, 0));
7747 /* This is roughly the inverse of pa_encode_section_info.  */
7750 pa_strip_name_encoding (const char *str)
7752 str += (*str == '@');
7753 str += (*str == '*');
7758 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7760 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7763 /* Returns 1 if OP is a function label involved in a simple addition
7764 with a constant. Used to keep certain patterns from matching
7765 during instruction combination. */
7767 is_function_label_plus_const (rtx op)
7769 /* Strip off any CONST. */
7770 if (GET_CODE (op) == CONST)
7773 return (GET_CODE (op) == PLUS
7774 && function_label_operand (XEXP (op, 0), Pmode)
7775 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7778 /* Output assembly code for a thunk to FUNCTION. */
7781 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7782 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7785 static unsigned int current_thunk_number;
7786 int val_14 = VAL_14_BITS_P (delta);
7791 xoperands[0] = XEXP (DECL_RTL (function), 0);
7792 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7793 xoperands[2] = GEN_INT (delta);
7795 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7796 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7798 /* Output the thunk. We know that the function is in the same
7799 translation unit (i.e., the same space) as the thunk, and that
7800 thunks are output after their method. Thus, we don't need an
7801 external branch to reach the function. With SOM and GAS,
7802 functions and thunks are effectively in different sections.
7803 Thus, we can always use an IA-relative branch and the linker
7804 will add a long branch stub if necessary.
7806 However, we have to be careful when generating PIC code on the
7807 SOM port to ensure that the sequence does not transfer to an
7808 import stub for the target function as this could clobber the
7809 return value saved at SP-24. This would also apply to the
7810 32-bit linux port if the multi-space model is implemented. */
7811 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7812 && !(flag_pic && TREE_PUBLIC (function))
7813 && (TARGET_GAS || last_address < 262132))
7814 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7815 && ((targetm.have_named_sections
7816 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7817 /* The GNU 64-bit linker has rather poor stub management.
7818 So, we use a long branch from thunks that aren't in
7819 the same section as the target function. */
7821 && (DECL_SECTION_NAME (thunk_fndecl)
7822 != DECL_SECTION_NAME (function)))
7823 || ((DECL_SECTION_NAME (thunk_fndecl)
7824 == DECL_SECTION_NAME (function))
7825 && last_address < 262132)))
7826 || (!targetm.have_named_sections && last_address < 262132))))
7829 output_asm_insn ("addil L'%2,%%r26", xoperands);
7831 output_asm_insn ("b %0", xoperands);
7835 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7840 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7844 else if (TARGET_64BIT)
7846 /* We only have one call-clobbered scratch register, so we can't
7847 make use of the delay slot if delta doesn't fit in 14 bits. */
7850 output_asm_insn ("addil L'%2,%%r26", xoperands);
7851 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7854 output_asm_insn ("b,l .+8,%%r1", xoperands);
7858 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7859 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7863 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7864 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7869 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7870 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7875 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7879 else if (TARGET_PORTABLE_RUNTIME)
7881 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7882 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7885 output_asm_insn ("addil L'%2,%%r26", xoperands);
7887 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7891 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7896 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7900 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7902 /* The function is accessible from outside this module. The only
7903 way to avoid an import stub between the thunk and function is to
7904 call the function directly with an indirect sequence similar to
7905 that used by $$dyncall. This is possible because $$dyncall acts
7906 as the import stub in an indirect call. */
7907 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7908 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7909 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7910 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7911 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7912 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7913 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7914 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7915 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7919 output_asm_insn ("addil L'%2,%%r26", xoperands);
7925 output_asm_insn ("bve (%%r22)", xoperands);
7928 else if (TARGET_NO_SPACE_REGS)
7930 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7935 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7936 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7937 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7942 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7944 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7948 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7950 if (TARGET_SOM || !TARGET_GAS)
7952 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7953 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7957 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7958 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7962 output_asm_insn ("addil L'%2,%%r26", xoperands);
7964 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7968 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7973 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7980 output_asm_insn ("addil L'%2,%%r26", xoperands);
7982 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7983 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7987 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7992 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7997 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7999 if (TARGET_SOM && TARGET_GAS)
8001 /* We're done with this subspace except possibly for some additional
8002 debug information. Forget that we are in this subspace to ensure
8003 that the next function is output in its own subspace. */
8005 cfun->machine->in_nsubspa = 2;
8008 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8010 switch_to_section (data_section);
8011 output_asm_insn (".align 4", xoperands);
8012 ASM_OUTPUT_LABEL (file, label);
8013 output_asm_insn (".word P'%0", xoperands);
8016 current_thunk_number++;
8017 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8018 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8019 last_address += nbytes;
8020 update_total_code_bytes (nbytes);
8023 /* Only direct calls to static functions are allowed to be sibling (tail)
8026 This restriction is necessary because some linker generated stubs will
8027 store return pointers into rp' in some cases which might clobber a
8028 live value already in rp'.
8030 In a sibcall the current function and the target function share stack
8031 space. Thus if the path to the current function and the path to the
8032 target function save a value in rp', they save the value into the
8033 same stack slot, which has undesirable consequences.
8035 Because of the deferred binding nature of shared libraries any function
8036 with external scope could be in a different load module and thus require
8037 rp' to be saved when calling that function. So sibcall optimizations
8038 can only be safe for static functions.
8040 Note that GCC never needs return value relocations, so we don't have to
8041 worry about static calls with return value relocations (which require saving rp').
8044 It is safe to perform a sibcall optimization when the target function
8045 will never return. */
8047 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8049 if (TARGET_PORTABLE_RUNTIME)
8052 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8053 single subspace mode and the call is not indirect. As far as I know,
8054 there is no operating system support for the multiple subspace mode.
8055 It might be possible to support indirect calls if we didn't use
8056 $$dyncall (see the indirect sequence generated in output_call). */
8058 return (decl != NULL_TREE);
8060 /* Sibcalls are not ok because the arg pointer register is not a fixed
8061 register. This prevents the sibcall optimization from occurring. In
8062 addition, there are problems with stub placement using GNU ld. This
8063 is because a normal sibcall branch uses a 17-bit relocation while
8064 a regular call branch uses a 22-bit relocation. As a result, more
8065 care needs to be taken in the placement of long-branch stubs. */
8069 /* Sibcalls are only ok within a translation unit. */
8070 return (decl && !TREE_PUBLIC (decl));
8073 /* ??? Addition is not commutative on the PA due to the weird implicit
8074 space register selection rules for memory addresses. Therefore, we
8075 don't consider a + b == b + a, as this might be inside a MEM. */
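/* Illustrative note (added; not in the original sources): when space
   registers are in use, the space register for a memory access is
   selected from the base register of the address, so (mem (plus a b))
   and (mem (plus b a)) can end up referencing different spaces.  */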
8077 pa_commutative_p (rtx x, int outer_code)
8079 return (COMMUTATIVE_P (x)
8080 && (TARGET_NO_SPACE_REGS
8081 || (outer_code != UNKNOWN && outer_code != MEM)
8082 || GET_CODE (x) != PLUS));
8085 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8086 use in fmpyadd instructions. */
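/* Illustrative note (added; not in the original sources): operands 0-2
   are the destination and sources of the multiply and operands 3-5
   those of the add; the checks below enforce the fused fmpyadd
   restrictions (matching modes, register operands, and no overlap
   between the multiply result and the add inputs).  */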
8088 fmpyaddoperands (rtx *operands)
8090 enum machine_mode mode = GET_MODE (operands[0]);
8092 /* Must be a floating point mode. */
8093 if (mode != SFmode && mode != DFmode)
8096 /* All modes must be the same. */
8097 if (! (mode == GET_MODE (operands[1])
8098 && mode == GET_MODE (operands[2])
8099 && mode == GET_MODE (operands[3])
8100 && mode == GET_MODE (operands[4])
8101 && mode == GET_MODE (operands[5])))
8104 /* All operands must be registers. */
8105 if (! (GET_CODE (operands[1]) == REG
8106 && GET_CODE (operands[2]) == REG
8107 && GET_CODE (operands[3]) == REG
8108 && GET_CODE (operands[4]) == REG
8109 && GET_CODE (operands[5]) == REG))
8112 /* Only 2 real operands to the addition. One of the input operands must
8113 be the same as the output operand. */
8114 if (! rtx_equal_p (operands[3], operands[4])
8115 && ! rtx_equal_p (operands[3], operands[5]))
8118 /* Inout operand of add cannot conflict with any operands from multiply. */
8119 if (rtx_equal_p (operands[3], operands[0])
8120 || rtx_equal_p (operands[3], operands[1])
8121 || rtx_equal_p (operands[3], operands[2]))
8124 /* multiply cannot feed into addition operands. */
8125 if (rtx_equal_p (operands[4], operands[0])
8126 || rtx_equal_p (operands[5], operands[0]))
8129 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8131 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8132 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8133 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8134 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8135 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8136 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8139 /* Passed. Operands are suitable for fmpyadd. */
8143 #if !defined(USE_COLLECT2)
8145 pa_asm_out_constructor (rtx symbol, int priority)
8147 if (!function_label_operand (symbol, VOIDmode))
8148 hppa_encode_label (symbol);
8150 #ifdef CTORS_SECTION_ASM_OP
8151 default_ctor_section_asm_out_constructor (symbol, priority);
8153 # ifdef TARGET_ASM_NAMED_SECTION
8154 default_named_section_asm_out_constructor (symbol, priority);
8156 default_stabs_asm_out_constructor (symbol, priority);
8162 pa_asm_out_destructor (rtx symbol, int priority)
8164 if (!function_label_operand (symbol, VOIDmode))
8165 hppa_encode_label (symbol);
8167 #ifdef DTORS_SECTION_ASM_OP
8168 default_dtor_section_asm_out_destructor (symbol, priority);
8170 # ifdef TARGET_ASM_NAMED_SECTION
8171 default_named_section_asm_out_destructor (symbol, priority);
8173 default_stabs_asm_out_destructor (symbol, priority);
8179 /* This function places uninitialized global data in the bss section.
8180 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8181 function on the SOM port to prevent uninitialized global data from
8182 being placed in the data section. */
8185 pa_asm_output_aligned_bss (FILE *stream,
8187 unsigned HOST_WIDE_INT size,
8190 switch_to_section (bss_section);
8191 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8193 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8194 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8197 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8198 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8201 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8202 ASM_OUTPUT_LABEL (stream, name);
8203 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8206 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8207 that doesn't allow the alignment of global common storage to be directly
8208 specified. The SOM linker aligns common storage based on the rounded
8209 value of the NUM_BYTES parameter in the .comm directive. It's not
8210 possible to use the .align directive as it doesn't affect the alignment
8211 of the label associated with a .comm directive. */
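/* For example (added; not in the original sources), a 16 byte object
   "buf" requesting 64 byte alignment comes out below roughly as
   "buf\t.comm 64", since the SOM linker derives the alignment from the
   rounded size operand.  */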
8214 pa_asm_output_aligned_common (FILE *stream,
8216 unsigned HOST_WIDE_INT size,
8219 unsigned int max_common_align;
8221 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8222 if (align > max_common_align)
8224 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8225 "for global common data. Using %u",
8226 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8227 align = max_common_align;
8230 switch_to_section (bss_section);
8232 assemble_name (stream, name);
8233 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8234 MAX (size, align / BITS_PER_UNIT));
8237 /* We can't use .comm for local common storage as the SOM linker effectively
8238 treats the symbol as universal and uses the same storage for local symbols
8239 with the same name in different object files. The .block directive
8240 reserves an uninitialized block of storage. However, it's not common
8241 storage. Fortunately, GCC never requests common storage with the same
8242 name in any given translation unit. */
8245 pa_asm_output_aligned_local (FILE *stream,
8247 unsigned HOST_WIDE_INT size,
8250 switch_to_section (bss_section);
8251 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8254 fprintf (stream, "%s", LOCAL_ASM_OP);
8255 assemble_name (stream, name);
8256 fprintf (stream, "\n");
8259 ASM_OUTPUT_LABEL (stream, name);
8260 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8263 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8264 use in fmpysub instructions. */
8266 fmpysuboperands (rtx *operands)
8268 enum machine_mode mode = GET_MODE (operands[0]);
8270 /* Must be a floating point mode. */
8271 if (mode != SFmode && mode != DFmode)
8274 /* All modes must be the same. */
8275 if (! (mode == GET_MODE (operands[1])
8276 && mode == GET_MODE (operands[2])
8277 && mode == GET_MODE (operands[3])
8278 && mode == GET_MODE (operands[4])
8279 && mode == GET_MODE (operands[5])))
8282 /* All operands must be registers. */
8283 if (! (GET_CODE (operands[1]) == REG
8284 && GET_CODE (operands[2]) == REG
8285 && GET_CODE (operands[3]) == REG
8286 && GET_CODE (operands[4]) == REG
8287 && GET_CODE (operands[5]) == REG))
8290 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8291 operation, so operands[4] must be the same as operands[3].
8292 if (! rtx_equal_p (operands[3], operands[4]))
8295 /* multiply cannot feed into subtraction. */
8296 if (rtx_equal_p (operands[5], operands[0]))
8299 /* Inout operand of sub cannot conflict with any operands from multiply. */
8300 if (rtx_equal_p (operands[3], operands[0])
8301 || rtx_equal_p (operands[3], operands[1])
8302 || rtx_equal_p (operands[3], operands[2]))
8305 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8307 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8308 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8309 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8310 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8311 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8312 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8315 /* Passed. Operands are suitable for fmpysub. */
8319 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8320 constants for shadd instructions. */
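/* Illustrative note (added; not in the original sources): sh1add,
   sh2add and sh3add scale their first operand by 2, 4 and 8
   respectively, e.g. "sh2add %r19,%r20,%r21" computes
   %r21 = 4 * %r19 + %r20 (the register choices here are hypothetical).  */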
8322 shadd_constant_p (int val)
8324 if (val == 2 || val == 4 || val == 8)
8330 /* Return 1 if OP is valid as a base or index register in a REG+REG address.  */
8334 borx_reg_operand (rtx op, enum machine_mode mode)
8336 if (GET_CODE (op) != REG)
8339 /* We must reject virtual registers as the only expressions that
8340 can be instantiated are REG and REG+CONST. */
8341 if (op == virtual_incoming_args_rtx
8342 || op == virtual_stack_vars_rtx
8343 || op == virtual_stack_dynamic_rtx
8344 || op == virtual_outgoing_args_rtx
8345 || op == virtual_cfa_rtx)
8348 /* While it's always safe to index off the frame pointer, it's not
8349 profitable to do so when the frame pointer is being eliminated. */
8350 if (!reload_completed
8351 && flag_omit_frame_pointer
8352 && !current_function_calls_alloca
8353 && op == frame_pointer_rtx)
8356 return register_operand (op, mode);
8359 /* Return 1 if this operand is anything other than a hard register. */
8362 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8364 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8367 /* Return 1 if INSN branches forward. Should be using insn_addresses
8368 to avoid walking through all the insns... */
8370 forward_branch_p (rtx insn)
8372 rtx label = JUMP_LABEL (insn);
8379 insn = NEXT_INSN (insn);
8382 return (insn == label);
8385 /* Return 1 if OP is an equality comparison, else return 0. */
8387 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8389 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8392 /* Return 1 if INSN is in the delay slot of a call instruction. */
8394 jump_in_call_delay (rtx insn)
8397 if (GET_CODE (insn) != JUMP_INSN)
8400 if (PREV_INSN (insn)
8401 && PREV_INSN (PREV_INSN (insn))
8402 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8404 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8406 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8407 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8414 /* Output an unconditional move and branch insn. */
8417 output_parallel_movb (rtx *operands, rtx insn)
8419 int length = get_attr_length (insn);
8421 /* These are the cases in which we win. */
8423 return "mov%I1b,tr %1,%0,%2";
8425 /* None of the following cases win, but they don't lose either. */
8428 if (dbr_sequence_length () == 0)
8430 /* Nothing in the delay slot, fake it by putting the combined
8431 insn (the copy or add) in the delay slot of a bl. */
8432 if (GET_CODE (operands[1]) == CONST_INT)
8433 return "b %2\n\tldi %1,%0";
8435 return "b %2\n\tcopy %1,%0";
8439 /* Something in the delay slot, but we've got a long branch. */
8440 if (GET_CODE (operands[1]) == CONST_INT)
8441 return "ldi %1,%0\n\tb %2";
8443 return "copy %1,%0\n\tb %2";
8447 if (GET_CODE (operands[1]) == CONST_INT)
8448 output_asm_insn ("ldi %1,%0", operands);
8450 output_asm_insn ("copy %1,%0", operands);
8451 return output_lbranch (operands[2], insn, 1);
8454 /* Output an unconditional add and branch insn. */
8457 output_parallel_addb (rtx *operands, rtx insn)
8459 int length = get_attr_length (insn);
8461 /* To make life easy we want operand0 to be the shared input/output
8462 operand and operand1 to be the readonly operand. */
8463 if (operands[0] == operands[1])
8464 operands[1] = operands[2];
8466 /* These are the cases in which we win. */
8468 return "add%I1b,tr %1,%0,%3";
8470 /* None of the following cases win, but they don't lose either. */
8473 if (dbr_sequence_length () == 0)
8474 /* Nothing in the delay slot, fake it by putting the combined
8475 insn (the copy or add) in the delay slot of a bl. */
8476 return "b %3\n\tadd%I1 %1,%0,%0";
8477 else
8478 /* Something in the delay slot, but we've got a long branch. */
8479 return "add%I1 %1,%0,%0\n\tb %3";
8480 }
8482 output_asm_insn ("add%I1 %1,%0,%0", operands);
8483 return output_lbranch (operands[3], insn, 1);
8484 }
8486 /* Return nonzero if INSN (a jump insn) immediately follows a call
8487 to a named function. This is used to avoid filling the delay slot
8488 of the jump since it can usually be eliminated by modifying RP in
8489 the delay slot of the call. */
8491 int
8492 following_call (rtx insn)
8493 {
8494 if (! TARGET_JUMP_IN_DELAY)
8495 return 0;
8497 /* Find the previous real insn, skipping NOTEs. */
8498 insn = PREV_INSN (insn);
8499 while (insn && GET_CODE (insn) == NOTE)
8500 insn = PREV_INSN (insn);
8502 /* Check for CALL_INSNs and millicode calls. */
8503 if (insn
8504 && ((GET_CODE (insn) == CALL_INSN
8505 && get_attr_type (insn) != TYPE_DYNCALL)
8506 || (GET_CODE (insn) == INSN
8507 && GET_CODE (PATTERN (insn)) != SEQUENCE
8508 && GET_CODE (PATTERN (insn)) != USE
8509 && GET_CODE (PATTERN (insn)) != CLOBBER
8510 && get_attr_type (insn) == TYPE_MILLI)))
8511 return 1;
8513 return 0;
8514 }
8516 /* We use this hook to perform a PA specific optimization which is difficult
8517 to do in earlier passes.
8519 We want the delay slots of branches within jump tables to be filled.
8520 None of the compiler passes at the moment even has the notion that a
8521 PA jump table doesn't contain addresses, but instead contains actual
8522 code.
8524 Because we actually jump into the table, the addresses of each entry
8525 must stay constant in relation to the beginning of the table (which
8526 itself must stay constant relative to the instruction to jump into
8527 it). I don't believe we can guarantee earlier passes of the compiler
8528 will adhere to those rules.
8530 So, late in the compilation process we find all the jump tables, and
8531 expand them into real code -- e.g. each entry in the jump table vector
8532 will get an appropriate label followed by a jump to the final target.
8534 Reorg and the final jump pass can then optimize these branches and
8535 fill their delay slots. We end up with smaller, more efficient code.
8537 The jump instructions within the table are special; we must be able
8538 to identify them during assembly output (if the jumps don't get filled
8539 we need to emit a nop rather than nullifying the delay slot). We
8540 identify jumps in switch tables by using insns with the attribute
8541 type TYPE_BTABLE_BRANCH.
8543 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8544 insns. This serves two purposes, first it prevents jump.c from
8545 noticing that the last N entries in the table jump to the instruction
8546 immediately after the table and deleting the jumps. Second, those
8547 insns mark where we should emit .begin_brtab and .end_brtab directives
8548 when using GAS (allows for better link time optimizations). */
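/* Illustrative sketch (added for exposition, not part of the original
   sources; the labels are hypothetical): the expansion below turns a
   branch-table insn of the form

       (jump_insn (addr_vec [L$1 L$2 L$3]))

   into a sequence of ordinary labelled jumps, roughly

       L$exp1:  b L$1
       L$exp2:  b L$2
       L$exp3:  b L$3

   each followed by a barrier, so that reorg and the final jump pass can
   fill the delay slot of every entry.  */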
8550 static void
8551 pa_reorg (void)
8552 {
8553 rtx insn;
8555 remove_useless_addtr_insns (1);
8557 if (pa_cpu < PROCESSOR_8000)
8558 pa_combine_instructions ();
8561 /* This is fairly cheap, so always run it if optimizing. */
8562 if (optimize > 0 && !TARGET_BIG_SWITCH)
8563 {
8564 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8565 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8566 {
8567 rtx pattern, tmp, location, label;
8568 unsigned int length, i;
8570 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8571 if (GET_CODE (insn) != JUMP_INSN
8572 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8573 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8574 continue;
8576 /* Emit marker for the beginning of the branch table. */
8577 emit_insn_before (gen_begin_brtab (), insn);
8579 pattern = PATTERN (insn);
8580 location = PREV_INSN (insn);
8581 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8583 for (i = 0; i < length; i++)
8584 {
8585 /* Emit a label before each jump to keep jump.c from
8586 removing this code. */
8587 tmp = gen_label_rtx ();
8588 LABEL_NUSES (tmp) = 1;
8589 emit_label_after (tmp, location);
8590 location = NEXT_INSN (location);
8592 if (GET_CODE (pattern) == ADDR_VEC)
8593 label = XEXP (XVECEXP (pattern, 0, i), 0);
8594 else
8595 label = XEXP (XVECEXP (pattern, 1, i), 0);
8597 tmp = gen_short_jump (label);
8599 /* Emit the jump itself. */
8600 tmp = emit_jump_insn_after (tmp, location);
8601 JUMP_LABEL (tmp) = label;
8602 LABEL_NUSES (label)++;
8603 location = NEXT_INSN (location);
8605 /* Emit a BARRIER after the jump. */
8606 emit_barrier_after (location);
8607 location = NEXT_INSN (location);
8608 }
8610 /* Emit marker for the end of the branch table. */
8611 emit_insn_before (gen_end_brtab (), location);
8612 location = NEXT_INSN (location);
8613 emit_barrier_after (location);
8615 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8616 delete_insn (insn);
8617 }
8618 }
8619 else
8620 {
8621 /* Still need brtab marker insns. FIXME: the presence of these
8622 markers disables output of the branch table to readonly memory,
8623 and any alignment directives that might be needed. Possibly,
8624 the begin_brtab insn should be output before the label for the
8625 table. This doesn't matter at the moment since the tables are
8626 always output in the text section. */
8627 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8628 {
8629 /* Find an ADDR_VEC insn. */
8630 if (GET_CODE (insn) != JUMP_INSN
8631 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8632 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8633 continue;
8635 /* Now generate markers for the beginning and end of the
8636 branch table. */
8637 emit_insn_before (gen_begin_brtab (), insn);
8638 emit_insn_after (gen_end_brtab (), insn);
8639 }
8640 }
8641 }
8643 /* The PA has a number of odd instructions which can perform multiple
8644 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8645 it may be profitable to combine two instructions into one instruction
8646 with two outputs. It's not profitable on PA2.0 machines because the
8647 two outputs would take two slots in the reorder buffers.
8649 This routine finds instructions which can be combined and combines
8650 them. We only support some of the potential combinations, and we
8651 only try common ways to find suitable instructions.
8653 * addb can add two registers or a register and a small integer
8654 and jump to a nearby (+-8k) location. Normally the jump to the
8655 nearby location is conditional on the result of the add, but by
8656 using the "true" condition we can make the jump unconditional.
8657 Thus addb can perform two independent operations in one insn.
8659 * movb is similar to addb in that it can perform a reg->reg
8660 or small immediate->reg copy and jump to a nearby (+-8k) location.
8662 * fmpyadd and fmpysub can perform a FP multiply and either an
8663 FP add or FP sub if the operands of the multiply and add/sub are
8664 independent (there are other minor restrictions). Note both
8665 the fmpy and fadd/fsub can in theory move to better spots according
8666 to data dependencies, but for now we require the fmpy stay at a
8667 fixed location.
8669 * Many of the memory operations can perform pre & post updates
8670 of index registers. GCC's pre/post increment/decrement addressing
8671 is far too simple to take advantage of all the possibilities. This
8672 pass may not be suitable since those insns may not be independent.
8674 * comclr can compare two ints or an int and a register, nullify
8675 the following instruction and zero some other register. This
8676 is more difficult to use as it's harder to find an insn which
8677 will generate a comclr than finding something like an unconditional
8678 branch. (conditional moves & long branches create comclr insns).
8680 * Most arithmetic operations can conditionally skip the next
8681 instruction. They can be viewed as "perform this operation
8682 and conditionally jump to this nearby location" (where nearby
8683 is one insn away). These are difficult to use due to the
8684 branch length restrictions. */
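/* Illustrative example (added for exposition; the register numbers and
   label are hypothetical): on a PA1.1 machine the pass below can merge

       copy %r4,%r26
       b    L$0042

   into the single "move and branch" instruction

       movb,tr %r4,%r26,L$0042

   and, in the same spirit, an independent fmpy/fadd pair into one
   fmpyadd, provided the operands do not interfere.  */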
8686 static void
8687 pa_combine_instructions (void)
8688 {
8689 rtx anchor, new;
8691 /* This can get expensive since the basic algorithm is on the
8692 order of O(n^2) (or worse). Only do it for -O2 or higher
8693 levels of optimization. */
8694 if (optimize < 2)
8695 return;
8697 /* Walk down the list of insns looking for "anchor" insns which
8698 may be combined with "floating" insns. As the name implies,
8699 "anchor" instructions don't move, while "floating" insns may
8700 move around. */
8701 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8702 new = make_insn_raw (new);
8704 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8705 {
8706 enum attr_pa_combine_type anchor_attr;
8707 enum attr_pa_combine_type floater_attr;
8709 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8710 Also ignore any special USE insns. */
8711 if ((GET_CODE (anchor) != INSN
8712 && GET_CODE (anchor) != JUMP_INSN
8713 && GET_CODE (anchor) != CALL_INSN)
8714 || GET_CODE (PATTERN (anchor)) == USE
8715 || GET_CODE (PATTERN (anchor)) == CLOBBER
8716 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8717 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8718 continue;
8720 anchor_attr = get_attr_pa_combine_type (anchor);
8721 /* See if anchor is an insn suitable for combination. */
8722 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8723 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8724 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8725 && ! forward_branch_p (anchor)))
8726 {
8727 rtx floater;
8729 for (floater = PREV_INSN (anchor);
8730 floater;
8731 floater = PREV_INSN (floater))
8732 {
8733 if (GET_CODE (floater) == NOTE
8734 || (GET_CODE (floater) == INSN
8735 && (GET_CODE (PATTERN (floater)) == USE
8736 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8737 continue;
8739 /* Anything except a regular INSN will stop our search. */
8740 if (GET_CODE (floater) != INSN
8741 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8742 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8743 {
8744 floater = NULL_RTX;
8745 break;
8746 }
8748 /* See if FLOATER is suitable for combination with the
8749 anchor. */
8750 floater_attr = get_attr_pa_combine_type (floater);
8751 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8752 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8753 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8754 && floater_attr == PA_COMBINE_TYPE_FMPY))
8755 {
8756 /* If ANCHOR and FLOATER can be combined, then we're
8757 done with this pass. */
8758 if (pa_can_combine_p (new, anchor, floater, 0,
8759 SET_DEST (PATTERN (floater)),
8760 XEXP (SET_SRC (PATTERN (floater)), 0),
8761 XEXP (SET_SRC (PATTERN (floater)), 1)))
8762 break;
8763 }
8765 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8766 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8767 {
8768 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8769 {
8770 if (pa_can_combine_p (new, anchor, floater, 0,
8771 SET_DEST (PATTERN (floater)),
8772 XEXP (SET_SRC (PATTERN (floater)), 0),
8773 XEXP (SET_SRC (PATTERN (floater)), 1)))
8774 break;
8775 }
8776 else
8777 {
8778 if (pa_can_combine_p (new, anchor, floater, 0,
8779 SET_DEST (PATTERN (floater)),
8780 SET_SRC (PATTERN (floater)),
8781 SET_SRC (PATTERN (floater))))
8782 break;
8783 }
8784 }
8785 }
8787 /* If we didn't find anything on the backwards scan try forwards. */
8788 if (!floater
8789 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8790 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8791 {
8792 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8793 {
8794 if (GET_CODE (floater) == NOTE
8795 || (GET_CODE (floater) == INSN
8796 && (GET_CODE (PATTERN (floater)) == USE
8797 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8798 continue;
8801 /* Anything except a regular INSN will stop our search. */
8802 if (GET_CODE (floater) != INSN
8803 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8804 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8805 {
8806 floater = NULL_RTX;
8807 break;
8808 }
8810 /* See if FLOATER is suitable for combination with the
8811 anchor. */
8812 floater_attr = get_attr_pa_combine_type (floater);
8813 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8814 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8815 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8816 && floater_attr == PA_COMBINE_TYPE_FMPY))
8817 {
8818 /* If ANCHOR and FLOATER can be combined, then we're
8819 done with this pass. */
8820 if (pa_can_combine_p (new, anchor, floater, 1,
8821 SET_DEST (PATTERN (floater)),
8822 XEXP (SET_SRC (PATTERN (floater)),
8823 0),
8824 XEXP (SET_SRC (PATTERN (floater)),
8825 1)))
8826 break;
8827 }
8828 }
8829 }
8831 /* FLOATER will be nonzero if we found a suitable floating
8832 insn for combination with ANCHOR. */
8833 if (floater
8834 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8835 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8836 {
8837 /* Emit the new instruction and delete the old anchor. */
8838 emit_insn_before (gen_rtx_PARALLEL
8839 (VOIDmode,
8840 gen_rtvec (2, PATTERN (anchor),
8841 PATTERN (floater))),
8842 anchor);
8844 PUT_CODE (anchor, NOTE);
8845 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8846 NOTE_SOURCE_FILE (anchor) = 0;
8848 /* Emit a special USE insn for FLOATER, then delete
8849 the floating insn. */
8850 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8851 delete_insn (floater);
8853 continue;
8854 }
8855 else if (floater
8856 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8857 {
8858 rtx temp;
8859 /* Emit the new_jump instruction and delete the old anchor. */
8860 temp
8861 = emit_jump_insn_before (gen_rtx_PARALLEL
8862 (VOIDmode,
8863 gen_rtvec (2, PATTERN (anchor),
8864 PATTERN (floater))),
8865 anchor);
8867 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8868 PUT_CODE (anchor, NOTE);
8869 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8870 NOTE_SOURCE_FILE (anchor) = 0;
8872 /* Emit a special USE insn for FLOATER, then delete
8873 the floating insn. */
8874 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8875 delete_insn (floater);
8876 continue;
8877 }
8878 }
8879 }
8880 }
8882 static int
8883 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8884 rtx src1, rtx src2)
8885 {
8886 int insn_code_number;
8887 rtx start, end;
8889 /* Create a PARALLEL with the patterns of ANCHOR and
8890 FLOATER, try to recognize it, then test constraints
8891 for the resulting pattern.
8893 If the pattern doesn't match or the constraints
8894 aren't met keep searching for a suitable floater
8895 insn. */
8896 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8897 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8898 INSN_CODE (new) = -1;
8899 insn_code_number = recog_memoized (new);
8900 if (insn_code_number < 0
8901 || (extract_insn (new), ! constrain_operands (1)))
8902 return 0;
8904 if (reversed)
8905 {
8906 start = anchor;
8907 end = floater;
8908 }
8909 else
8910 {
8911 start = floater;
8912 end = anchor;
8913 }
8915 /* There are up to three operands to consider: one
8916 output and two inputs.
8918 The output must not be used between FLOATER & ANCHOR
8919 exclusive. The inputs must not be set between
8920 FLOATER and ANCHOR exclusive. */
8922 if (reg_used_between_p (dest, start, end))
8923 return 0;
8925 if (reg_set_between_p (src1, start, end))
8926 return 0;
8928 if (reg_set_between_p (src2, start, end))
8929 return 0;
8931 /* If we get here, then everything is good. */
8932 return 1;
8933 }
8935 /* Return nonzero if references for INSN are delayed.
8937 Millicode insns are actually function calls with some special
8938 constraints on arguments and register usage.
8940 Millicode calls always expect their arguments in the integer argument
8941 registers, and always return their result in %r29 (ret1). They
8942 are expected to clobber their arguments, %r1, %r29, and the return
8943 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8945 This function tells reorg that the references to arguments and
8946 millicode calls do not appear to happen until after the millicode call.
8947 This allows reorg to put insns which set the argument registers into the
8948 delay slot of the millicode call -- thus they act more like traditional
8949 CALL_INSNs.
8951 Note we cannot consider side effects of the insn to be delayed because
8952 the branch and link insn will clobber the return pointer. If we happened
8953 to use the return pointer in the delay slot of the call, then we lose.
8955 get_attr_type will try to recognize the given insn, so make sure to
8956 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
8957 in particular. */
8958 int
8959 insn_refs_are_delayed (rtx insn)
8960 {
8961 return ((GET_CODE (insn) == INSN
8962 && GET_CODE (PATTERN (insn)) != SEQUENCE
8963 && GET_CODE (PATTERN (insn)) != USE
8964 && GET_CODE (PATTERN (insn)) != CLOBBER
8965 && get_attr_type (insn) == TYPE_MILLI));
8966 }
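/* Worked example (illustrative only; the exact assembly is a sketch):
   because argument references are treated as delayed, reorg may turn

       ldi 10,%r25
       bl $$mulI,%r31
       nop

   into

       bl $$mulI,%r31
       ldi 10,%r25        ; argument set up in the delay slot

   which is precisely the scheduling opportunity described above.  */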
8968 /* On the HP-PA the value is found in register(s) 28(-29), unless
8969 the mode is SF or DF. Then the value is returned in fr4 (32).
8971 This must perform the same promotions as PROMOTE_MODE, else
8972 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8974 Small structures must be returned in a PARALLEL on PA64 in order
8975 to match the HP Compiler ABI. */
8977 rtx
8978 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8979 {
8980 enum machine_mode valmode;
8982 if (AGGREGATE_TYPE_P (valtype)
8983 || TREE_CODE (valtype) == COMPLEX_TYPE
8984 || TREE_CODE (valtype) == VECTOR_TYPE)
8985 {
8986 if (TARGET_64BIT)
8987 {
8988 /* Aggregates with a size less than or equal to 128 bits are
8989 returned in GR 28(-29). They are left justified. The pad
8990 bits are undefined. Larger aggregates are returned in
8991 memory. */
8992 rtx loc[2];
8993 int i, offset = 0;
8994 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8996 for (i = 0; i < ub; i++)
8997 {
8998 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8999 gen_rtx_REG (DImode, 28 + i),
9000 GEN_INT (offset));
9001 offset += 8;
9002 }
9004 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9005 }
9006 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9007 {
9008 /* Aggregates 5 to 8 bytes in size are returned in general
9009 registers r28-r29 in the same manner as other non
9010 floating-point objects. The data is right-justified and
9011 zero-extended to 64 bits. This is opposite to the normal
9012 justification used on big endian targets and requires
9013 special treatment. */
9014 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9015 gen_rtx_REG (DImode, 28), const0_rtx);
9016 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9017 }
9018 }
9020 if ((INTEGRAL_TYPE_P (valtype)
9021 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
9022 || POINTER_TYPE_P (valtype))
9023 valmode = word_mode;
9024 else
9025 valmode = TYPE_MODE (valtype);
9027 if (TREE_CODE (valtype) == REAL_TYPE
9028 && !AGGREGATE_TYPE_P (valtype)
9029 && TYPE_MODE (valtype) != TFmode
9030 && !TARGET_SOFT_FLOAT)
9031 return gen_rtx_REG (valmode, 32);
9033 return gen_rtx_REG (valmode, 28);
9034 }
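/* Quick reference (illustrative summary of the code above): an integer
   or pointer result comes back in %r28, a float or double in %fr4
   (register 32) unless -msoft-float is in effect, and on TARGET_64BIT a
   small aggregate comes back left justified in a PARALLEL over
   %r28/%r29.  */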
9036 /* Return the location of a parameter that is passed in a register or NULL
9037 if the parameter has any component that is passed in memory.
9039 This is new code and will be pushed into the net sources after
9042 ??? We might want to restructure this so that it looks more like other
9043 ports. */
9044 rtx
9045 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9046 int named ATTRIBUTE_UNUSED)
9047 {
9048 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9049 int alignment = 0;
9050 int arg_size;
9051 int fpr_reg_base;
9052 int gpr_reg_base;
9053 rtx retval;
9055 if (mode == VOIDmode)
9056 return NULL_RTX;
9058 arg_size = FUNCTION_ARG_SIZE (mode, type);
9060 /* If this arg would be passed partially or totally on the stack, then
9061 this routine should return zero. pa_arg_partial_bytes will
9062 handle arguments which are split between regs and stack slots if
9063 the ABI mandates split arguments. */
9064 if (!TARGET_64BIT)
9065 {
9066 /* The 32-bit ABI does not split arguments. */
9067 if (cum->words + arg_size > max_arg_words)
9068 return NULL_RTX;
9069 }
9070 else
9071 {
9072 if (arg_size > 1)
9073 alignment = cum->words & 1;
9074 if (cum->words + alignment >= max_arg_words)
9075 return NULL_RTX;
9076 }
9078 /* The 32bit ABIs and the 64bit ABIs are rather different,
9079 particularly in their handling of FP registers. We might
9080 be able to cleverly share code between them, but I'm not
9081 going to bother in the hope that splitting them up results
9082 in code that is more easily understood. */
9084 if (TARGET_64BIT)
9085 {
9086 /* Advance the base registers to their current locations.
9088 Remember, gprs grow towards smaller register numbers while
9089 fprs grow to higher register numbers. Also remember that
9090 although FP regs are 32-bit addressable, we pretend that
9091 the registers are 64-bits wide. */
9092 gpr_reg_base = 26 - cum->words;
9093 fpr_reg_base = 32 + cum->words;
9095 /* Arguments wider than one word and small aggregates need special
9096 treatment. */
9097 if (arg_size > 1
9098 || mode == BLKmode
9099 || (type && (AGGREGATE_TYPE_P (type)
9100 || TREE_CODE (type) == COMPLEX_TYPE
9101 || TREE_CODE (type) == VECTOR_TYPE)))
9102 {
9103 /* Double-extended precision (80-bit), quad-precision (128-bit)
9104 and aggregates including complex numbers are aligned on
9105 128-bit boundaries. The first eight 64-bit argument slots
9106 are associated one-to-one, with general registers r26
9107 through r19, and also with floating-point registers fr4
9108 through fr11. Arguments larger than one word are always
9109 passed in general registers.
9111 Using a PARALLEL with a word mode register results in left
9112 justified data on a big-endian target. */
9114 rtx loc[8];
9115 int i, offset = 0, ub = arg_size;
9117 /* Align the base register. */
9118 gpr_reg_base -= alignment;
9120 ub = MIN (ub, max_arg_words - cum->words - alignment);
9121 for (i = 0; i < ub; i++)
9122 {
9123 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9124 gen_rtx_REG (DImode, gpr_reg_base),
9125 GEN_INT (offset));
9126 gpr_reg_base -= 1;
9127 offset += 8;
9128 }
9130 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9131 }
9132 }
9133 else
9134 {
9135 /* If the argument is larger than a word, then we know precisely
9136 which registers we must use. */
9150 /* Structures 5 to 8 bytes in size are passed in the general
9151 registers in the same manner as other non floating-point
9152 objects. The data is right-justified and zero-extended
9153 to 64 bits. This is opposite to the normal justification
9154 used on big endian targets and requires special treatment.
9155 We now define BLOCK_REG_PADDING to pad these objects.
9156 Aggregates, complex and vector types are passed in the same
9157 manner as structures. */
9159 || (type && (AGGREGATE_TYPE_P (type)
9160 || TREE_CODE (type) == COMPLEX_TYPE
9161 || TREE_CODE (type) == VECTOR_TYPE)))
9162 {
9163 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9164 gen_rtx_REG (DImode, gpr_reg_base),
9166 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9167 }
9168 }
9169 else
9170 {
9171 /* We have a single word (32 bits). A simple computation
9172 will get us the register #s we need. */
9173 gpr_reg_base = 26 - cum->words;
9174 fpr_reg_base = 32 + 2 * cum->words;
9175 }
9176 }
9178 /* Determine if the argument needs to be passed in both general and
9179 floating point registers. */
9180 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9181 /* If we are doing soft-float with portable runtime, then there
9182 is no need to worry about FP regs. */
9183 && !TARGET_SOFT_FLOAT
9184 /* The parameter must be some kind of scalar float, else we just
9185 pass it in integer registers. */
9186 && GET_MODE_CLASS (mode) == MODE_FLOAT
9187 /* The target function must not have a prototype. */
9188 && cum->nargs_prototype <= 0
9189 /* libcalls do not need to pass items in both FP and general
9190 registers. */
9191 && type != NULL_TREE
9192 /* All this hair applies to "outgoing" args only. This includes
9193 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9194 && !cum->incoming)
9195 /* Also pass outgoing floating arguments in both registers in indirect
9196 calls with the 32 bit ABI and the HP assembler since there is no
9197 way to specify the argument locations in static functions. */
9198 || (!TARGET_64BIT
9199 && !TARGET_GAS
9200 && !cum->incoming
9201 && cum->indirect
9202 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9203 {
9204 retval
9205 = gen_rtx_PARALLEL
9206 (mode,
9207 gen_rtvec (2,
9208 gen_rtx_EXPR_LIST (VOIDmode,
9209 gen_rtx_REG (mode, fpr_reg_base),
9210 const0_rtx),
9211 gen_rtx_EXPR_LIST (VOIDmode,
9212 gen_rtx_REG (mode, gpr_reg_base),
9213 const0_rtx)));
9214 }
9215 else
9216 {
9217 /* See if we should pass this parameter in a general register. */
9218 if (TARGET_SOFT_FLOAT
9219 /* Indirect calls in the normal 32bit ABI require all arguments
9220 to be passed in general registers. */
9221 || (!TARGET_PORTABLE_RUNTIME
9222 && !TARGET_64BIT
9223 && !TARGET_HPUX_LD
9224 && cum->indirect)
9225 /* If the parameter is not a scalar floating-point parameter,
9226 then it belongs in GPRs. */
9227 || GET_MODE_CLASS (mode) != MODE_FLOAT
9228 /* Structure with single SFmode field belongs in GPR. */
9229 || (type && AGGREGATE_TYPE_P (type)))
9230 retval = gen_rtx_REG (mode, gpr_reg_base);
9231 else
9232 retval = gen_rtx_REG (mode, fpr_reg_base);
9233 }
9235 return retval;
9236 }
9238 /* If this arg would be passed totally in registers or totally on the stack,
9239 then this routine should return zero. */
9241 static int
9242 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9243 tree type, bool named ATTRIBUTE_UNUSED)
9244 {
9245 unsigned int max_arg_words = 8;
9246 unsigned int offset = 0;
9248 if (!TARGET_64BIT)
9249 return 0;
9251 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9252 offset = 1;
9254 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9255 /* Arg fits fully into registers. */
9256 return 0;
9257 else if (cum->words + offset >= max_arg_words)
9258 /* Arg fully on the stack. */
9259 return 0;
9260 else
9261 /* Arg is split. */
9262 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
9263 }
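/* Worked example (illustrative only, TARGET_64BIT): with max_arg_words
   of 8, an argument whose FUNCTION_ARG_SIZE is 4 arriving at
   cum->words == 6 neither fits fully in registers (6 + 4 > 8) nor lies
   fully on the stack (6 < 8), so it is split and this routine returns
   (8 - 6) * UNITS_PER_WORD == 16 bytes passed in registers.  */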
9266 /* A get_unnamed_section callback for switching to the text section.
9268 This function is only used with SOM. Because we don't support
9269 named subspaces, we can only create a new subspace or switch back
9270 to the default text subspace. */
9272 static void
9273 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9274 {
9275 gcc_assert (TARGET_SOM);
9276 if (TARGET_GAS)
9277 {
9278 if (cfun && !cfun->machine->in_nsubspa)
9279 {
9280 /* We only want to emit a .nsubspa directive once at the
9281 start of the function. */
9282 cfun->machine->in_nsubspa = 1;
9284 /* Create a new subspace for the text. This provides
9285 better stub placement and one-only functions. */
9286 if (cfun->decl
9287 && DECL_ONE_ONLY (cfun->decl)
9288 && !DECL_WEAK (cfun->decl))
9289 {
9290 output_section_asm_op ("\t.SPACE $TEXT$\n"
9291 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9292 "ACCESS=44,SORT=24,COMDAT");
9293 return;
9294 }
9295 }
9296 else
9297 {
9298 /* There isn't a current function or the body of the current
9299 function has been completed. So, we are changing to the
9300 text section to output debugging information. Thus, we
9301 need to forget that we are in the text section so that
9302 varasm.c will call us when text_section is selected again. */
9303 gcc_assert (!cfun || cfun->machine->in_nsubspa == 2);
9304 in_section = NULL;
9305 }
9306 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9307 return;
9308 }
9309 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9310 }
9312 /* A get_unnamed_section callback for switching to comdat data
9313 sections. This function is only used with SOM. */
9315 static void
9316 som_output_comdat_data_section_asm_op (const void *data)
9317 {
9318 in_section = NULL;
9319 output_section_asm_op (data);
9320 }
9322 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
9324 static void
9325 pa_som_asm_init_sections (void)
9326 {
9327 text_section
9328 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9330 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9331 is not being generated. */
9332 som_readonly_data_section
9333 = get_unnamed_section (0, output_section_asm_op,
9334 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9336 /* When secondary definitions are not supported, SOM makes readonly
9337 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9338 the comdat flag. */
9339 som_one_only_readonly_data_section
9340 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9341 "\t.SPACE $TEXT$\n"
9342 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9343 "ACCESS=0x2c,SORT=16,COMDAT");
9346 /* When secondary definitions are not supported, SOM makes data one-only
9347 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9348 som_one_only_data_section
9349 = get_unnamed_section (SECTION_WRITE,
9350 som_output_comdat_data_section_asm_op,
9351 "\t.SPACE $PRIVATE$\n"
9352 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9353 "ACCESS=31,SORT=24,COMDAT");
9355 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9356 which reference data within the $TEXT$ space (for example constant
9357 strings in the $LIT$ subspace).
9359 The assemblers (GAS and HP as) both have problems with handling
9360 the difference of two symbols which is the other correct way to
9361 reference constant data during PIC code generation.
9363 So, there's no way to reference constant data which is in the
9364 $TEXT$ space during PIC generation. Instead place all constant
9365 data into the $PRIVATE$ subspace (this reduces sharing, but it
9366 works correctly). */
9367 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9369 /* We must not have a reference to an external symbol defined in a
9370 shared library in a readonly section, else the SOM linker will
9371 complain.
9373 So, we force exception information into the data section. */
9374 exception_section = data_section;
9375 }
9377 /* On hpux10, the linker will give an error if we have a reference
9378 in the read-only data section to a symbol defined in a shared
9379 library. Therefore, expressions that might require a reloc can
9380 not be placed in the read-only data section. */
9382 static section *
9383 pa_select_section (tree exp, int reloc,
9384 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9385 {
9386 if (TREE_CODE (exp) == VAR_DECL
9387 && TREE_READONLY (exp)
9388 && !TREE_THIS_VOLATILE (exp)
9389 && DECL_INITIAL (exp)
9390 && (DECL_INITIAL (exp) == error_mark_node
9391 || TREE_CONSTANT (DECL_INITIAL (exp)))
9392 && !reloc)
9393 {
9394 if (TARGET_SOM
9395 && DECL_ONE_ONLY (exp)
9396 && !DECL_WEAK (exp))
9397 return som_one_only_readonly_data_section;
9398 else
9399 return readonly_data_section;
9400 }
9401 else if (CONSTANT_CLASS_P (exp) && !reloc)
9402 return readonly_data_section;
9403 else if (TARGET_SOM
9404 && TREE_CODE (exp) == VAR_DECL
9405 && DECL_ONE_ONLY (exp)
9406 && !DECL_WEAK (exp))
9407 return som_one_only_data_section;
9408 else
9409 return data_section;
9410 }
9412 static void
9413 pa_globalize_label (FILE *stream, const char *name)
9414 {
9415 /* We only handle DATA objects here, functions are globalized in
9416 ASM_DECLARE_FUNCTION_NAME. */
9417 if (! FUNCTION_NAME_P (name))
9418 {
9419 fputs ("\t.EXPORT ", stream);
9420 assemble_name (stream, name);
9421 fputs (",DATA\n", stream);
9422 }
9423 }
9425 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9427 static rtx
9428 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9429 int incoming ATTRIBUTE_UNUSED)
9430 {
9431 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9432 }
9434 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9436 static bool
9437 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9438 {
9439 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9440 PA64 ABI says that objects larger than 128 bits are returned in memory.
9441 Note, int_size_in_bytes can return -1 if the size of the object is
9442 variable or larger than the maximum value that can be expressed as
9443 a HOST_WIDE_INT. It can also return zero for an empty type. The
9444 simplest way to handle variable and empty types is to pass them in
9445 memory. This avoids problems in defining the boundaries of argument
9446 slots, allocating registers, etc. */
9447 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9448 || int_size_in_bytes (type) <= 0);
9449 }
9451 /* Structure to hold declaration and name of external symbols that are
9452 emitted by GCC. We generate a vector of these symbols and output them
9453 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9454 This avoids putting out names that are never really used. */
9456 typedef struct extern_symbol GTY(())
9457 {
9458 tree decl;
9459 const char *name;
9460 } extern_symbol;
9462 /* Define gc'd vector type for extern_symbol. */
9463 DEF_VEC_O(extern_symbol);
9464 DEF_VEC_ALLOC_O(extern_symbol,gc);
9466 /* Vector of extern_symbol pointers. */
9467 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9469 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9470 /* Mark DECL (name NAME) as an external reference (assembler output
9471 file FILE). This saves the names to output at the end of the file
9472 if actually referenced. */
9474 void
9475 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9476 {
9477 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9479 gcc_assert (file == asm_out_file);
9480 p->decl = decl;
9481 p->name = name;
9482 }
9484 /* Output text required at the end of an assembler file.
9485 This includes deferred plabels and .import directives for
9486 all external symbols that were actually referenced. */
9488 static void
9489 pa_hpux_file_end (void)
9490 {
9491 unsigned int i;
9492 extern_symbol *p;
9494 if (!NO_DEFERRED_PROFILE_COUNTERS)
9495 output_deferred_profile_counters ();
9497 output_deferred_plabels ();
9499 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9500 {
9501 tree decl = p->decl;
9503 if (!TREE_ASM_WRITTEN (decl)
9504 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9505 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9506 }
9508 VEC_free (extern_symbol, gc, extern_symbols);
9509 }