/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of 
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || (get_attr_type (in_insn) != TYPE_FPSTORE
	  && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
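
/* Illustrative note (an addition, not from the original source): the
   test above admits a bypass only when the value computed by OUT_INSN
   and the value stored by IN_INSN have the same width -- e.g. a DFmode
   result feeding a doubleword fp store -- but not when a wider result
   feeds a narrower store, where the extra forwarding path would not
   apply.  */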
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
					   enum machine_mode,
					   secondary_reload_info *);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}, where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
#if TARGET_HPUX
  if (built_in_decls [BUILT_IN_FINITE])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
  if (built_in_decls [BUILT_IN_FINITEF])
    set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
#endif
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}

/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
	  || CONST_OK_FOR_LETTER_P (intval, 'N')
	  || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
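
/* Illustrative note (an assumption about the constraint letters, not
   from the original source): in this port 'J' roughly corresponds to a
   signed 14-bit immediate (the ldo displacement field), 'N' to a
   constant that a single ldil can materialize, and 'K' to a constant
   accepted by zdepi_cint_p below.  */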
/* Return truth value of whether OP can be used as an operand in a
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5-bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
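
/* Worked example (illustrative, not from the original source): for
   x = 0x01f0, lsb_mask = 0x10, (x >> 4) + lsb_mask = 0x2f, and masking
   with ~(lsb_mask - 1) gives t = 0x20, a power of two, so the value is
   accepted -- it is a shifted, sign-extended 5-bit field.  A value
   such as 0x21 (bits 0 and 5 set) yields t = 3 and is rejected, since
   its set bits cannot come from one sign-extended 5-bit deposit.  */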
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}

/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
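
/* Worked example (illustrative, not from the original source): in
   ior_mask_p, mask = 0x00f0 gives mask & -mask = 0x10, and adding it
   carries the contiguous run away: 0x00f0 + 0x10 = 0x0100, a power of
   two, so the mask is accepted.  mask = 0x0110 fails: 0x0110 + 0x10 =
   0x0120, which still has two bits set.  and_mask_p runs the same
   test on the complement, so it accepts masks whose zero bits form
   one contiguous run, matching the patterns listed above.  */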
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
	= gen_const_mem (Pmode,
			 gen_rtx_LO_SUM (Pmode, tmp_reg,
					 gen_rtx_UNSPEC (Pmode,
							 gen_rtvec (1, orig),
							 UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      set_unique_reg_note (insn, REG_EQUAL, orig);

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

  switch (model)
    {
      case TLS_MODEL_GLOBAL_DYNAMIC:
	tmp = gen_reg_rtx (Pmode);
	if (flag_pic)
	  emit_insn (gen_tgd_load_pic (tmp, addr));
	else
	  emit_insn (gen_tgd_load (tmp, addr));
	ret = hppa_tls_call (tmp);
	break;

      case TLS_MODEL_LOCAL_DYNAMIC:
	ret = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	start_sequence ();
	if (flag_pic)
	  emit_insn (gen_tld_load_pic (tmp, addr));
	else
	  emit_insn (gen_tld_load (tmp, addr));
	t1 = hppa_tls_call (tmp);
	insn = get_insns ();
	end_sequence ();
	t2 = gen_reg_rtx (Pmode);
	emit_libcall_block (insn, t2, t1,
			    gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					    UNSPEC_TLSLDBASE));
	emit_insn (gen_tld_offset_load (ret, addr, t2));
	break;

      case TLS_MODEL_INITIAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	tmp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	if (flag_pic)
	  emit_insn (gen_tie_load_pic (tmp, addr));
	else
	  emit_insn (gen_tie_load (tmp, addr));
	emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
	break;

      case TLS_MODEL_LOCAL_EXEC:
	tp = gen_reg_rtx (Pmode);
	ret = gen_reg_rtx (Pmode);
	emit_insn (gen_tp_load (tp));
	emit_insn (gen_tle_load (ret, addr, tp));
	break;

      default:
	gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */

rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;
  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }
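
  /* Worked example (illustrative, not from the original source): for a
     MODE_INT reference to sym+0x5678, mask = 0x3fff, so offset & mask
     = 0x1678 < 0x2000 and we round down to newoffset = 0x4000.  The
     code above materializes Z = sym + 0x4000 in a register and rewrites
     the address as Z + 0x1678, whose displacement fits the 14-bit ldo
     field; a nearby reference such as sym+0x5ffc can then CSE the same
     Z.  */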
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }
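
  /* Illustrative note (an addition, not from the original source): the
     shadd_constant_p scale factors are 2, 4 and 8, matching the PA
     sh1add, sh2add and sh3add instructions, so a (plus (mult A 4) B)
     form like the one built above can later be emitted as a single
     sh2add-style instruction.  */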
  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;

    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}

/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  rtx tem;

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand0, 0),
								   0),
						       scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.   Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
								        0)),
						       Pmode,
						       XEXP (XEXP (operand1, 0),
						       0),
						       scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     OPERAND0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }
	  else if (flag_pic)
	    {
	      rtx temp;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);
	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }
	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx insn, temp;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we
		 can use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }

	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;

	      if (low >= 0x2000)
		{
		  if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
		    high += 0x2000;
		  else
		    high += 0x4000;
		}

	      low = value - high;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
	      operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	    }
	  else
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, temp,
				      gen_rtx_HIGH (mode, operand1)));
	      operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
	    }

	  insn = emit_move_insn (operands[0], operands[1]);

	  /* Now insert the most significant 32 bits of the value
	     into the register.  When we don't have a second register
	     available, it could take up to nine instructions to load
	     a 64-bit integer constant.  Prior to reload, we force
	     constants that would take more than three instructions
	     to load to the constant pool.  During and after reload,
	     we have to handle all possible values.  */
	  if (insert)
	    {
	      /* Use a HIGH/LO_SUM/INSV sequence if we have a second
		 register and the value to be inserted is outside the
		 range that can be loaded with three depdi instructions.  */
	      if (temp != operand0 && (insv >= 16384 || insv < -16384))
		{
		  operand1 = GEN_INT (insv);

		  emit_insn (gen_rtx_SET (VOIDmode, temp,
					  gen_rtx_HIGH (mode, operand1)));
		  emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
		  emit_insn (gen_insv (operand0, GEN_INT (32),
				       const0_rtx, temp));
		}
	      else
		{
		  int len = 5, pos = 27;

		  /* Insert the bits using the depdi instruction.  */
		  while (pos >= 0)
		    {
		      HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
		      HOST_WIDE_INT sign = v5 < 0;

		      /* Left extend the insertion.  */
		      insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
		      while (pos > 0 && (insv & 1) == sign)
			{
			  insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
			  len += 1;
			  pos -= 1;
			}

		      emit_insn (gen_insv (operand0, GEN_INT (len),
					   GEN_INT (pos), GEN_INT (v5)));

		      len = pos > 0 && pos < 5 ? pos : 5;
		      pos -= len;
		    }
		}
	    }

	  set_unique_reg_note (insn, REG_EQUAL, op1);

	  return 1;
	}
    }
  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
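
/* Illustrative note (an addition, not from the original source): on
   TARGET_64BIT the path above splits a 64-bit constant into a
   sign-extended low 32-bit part, moved by the ordinary HIGH/LO_SUM or
   PLUS split, and a high part inserted afterwards with insv (depdi),
   scanning downward from bit position 27 in 5-bit chunks and widening
   each chunk over runs of identical bits so that only a few depdi
   instructions are emitted.  */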
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= reloc_needed (TREE_OPERAND (exp, 1));
      break;

    case NOP_EXPR:
    case CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
	tree value;
	unsigned HOST_WIDE_INT ix;

	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
	  if (value)
	    reloc |= reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}
/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
   will be true.  */

int
read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (operand) == CONST)
    operand = XEXP (XEXP (operand, 0), 0);
  if (flag_pic)
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
    }
  else
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
    }
  return 1;
}
/* Return the best assembler insn template
for moving operands[1] into operands[0] as a fullword.  */
singlemove_string (rtx *operands)
HOST_WIDE_INT intval;
if (GET_CODE (operands[0]) == MEM)
return "stw %r1,%0";
if (GET_CODE (operands[1]) == MEM)
if (GET_CODE (operands[1]) == CONST_DOUBLE)
gcc_assert (GET_MODE (operands[1]) == SFmode);
/* Translate the CONST_DOUBLE to a CONST_INT with the same target
bit pattern.  */
REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
REAL_VALUE_TO_TARGET_SINGLE (d, i);
operands[1] = GEN_INT (i);
/* Fall through to CONST_INT case.  */
if (GET_CODE (operands[1]) == CONST_INT)
intval = INTVAL (operands[1]);
if (VAL_14_BITS_P (intval))
else if ((intval & 0x7ff) == 0)
return "ldil L'%1,%0";
else if (zdepi_cint_p (intval))
return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
return "copy %1,%0";
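/* Illustrative aside (not part of this port's logic): the final
   ldil/ldo template above splits a constant into a left part for
   ldil and a low 11-bit right part for ldo, mirroring the
   (intval & 0x7ff) test.  A standalone sketch of the simple unsigned
   split; the helper name and the unsigned-only treatment are our
   assumptions.  */

static void
ldil_ldo_split_sketch (unsigned int value,
                       unsigned int *left, unsigned int *right)
{
  *right = value & 0x7ff;    /* low 11 bits, added back by ldo */
  *left = value & ~0x7ff;    /* upper 21 bits, loaded by ldil  */
  /* ldil L'value,reg then ldo R'value(reg),reg rebuilds VALUE.  */
}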
/* Compute position (in OP[1]) and width (in OP[2])
useful for copying IMM to a register using the zdepi
instructions.  Store the immediate value to insert in OP[0].  */
compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
/* Find the least significant set bit in IMM.  */
for (lsb = 0; lsb < 32; lsb++)
/* Choose variants based on *sign* of the 5-bit field.  */
if ((imm & 0x10) == 0)
len = (lsb <= 28) ? 4 : 32 - lsb;
/* Find the width of the bitstring in IMM.  */
for (len = 5; len < 32; len++)
if ((imm & (1 << len)) == 0)
/* Sign extend IMM as a 5-bit value.  */
imm = (imm & 0xf) - 0x10;
/* Compute position (in OP[1]) and width (in OP[2])
useful for copying IMM to a register using the depdi,z
instructions.  Store the immediate value to insert in OP[0].  */
compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
HOST_WIDE_INT lsb, len;
/* Find the least significant set bit in IMM.  */
for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
/* Choose variants based on *sign* of the 5-bit field.  */
if ((imm & 0x10) == 0)
len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
? 4 : HOST_BITS_PER_WIDE_INT - lsb);
/* Find the width of the bitstring in IMM.  */
for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
/* Sign extend IMM as a 5-bit value.  */
imm = (imm & 0xf) - 0x10;
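/* Illustrative aside (not part of this port's logic): what the
   immediate/position/length values computed above encode.  A
   zdepi-style deposit places the LEN low-order bits of the
   sign-extended 5-bit immediate LSB bits up from the least
   significant end and zeros the rest.  A standalone sketch assuming
   32-bit unsigned arithmetic; the helper name is ours.  */

static unsigned int
zdepi_reconstruct_sketch (int imm5, unsigned int lsb, unsigned int len)
{
  unsigned int mask = len >= 32 ? ~0u : (1u << len) - 1;
  unsigned int field = (unsigned int) imm5 & mask;  /* keep LEN bits */

  /* E.g. imm5 = -1, lsb = 4, len = 6 reconstructs 0x3f0.  */
  return field << lsb;
}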
/* Output assembler code to perform a doubleword move insn
with operands OPERANDS.  */
output_move_double (rtx *operands)
enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
rtx addreg0 = 0, addreg1 = 0;
/* First classify both operands.  */
if (REG_P (operands[0]))
else if (offsettable_memref_p (operands[0]))
else if (GET_CODE (operands[0]) == MEM)
if (REG_P (operands[1]))
else if (CONSTANT_P (operands[1]))
else if (offsettable_memref_p (operands[1]))
else if (GET_CODE (operands[1]) == MEM)
/* Check for the cases that the operand constraints are not
supposed to allow to happen.  */
gcc_assert (optype0 == REGOP || optype1 == REGOP);
/* Handle copies between general and floating registers.  */
if (optype0 == REGOP && optype1 == REGOP
&& FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
if (FP_REG_P (operands[0]))
output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
return "{fldds|fldd} -16(%%sp),%0";
output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
return "{ldws|ldw} -12(%%sp),%R0";
/* Handle auto decrementing and incrementing loads and stores
specifically, since the structure of the function doesn't work
for them without major modification.  Improve this once the
port understands PA's general inc/dec addressing.
(This was written by tege.  Chide him if it doesn't work.)  */
if (optype0 == MEMOP)
/* We have to output the address syntax ourselves, since print_operand
doesn't deal with the addresses we want to use.  Fix this later.  */
rtx addr = XEXP (operands[0], 0);
if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
operands[0] = XEXP (addr, 0);
gcc_assert (GET_CODE (operands[1]) == REG
&& GET_CODE (operands[0]) == REG);
gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
/* No overlap between high target register and address
register.  (We do this in a non-obvious way to
save a register file writeback)  */
if (GET_CODE (addr) == POST_INC)
return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
operands[0] = XEXP (addr, 0);
gcc_assert (GET_CODE (operands[1]) == REG
&& GET_CODE (operands[0]) == REG);
gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
/* No overlap between high target register and address
register.  (We do this in a non-obvious way to save a
register file writeback)  */
if (GET_CODE (addr) == PRE_INC)
return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
if (optype1 == MEMOP)
/* We have to output the address syntax ourselves, since print_operand
doesn't deal with the addresses we want to use.  Fix this later.  */
rtx addr = XEXP (operands[1], 0);
if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
operands[1] = XEXP (addr, 0);
gcc_assert (GET_CODE (operands[0]) == REG
&& GET_CODE (operands[1]) == REG);
if (!reg_overlap_mentioned_p (high_reg, addr))
/* No overlap between high target register and address
register.  (We do this in a non-obvious way to
save a register file writeback)  */
if (GET_CODE (addr) == POST_INC)
return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
/* This is an undefined situation.  We should load into the
address register *and* update that register.  Probably
we don't need to handle this at all.  */
if (GET_CODE (addr) == POST_INC)
return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
operands[1] = XEXP (addr, 0);
gcc_assert (GET_CODE (operands[0]) == REG
&& GET_CODE (operands[1]) == REG);
if (!reg_overlap_mentioned_p (high_reg, addr))
/* No overlap between high target register and address
register.  (We do this in a non-obvious way to
save a register file writeback)  */
if (GET_CODE (addr) == PRE_INC)
return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
/* This is an undefined situation.  We should load into the
address register *and* update that register.  Probably
we don't need to handle this at all.  */
if (GET_CODE (addr) == PRE_INC)
return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
else if (GET_CODE (addr) == PLUS
&& GET_CODE (XEXP (addr, 0)) == MULT)
rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
if (!reg_overlap_mentioned_p (high_reg, addr))
xoperands[0] = high_reg;
xoperands[1] = XEXP (addr, 1);
xoperands[2] = XEXP (XEXP (addr, 0), 0);
xoperands[3] = XEXP (XEXP (addr, 0), 1);
output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
xoperands);
return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
xoperands[0] = high_reg;
xoperands[1] = XEXP (addr, 1);
xoperands[2] = XEXP (XEXP (addr, 0), 0);
xoperands[3] = XEXP (XEXP (addr, 0), 1);
output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
xoperands);
return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
/* If an operand is an unoffsettable memory ref, find a register
we can increment temporarily to make it refer to the second word.  */
if (optype0 == MEMOP)
addreg0 = find_addr_reg (XEXP (operands[0], 0));
if (optype1 == MEMOP)
addreg1 = find_addr_reg (XEXP (operands[1], 0));
/* Ok, we can do one word at a time.
Normally we do the low-numbered word first.
In either case, set up in LATEHALF the operands to use
for the high-numbered word and in some cases alter the
operands in OPERANDS to be suitable for the low-numbered word.  */
if (optype0 == REGOP)
latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
else if (optype0 == OFFSOP)
latehalf[0] = adjust_address (operands[0], SImode, 4);
latehalf[0] = operands[0];
if (optype1 == REGOP)
latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
else if (optype1 == OFFSOP)
latehalf[1] = adjust_address (operands[1], SImode, 4);
else if (optype1 == CNSTOP)
split_double (operands[1], &operands[1], &latehalf[1]);
latehalf[1] = operands[1];
/* If the first move would clobber the source of the second one,
do them in the other order.
This can happen in two cases:
mem -> register where the first half of the destination register
is the same register used in the memory's address.  Reload
can create such insns.
mem in this case will be either register indirect or register
indirect plus a valid offset.
register -> register move where REGNO(dst) == REGNO(src + 1)
someone (Tim/Tege?) claimed this can happen for parameter loads.
Handle mem -> register case first.  */
if (optype0 == REGOP
&& (optype1 == MEMOP || optype1 == OFFSOP)
&& refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
/* Do the late half first.  */
output_asm_insn ("ldo 4(%0),%0", &addreg1);
output_asm_insn (singlemove_string (latehalf), latehalf);
output_asm_insn ("ldo -4(%0),%0", &addreg1);
return singlemove_string (operands);
/* Now handle register -> register case.  */
if (optype0 == REGOP && optype1 == REGOP
&& REGNO (operands[0]) == REGNO (operands[1]) + 1)
output_asm_insn (singlemove_string (latehalf), latehalf);
return singlemove_string (operands);
/* Normal case: do the two words, low-numbered first.  */
output_asm_insn (singlemove_string (operands), operands);
/* Make any unoffsettable addresses point at high-numbered word.  */
output_asm_insn ("ldo 4(%0),%0", &addreg0);
output_asm_insn ("ldo 4(%0),%0", &addreg1);
output_asm_insn (singlemove_string (latehalf), latehalf);
/* Undo the adds we just did.  */
output_asm_insn ("ldo -4(%0),%0", &addreg0);
output_asm_insn ("ldo -4(%0),%0", &addreg1);
output_fp_move_double (rtx *operands)
if (FP_REG_P (operands[0]))
if (FP_REG_P (operands[1])
|| operands[1] == CONST0_RTX (GET_MODE (operands[0])))
output_asm_insn ("fcpy,dbl %f1,%0", operands);
output_asm_insn ("fldd%F1 %1,%0", operands);
else if (FP_REG_P (operands[1]))
output_asm_insn ("fstd%F0 %1,%0", operands);
gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
/* This is a pain.  You have to be prepared to deal with an
arbitrary address here including pre/post increment/decrement.
So avoid this in the MD.  */
gcc_assert (GET_CODE (operands[0]) == REG);
xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
xoperands[0] = operands[0];
output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
/* Return a REG that occurs in ADDR with coefficient 1.
ADDR can be effectively incremented by incrementing REG.  */
find_addr_reg (rtx addr)
while (GET_CODE (addr) == PLUS)
if (GET_CODE (XEXP (addr, 0)) == REG)
addr = XEXP (addr, 0);
else if (GET_CODE (XEXP (addr, 1)) == REG)
addr = XEXP (addr, 1);
else if (CONSTANT_P (XEXP (addr, 0)))
addr = XEXP (addr, 1);
else if (CONSTANT_P (XEXP (addr, 1)))
addr = XEXP (addr, 0);
gcc_assert (GET_CODE (addr) == REG);
/* Emit code to perform a block move.
OPERANDS[0] is the destination pointer as a REG, clobbered.
OPERANDS[1] is the source pointer as a REG, clobbered.
OPERANDS[2] is a register for temporary storage.
OPERANDS[3] is a register for temporary storage.
OPERANDS[4] is the size as a CONST_INT
OPERANDS[5] is the alignment safe to use, as a CONST_INT.
OPERANDS[6] is another temporary register.  */
output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
int align = INTVAL (operands[5]);
unsigned long n_bytes = INTVAL (operands[4]);
/* We can't move more than a word at a time because the PA
has no integer move insns wider than a word.  (Could use fp mem ops?)  */
if (align > (TARGET_64BIT ? 8 : 4))
align = (TARGET_64BIT ? 8 : 4);
/* Note that we know each loop below will execute at least twice
(else we would have open-coded the copy).  */
/* Pre-adjust the loop counter.  */
operands[4] = GEN_INT (n_bytes - 16);
output_asm_insn ("ldi %4,%2", operands);
output_asm_insn ("ldd,ma 8(%1),%3", operands);
output_asm_insn ("ldd,ma 8(%1),%6", operands);
output_asm_insn ("std,ma %3,8(%0)", operands);
output_asm_insn ("addib,>= -16,%2,.-12", operands);
output_asm_insn ("std,ma %6,8(%0)", operands);
/* Handle the residual.  There could be up to 7 bytes of
residual to copy!  */
if (n_bytes % 16 != 0)
operands[4] = GEN_INT (n_bytes % 8);
if (n_bytes % 16 >= 8)
output_asm_insn ("ldd,ma 8(%1),%3", operands);
if (n_bytes % 8 != 0)
output_asm_insn ("ldd 0(%1),%6", operands);
if (n_bytes % 16 >= 8)
output_asm_insn ("std,ma %3,8(%0)", operands);
if (n_bytes % 8 != 0)
output_asm_insn ("stdby,e %6,%4(%0)", operands);
/* Pre-adjust the loop counter.  */
operands[4] = GEN_INT (n_bytes - 8);
output_asm_insn ("ldi %4,%2", operands);
output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
output_asm_insn ("addib,>= -8,%2,.-12", operands);
output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
/* Handle the residual.  There could be up to 7 bytes of
residual to copy!  */
if (n_bytes % 8 != 0)
operands[4] = GEN_INT (n_bytes % 4);
if (n_bytes % 8 >= 4)
output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
if (n_bytes % 4 != 0)
output_asm_insn ("ldw 0(%1),%6", operands);
if (n_bytes % 8 >= 4)
output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
if (n_bytes % 4 != 0)
output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
/* Pre-adjust the loop counter.  */
operands[4] = GEN_INT (n_bytes - 4);
output_asm_insn ("ldi %4,%2", operands);
output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
output_asm_insn ("addib,>= -4,%2,.-12", operands);
output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
/* Handle the residual.  */
if (n_bytes % 4 != 0)
if (n_bytes % 4 >= 2)
output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
if (n_bytes % 2 != 0)
output_asm_insn ("ldb 0(%1),%6", operands);
if (n_bytes % 4 >= 2)
output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
if (n_bytes % 2 != 0)
output_asm_insn ("stb %6,0(%0)", operands);
/* Pre-adjust the loop counter.  */
operands[4] = GEN_INT (n_bytes - 2);
output_asm_insn ("ldi %4,%2", operands);
output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
output_asm_insn ("addib,>= -2,%2,.-12", operands);
output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
/* Handle the residual.  */
if (n_bytes % 2 != 0)
output_asm_insn ("ldb 0(%1),%3", operands);
output_asm_insn ("stb %3,0(%0)", operands);
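/* Illustrative aside (not part of this port's logic): a byte-level
   model of the word-aligned copy emitted above, assuming n_bytes is
   at least 8 as the emitter requires.  The unrolled loop moves eight
   bytes per trip; the residual code moves one more word when 4..7
   bytes remain and finishes with a partial store.  The helper name
   is ours.  */

static void
word_copy_model_sketch (unsigned char *dst, const unsigned char *src,
                        unsigned long n_bytes)
{
  unsigned long i = 0, j;

  while (i + 8 <= n_bytes)      /* the unrolled-by-two word loop */
    for (j = 0; j < 8; j++, i++)
      dst[i] = src[i];

  if (n_bytes % 8 >= 4)         /* one more full word */
    for (j = 0; j < 4; j++, i++)
      dst[i] = src[i];

  while (i < n_bytes)           /* final 1..3 bytes (the stby,e above) */
    dst[i] = src[i], i++;
}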
/* Count the number of insns necessary to handle this block move.
Basic structure is the same as emit_block_move, except that we
count insns rather than emit them.  */
compute_movmem_length (rtx insn)
rtx pat = PATTERN (insn);
unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
unsigned int n_insns = 0;
/* We can't move more than a word at a time because the PA
has no integer move insns wider than a word.  (Could use fp mem ops?)  */
if (align > (TARGET_64BIT ? 8 : 4))
align = (TARGET_64BIT ? 8 : 4);
/* The basic copying loop.  */
if (n_bytes % (2 * align) != 0)
if ((n_bytes % (2 * align)) >= align)
if ((n_bytes % align) != 0)
/* Lengths are expressed in bytes now; each insn is 4 bytes.  */
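/* Illustrative aside (not part of this port's logic): the counting
   above with the elided constants spelled out.  We assume the basic
   loop costs six insns and each residual case two more, which is
   what the sequences emitted by output_block_move suggest; PA insns
   are four bytes each.  The helper name is ours.  */

static unsigned int
movmem_length_sketch (unsigned long n_bytes, unsigned int align)
{
  unsigned int n_insns = 6;             /* counter setup + copy loop */

  if (n_bytes % (2 * align) != 0)
    {
      if (n_bytes % (2 * align) >= align)
        n_insns += 2;                   /* one more load/store pair */
      if (n_bytes % align != 0)
        n_insns += 2;                   /* partial load + merging store */
    }

  return n_insns * 4;                   /* length in bytes */
}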
/* Emit code to perform a block clear.
OPERANDS[0] is the destination pointer as a REG, clobbered.
OPERANDS[1] is a register for temporary storage.
OPERANDS[2] is the size as a CONST_INT
OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */
output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
int align = INTVAL (operands[3]);
unsigned long n_bytes = INTVAL (operands[2]);
/* We can't clear more than a word at a time because the PA
has no integer store insns wider than a word.  */
if (align > (TARGET_64BIT ? 8 : 4))
align = (TARGET_64BIT ? 8 : 4);
/* Note that we know each loop below will execute at least twice
(else we would have open-coded the clear).  */
/* Pre-adjust the loop counter.  */
operands[2] = GEN_INT (n_bytes - 16);
output_asm_insn ("ldi %2,%1", operands);
output_asm_insn ("std,ma %%r0,8(%0)", operands);
output_asm_insn ("addib,>= -16,%1,.-4", operands);
output_asm_insn ("std,ma %%r0,8(%0)", operands);
/* Handle the residual.  There could be up to 7 bytes of
residual to clear!  */
if (n_bytes % 16 != 0)
operands[2] = GEN_INT (n_bytes % 8);
if (n_bytes % 16 >= 8)
output_asm_insn ("std,ma %%r0,8(%0)", operands);
if (n_bytes % 8 != 0)
output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
/* Pre-adjust the loop counter.  */
operands[2] = GEN_INT (n_bytes - 8);
output_asm_insn ("ldi %2,%1", operands);
output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
output_asm_insn ("addib,>= -8,%1,.-4", operands);
output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
/* Handle the residual.  There could be up to 7 bytes of
residual to clear!  */
if (n_bytes % 8 != 0)
operands[2] = GEN_INT (n_bytes % 4);
if (n_bytes % 8 >= 4)
output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
if (n_bytes % 4 != 0)
output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
/* Pre-adjust the loop counter.  */
operands[2] = GEN_INT (n_bytes - 4);
output_asm_insn ("ldi %2,%1", operands);
output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
output_asm_insn ("addib,>= -4,%1,.-4", operands);
output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
/* Handle the residual.  */
if (n_bytes % 4 != 0)
if (n_bytes % 4 >= 2)
output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
if (n_bytes % 2 != 0)
output_asm_insn ("stb %%r0,0(%0)", operands);
/* Pre-adjust the loop counter.  */
operands[2] = GEN_INT (n_bytes - 2);
output_asm_insn ("ldi %2,%1", operands);
output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
output_asm_insn ("addib,>= -2,%1,.-4", operands);
output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
/* Handle the residual.  */
if (n_bytes % 2 != 0)
output_asm_insn ("stb %%r0,0(%0)", operands);
/* Count the number of insns necessary to handle this block clear.
Basic structure is the same as emit_block_move, except that we
count insns rather than emit them.  */
compute_clrmem_length (rtx insn)
rtx pat = PATTERN (insn);
unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
unsigned int n_insns = 0;
/* We can't clear more than a word at a time because the PA
has no integer store insns wider than a word.  */
if (align > (TARGET_64BIT ? 8 : 4))
align = (TARGET_64BIT ? 8 : 4);
/* The basic loop.  */
if (n_bytes % (2 * align) != 0)
if ((n_bytes % (2 * align)) >= align)
if ((n_bytes % align) != 0)
/* Lengths are expressed in bytes now; each insn is 4 bytes.  */
output_and (rtx *operands)
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
int ls0, ls1, ms0, p, len;
for (ls0 = 0; ls0 < 32; ls0++)
if ((mask & (1 << ls0)) == 0)
for (ls1 = ls0; ls1 < 32; ls1++)
if ((mask & (1 << ls1)) != 0)
for (ms0 = ls1; ms0 < 32; ms0++)
if ((mask & (1 << ms0)) == 0)
gcc_assert (ms0 == 32);
operands[2] = GEN_INT (len);
return "{extru|extrw,u} %1,31,%2,%0";
/* We could use this `depi' for the case above as well, but `depi'
requires one more register file access than an `extru'.  */
operands[2] = GEN_INT (p);
operands[3] = GEN_INT (len);
return "{depi|depwi} 0,%2,%3,%0";
return "and %1,%2,%0";
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
storing the result in operands[0].  */
output_64bit_and (rtx *operands)
if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
int ls0, ls1, ms0, p, len;
for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
if (ls1 == HOST_BITS_PER_WIDE_INT)
operands[2] = GEN_INT (len);
return "extrd,u %1,63,%2,%0";
/* We could use this `depi' for the case above as well, but `depi'
requires one more register file access than an `extru'.  */
operands[2] = GEN_INT (p);
operands[3] = GEN_INT (len);
return "depdi 0,%2,%3,%0";
return "and %1,%2,%0";
output_ior (rtx *operands)
unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
int bs0, bs1, p, len;
if (INTVAL (operands[2]) == 0)
return "copy %1,%0";
for (bs0 = 0; bs0 < 32; bs0++)
if ((mask & (1 << bs0)) != 0)
for (bs1 = bs0; bs1 < 32; bs1++)
if ((mask & (1 << bs1)) == 0)
gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
operands[2] = GEN_INT (p);
operands[3] = GEN_INT (len);
return "{depi|depwi} -1,%2,%3,%0";
/* Return a string to perform a bitwise-or of operands[1] with operands[2]
storing the result in operands[0].  */
output_64bit_ior (rtx *operands)
unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
int bs0, bs1, p, len;
if (INTVAL (operands[2]) == 0)
return "copy %1,%0";
for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
|| ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
operands[2] = GEN_INT (p);
operands[3] = GEN_INT (len);
return "depdi -1,%2,%3,%0";
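/* Illustrative aside (not part of this port's logic): the bs0/bs1
   scan above only succeeds when the mask is one contiguous run of
   ones, which is exactly what a single depdi -1 can materialize.  A
   standalone predicate for that property, assuming 64-bit unsigned
   arithmetic; the helper name is ours.  */

static int
contiguous_ones_sketch (unsigned long long mask)
{
  int bs0 = 0, bs1;

  if (mask == 0)
    return 0;
  while ((mask & (1ull << bs0)) == 0)   /* skip low zeros */
    bs0++;
  bs1 = bs0;
  while (bs1 < 64 && (mask & (1ull << bs1)) != 0)   /* walk the run */
    bs1++;

  /* Contiguous iff no set bit remains above the run.  */
  return bs1 == 64 || (1ull << bs1) > mask;
}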
/* Target hook for assembling integer objects.  This code handles
aligned SI and DI integers specially since function references
must be preceded by P%.  */
pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
if (size == UNITS_PER_WORD
&& function_label_operand (x, VOIDmode))
fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
output_addr_const (asm_out_file, x);
fputc ('\n', asm_out_file);
return default_assemble_integer (x, size, aligned_p);
/* Output an ascii string.  */
output_ascii (FILE *file, const char *p, int size)
unsigned char partial_output[16];	/* Max space 4 chars can occupy.  */
/* The HP assembler can only take strings of 256 characters at one
time.  This is a limitation on input line length, *not* the
length of the string.  Sigh.  Even worse, it seems that the
restriction is in number of input characters (see \xnn &
\whatever).  So we have to do this very carefully.  */
fputs ("\t.STRING \"", file);
for (i = 0; i < size; i += 4)
for (io = 0, co = 0; io < MIN (4, size - i); io++)
register unsigned int c = (unsigned char) p[i + io];
if (c == '\"' || c == '\\')
partial_output[co++] = '\\';
if (c >= ' ' && c < 0177)
partial_output[co++] = c;
else
{
partial_output[co++] = '\\';
partial_output[co++] = 'x';
hexd = c / 16 - 0 + '0';
if (hexd > '9')
hexd -= '9' - 'a' + 1;
partial_output[co++] = hexd;
hexd = c % 16 - 0 + '0';
if (hexd > '9')
hexd -= '9' - 'a' + 1;
partial_output[co++] = hexd;
}
if (chars_output + co > 243)
fputs ("\"\n\t.STRING \"", file);
fwrite (partial_output, 1, (size_t) co, file);
fputs ("\"\n", file);
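/* Illustrative aside (not part of this port's logic): the escaping
   rules used above, pulled out as a standalone routine.  Quotes and
   backslashes are backslash-escaped, other printable characters pass
   through, and everything else becomes \xNN with the same lower-case
   hex digits the code above computes.  The helper name is ours.  */

static int
ascii_escape_sketch (unsigned int c, unsigned char out[4])
{
  int n = 0;

  if (c == '\"' || c == '\\')
    out[n++] = '\\';
  if (c >= ' ' && c < 0177)
    out[n++] = c;
  else
    {
      unsigned int hexd;

      out[n++] = '\\';
      out[n++] = 'x';
      hexd = c / 16 + '0';
      if (hexd > '9')
        hexd -= '9' - 'a' + 1;
      out[n++] = hexd;
      hexd = c % 16 + '0';
      if (hexd > '9')
        hexd -= '9' - 'a' + 1;
      out[n++] = hexd;
    }
  return n;     /* at most four output characters per input byte */
}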
/* Try to rewrite floating point comparisons & branches to avoid
useless add,tr insns.
CHECK_NOTES is nonzero if we should examine REG_DEAD notes
to see if FPCC is dead.  CHECK_NOTES is nonzero for the
first attempt to remove useless add,tr insns.  It is zero
for the second pass as reorg sometimes leaves bogus REG_DEAD
notes lying around.
When CHECK_NOTES is zero we can only eliminate add,tr insns
when there's a 1:1 correspondence between fcmp and ftest/fbranch
instructions.  */
remove_useless_addtr_insns (int check_notes)
static int pass = 0;
/* This is fairly cheap, so always run it when optimizing.  */
int fbranch_count = 0;
/* Walk all the insns in this function looking for fcmp & fbranch
instructions.  Keep track of how many of each we find.  */
for (insn = get_insns (); insn; insn = next_insn (insn))
/* Ignore anything that isn't an INSN or a JUMP_INSN.  */
if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
tmp = PATTERN (insn);
/* It must be a set.  */
if (GET_CODE (tmp) != SET)
/* If the destination is CCFP, then we've found an fcmp insn.  */
tmp = SET_DEST (tmp);
if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
tmp = PATTERN (insn);
/* If this is an fbranch instruction, bump the fbranch counter.  */
if (GET_CODE (tmp) == SET
&& SET_DEST (tmp) == pc_rtx
&& GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
&& GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
&& GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
&& REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
/* Find all floating point compare + branch insns.  If possible,
reverse the comparison & the branch to avoid add,tr insns.  */
for (insn = get_insns (); insn; insn = next_insn (insn))
/* Ignore anything that isn't an INSN.  */
if (GET_CODE (insn) != INSN)
tmp = PATTERN (insn);
/* It must be a set.  */
if (GET_CODE (tmp) != SET)
/* The destination must be CCFP, which is register zero.  */
tmp = SET_DEST (tmp);
if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
/* INSN should be a set of CCFP.
See if the result of this insn is used in a reversed FP
conditional branch.  If so, reverse our condition and
the branch.  Doing so avoids useless add,tr insns.  */
next = next_insn (insn);
/* Jumps, calls and labels stop our search.  */
if (GET_CODE (next) == JUMP_INSN
|| GET_CODE (next) == CALL_INSN
|| GET_CODE (next) == CODE_LABEL)
/* As does another fcmp insn.  */
if (GET_CODE (next) == INSN
&& GET_CODE (PATTERN (next)) == SET
&& GET_CODE (SET_DEST (PATTERN (next))) == REG
&& REGNO (SET_DEST (PATTERN (next))) == 0)
next = next_insn (next);
/* Is NEXT_INSN a branch?  */
&& GET_CODE (next) == JUMP_INSN)
3187 /* If it a reversed fp conditional branch (e.g. uses add,tr)
3188 and CCFP dies, then reverse our conditional and the branch
3189 to avoid the add,tr. */
3190 if (GET_CODE (pattern) == SET
3191 && SET_DEST (pattern) == pc_rtx
3192 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3193 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3194 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3195 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3196 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3197 && (fcmp_count == fbranch_count
3199 && find_regno_note (next, REG_DEAD, 0))))
3201 /* Reverse the branch. */
3202 tmp = XEXP (SET_SRC (pattern), 1);
3203 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3204 XEXP (SET_SRC (pattern), 2) = tmp;
3205 INSN_CODE (next) = -1;
3207 /* Reverse our condition. */
3208 tmp = PATTERN (insn);
3209 PUT_CODE (XEXP (tmp, 1),
3210 (reverse_condition_maybe_unordered
3211 (GET_CODE (XEXP (tmp, 1)))));
/* You may have trouble believing this, but this is the 32 bit HP-PA
stack layout:
Variable arguments (optional; any number may be allocated)
SP-(4*(N+9))	arg word N
Fixed arguments (must be allocated; may remain unused)
SP-32	External Data Pointer (DP)
SP-24	External/stub RP (RP')
SP-8	Calling Stub RP (RP'')
SP-0	Stack Pointer (points to next available address)  */
/* This function saves registers as follows.  Registers marked with ' are
this function's registers (as opposed to the previous function's).
If a frame_pointer isn't needed, r4 is saved as a general register;
the space for the frame pointer is still allocated, though, to keep
the frame layout consistent.
SP (FP')	Previous FP
SP + 4	Alignment filler (sigh)
SP + 8	Space for locals reserved here.
SP + n	All call saved register used.
SP + o	All call saved fp registers used.
SP + p (SP')	points to next available address.  */
/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
leaf procedures.  */
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;
/* Emit RTL to store REG at the memory location specified by BASE+DISP.
Handle case where DISP > 8k by using the add_high_const patterns.
Note in DISP > 8k case, we will leave the high part of the address
in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */
store_reg (int reg, HOST_WIDE_INT disp, int base)
rtx insn, dest, src, basereg;
src = gen_rtx_REG (word_mode, reg);
basereg = gen_rtx_REG (Pmode, base);
if (VAL_14_BITS_P (disp))
dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
insn = emit_move_insn (dest, src);
else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
rtx delta = GEN_INT (disp);
rtx tmpreg = gen_rtx_REG (Pmode, 1);
emit_move_insn (tmpreg, delta);
insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode, tmpreg,
gen_rtx_PLUS (Pmode, basereg, delta)),
RTX_FRAME_RELATED_P (insn) = 1;
dest = gen_rtx_MEM (word_mode, tmpreg);
insn = emit_move_insn (dest, src);
rtx delta = GEN_INT (disp);
rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
rtx tmpreg = gen_rtx_REG (Pmode, 1);
emit_move_insn (tmpreg, high);
dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
insn = emit_move_insn (dest, src);
= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode,
gen_rtx_MEM (word_mode,
gen_rtx_PLUS (word_mode, basereg,
RTX_FRAME_RELATED_P (insn) = 1;
/* Emit RTL to store REG at the memory location specified by BASE and then
add MOD to BASE.  MOD must be <= 8k.  */
store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
rtx insn, basereg, srcreg, delta;
gcc_assert (VAL_14_BITS_P (mod));
basereg = gen_rtx_REG (Pmode, base);
srcreg = gen_rtx_REG (word_mode, reg);
delta = GEN_INT (mod);
insn = emit_insn (gen_post_store (basereg, srcreg, delta));
RTX_FRAME_RELATED_P (insn) = 1;
/* RTX_FRAME_RELATED_P must be set on each frame related set
in a parallel with more than one element.  */
RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
where DISP > 8k by using the add_high_const patterns.  NOTE indicates
whether to add a frame note or not.
In the DISP > 8k case, we leave the high part of the address in %r1.
There is code in expand_hppa_{prologue,epilogue} that knows about this.  */
set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
if (VAL_14_BITS_P (disp))
insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
plus_constant (gen_rtx_REG (Pmode, base), disp));
else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
rtx basereg = gen_rtx_REG (Pmode, base);
rtx delta = GEN_INT (disp);
rtx tmpreg = gen_rtx_REG (Pmode, 1);
emit_move_insn (tmpreg, delta);
insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
gen_rtx_PLUS (Pmode, tmpreg, basereg));
= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode, tmpreg,
gen_rtx_PLUS (Pmode, basereg, delta)),
rtx basereg = gen_rtx_REG (Pmode, base);
rtx delta = GEN_INT (disp);
rtx tmpreg = gen_rtx_REG (Pmode, 1);
emit_move_insn (tmpreg,
gen_rtx_PLUS (Pmode, basereg,
gen_rtx_HIGH (Pmode, delta)));
insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
gen_rtx_LO_SUM (Pmode, tmpreg, delta));
if (DO_FRAME_NOTES && note)
RTX_FRAME_RELATED_P (insn) = 1;
compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
/* The code in hppa_expand_prologue and hppa_expand_epilogue must
be consistent with the rounding and size calculation done here.
Change them at the same time.  */
/* We do our own stack alignment.  First, round the size of the
stack locals up to a word boundary.  */
size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
/* Space for previous frame pointer + filler.  If any frame is
allocated, we need to add in the STARTING_FRAME_OFFSET.  We
waste some space here for the sake of HP compatibility.  The
first slot is only used when the frame pointer is needed.  */
if (size || frame_pointer_needed)
size += STARTING_FRAME_OFFSET;
/* If the current function calls __builtin_eh_return, then we need
to allocate stack space for registers that will hold data for
the exception handler.  */
if (DO_FRAME_NOTES && current_function_calls_eh_return)
for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
size += i * UNITS_PER_WORD;
/* Account for space used by the callee general register saves.  */
for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
if (regs_ever_live[i])
size += UNITS_PER_WORD;
/* Account for space used by the callee floating point register saves.  */
for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
if (regs_ever_live[i]
|| (!TARGET_64BIT && regs_ever_live[i + 1]))
/* We always save both halves of the FP register, so always
increment the frame size by 8 bytes.  */
/* If any of the floating registers are saved, account for the
alignment needed for the floating point register save block.  */
size = (size + 7) & ~7;
/* The various ABIs include space for the outgoing parameters in the
size of the current function's stack frame.  We don't need to align
for the outgoing arguments as their alignment is set by the final
rounding for the frame as a whole.  */
size += current_function_outgoing_args_size;
/* Allocate space for the fixed frame marker.  This space must be
allocated for any function that makes calls or otherwise allocates
stack space.  */
if (!current_function_is_leaf || size)
size += TARGET_64BIT ? 48 : 32;
/* Finally, round to the preferred stack boundary.  */
return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
& ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
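/* Illustrative aside (not part of this port's logic): every rounding
   step in compute_frame_size uses the same power-of-two idiom.  A
   standalone sketch, assuming ALIGN is a power of two; the helper
   name is ours.  */

static unsigned long
round_up_sketch (unsigned long size, unsigned long align)
{
  /* Adding ALIGN - 1 and clearing the low bits rounds SIZE up to the
     next multiple of ALIGN; e.g. round_up_sketch (250, 64) == 256.  */
  return (size + align - 1) & ~(align - 1);
}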
/* Generate the assembly code for function entry.  FILE is a stdio
stream to output the code to.  SIZE is an int: how many units of
temporary storage to allocate.
Refer to the array `regs_ever_live' to determine which registers to
save; `regs_ever_live[I]' is nonzero if register number I is ever
used in the function.  This function is responsible for knowing
which registers should not be saved even if used.  */
/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
of memory.  If any fpu reg is used in the function, we allocate
such a block here, at the bottom of the frame, just in case it's needed.
If this function is a leaf procedure, then we may choose not
to do a "save" insn.  The decision about whether or not
to do this is made in regclass.c.  */
pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
/* The function's label and associated .PROC must never be
separated and must be output *after* any profiling declarations
to avoid changing spaces/subspaces within a procedure.  */
ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
fputs ("\t.PROC\n", file);
/* hppa_expand_prologue does the dirty work now.  We just need
to output the assembler directives which denote the start
of a function.  */
fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
if (regs_ever_live[2])
fputs (",CALLS,SAVE_RP", file);
fputs (",NO_CALLS", file);
/* The SAVE_SP flag is used to indicate that register %r3 is stored
at the beginning of the frame and that it is used as the frame
pointer for the frame.  We do this because our current frame
layout doesn't conform to that specified in the HP runtime
documentation and we need a way to indicate to programs such as
GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
isn't used by HP compilers but is supported by the assembler.
However, SAVE_SP is supposed to indicate that the previous stack
pointer has been saved in the frame marker.  */
if (frame_pointer_needed)
fputs (",SAVE_SP", file);
/* Pass on information about the number of callee register saves
performed in the prologue.
The compiler is supposed to pass the highest register number
saved, the assembler then has to adjust that number before
entering it into the unwind descriptor (to account for any
caller saved registers with lower register numbers than the
first callee saved register).  */
fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
fputs ("\n\t.ENTRY\n", file);
remove_useless_addtr_insns (0);
hppa_expand_prologue (void)
int merge_sp_adjust_with_store = 0;
HOST_WIDE_INT size = get_frame_size ();
HOST_WIDE_INT offset;
/* Compute total size for frame pointer, filler, locals and rounding to
the next word boundary.  Similar code appears in compute_frame_size
and must be changed in tandem with this code.  */
local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
if (local_fsize || frame_pointer_needed)
local_fsize += STARTING_FRAME_OFFSET;
actual_fsize = compute_frame_size (size, &save_fregs);
/* Compute a few things we will use often.  */
tmpreg = gen_rtx_REG (word_mode, 1);
/* Save RP first.  The calling conventions manual states RP will
always be stored into the caller's frame at sp - 20 or sp - 16
depending on which ABI is in use.  */
if (regs_ever_live[2] || current_function_calls_eh_return)
store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
/* Allocate the local frame and set up the frame pointer if needed.  */
if (actual_fsize != 0)
if (frame_pointer_needed)
/* Copy the old frame pointer temporarily into %r1.  Set up the
new stack pointer, then store away the saved old frame pointer
into the stack at sp and at the same time update the stack
pointer by actual_fsize bytes.  Two versions, first
handles small (<8k) frames.  The second handles large (>=8k)
frames.  */
insn = emit_move_insn (tmpreg, frame_pointer_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
RTX_FRAME_RELATED_P (insn) = 1;
if (VAL_14_BITS_P (actual_fsize))
store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
/* It is incorrect to store the saved frame pointer at *sp,
then increment sp (writes beyond the current stack boundary).
So instead use stwm to store at *sp and post-increment the
stack pointer as an atomic operation.  Then increment sp to
finish allocating the new frame.  */
HOST_WIDE_INT adjust1 = 8192 - 64;
HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
/* We set SAVE_SP in frames that need a frame pointer.  Thus,
we need to store the previous stack pointer (frame pointer)
into the frame marker on targets that use the HP unwind
library.  This allows the HP unwind library to be used to
unwind GCC frames.  However, we are not fully compatible
with the HP library because our frame layout differs from
that specified in the HP runtime specification.
We don't want a frame note on this instruction as the frame
marker moves during dynamic stack allocation.
This instruction also serves as a blockage to prevent
register spills from being scheduled before the stack
pointer is raised.  This is necessary as we store
registers using the frame pointer as a base register,
and the frame pointer is set before sp is raised.  */
if (TARGET_HPUX_UNWIND_LIBRARY)
rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
GEN_INT (TARGET_64BIT ? -8 : -4));
emit_move_insn (gen_rtx_MEM (word_mode, addr),
emit_insn (gen_blockage ());
/* No frame pointer needed.  */
/* In some cases we can perform the first callee register save
and allocating the stack frame at the same time.  If so, just
make a note of it and defer allocating the frame until saving
the callee registers.  */
if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
merge_sp_adjust_with_store = 1;
/* Cannot optimize.  Adjust the stack frame by actual_fsize
bytes.  */
set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
/* Normal register save.
Do not save the frame pointer in the frame_pointer_needed case.  It
was done earlier.  */
if (frame_pointer_needed)
offset = local_fsize;
/* Saving the EH return data registers in the frame is the simplest
way to get the frame unwind information emitted.  We put them
just before the general registers.  */
if (DO_FRAME_NOTES && current_function_calls_eh_return)
unsigned int i, regno;
regno = EH_RETURN_DATA_REGNO (i);
if (regno == INVALID_REGNUM)
store_reg (regno, offset, FRAME_POINTER_REGNUM);
offset += UNITS_PER_WORD;
for (i = 18; i >= 4; i--)
if (regs_ever_live[i] && ! call_used_regs[i])
store_reg (i, offset, FRAME_POINTER_REGNUM);
offset += UNITS_PER_WORD;
/* Account for %r3 which is saved in a special place.  */
/* No frame pointer needed.  */
offset = local_fsize - actual_fsize;
/* Saving the EH return data registers in the frame is the simplest
way to get the frame unwind information emitted.  */
if (DO_FRAME_NOTES && current_function_calls_eh_return)
unsigned int i, regno;
regno = EH_RETURN_DATA_REGNO (i);
if (regno == INVALID_REGNUM)
/* If merge_sp_adjust_with_store is nonzero, then we can
optimize the first save.  */
if (merge_sp_adjust_with_store)
store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
merge_sp_adjust_with_store = 0;
store_reg (regno, offset, STACK_POINTER_REGNUM);
offset += UNITS_PER_WORD;
for (i = 18; i >= 3; i--)
if (regs_ever_live[i] && ! call_used_regs[i])
/* If merge_sp_adjust_with_store is nonzero, then we can
optimize the first GR save.  */
if (merge_sp_adjust_with_store)
store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
merge_sp_adjust_with_store = 0;
store_reg (i, offset, STACK_POINTER_REGNUM);
offset += UNITS_PER_WORD;
/* If we wanted to merge the SP adjustment with a GR save, but we never
did any GR saves, then just emit the adjustment here.  */
if (merge_sp_adjust_with_store)
set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
/* The hppa calling conventions say that %r19, the pic offset
register, is saved at sp - 32 (in this function's frame)
when generating PIC code.  FIXME:  What is the correct thing
to do for functions which make no calls and allocate no
frame?  Do we need to allocate a frame, or can we just omit
the save?  For now we'll just omit the save.
We don't want a note on this insn as the frame marker can
move if there is a dynamic stack allocation.  */
if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
/* Align pointer properly (doubleword boundary).  */
offset = (offset + 7) & ~7;
/* Floating point register store.  */
/* First get the frame or stack pointer to the start of the FP register
save area.  */
if (frame_pointer_needed)
set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
base = frame_pointer_rtx;
set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
base = stack_pointer_rtx;
/* Now actually save the FP registers.  */
for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
if (regs_ever_live[i]
|| (! TARGET_64BIT && regs_ever_live[i + 1]))
rtx addr, insn, reg;
addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
reg = gen_rtx_REG (DFmode, i);
insn = emit_move_insn (addr, reg);
RTX_FRAME_RELATED_P (insn) = 1;
rtx mem = gen_rtx_MEM (DFmode,
plus_constant (base, offset));
= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
gen_rtx_SET (VOIDmode, mem, reg),
rtx meml = gen_rtx_MEM (SFmode,
plus_constant (base, offset));
rtx memr = gen_rtx_MEM (SFmode,
plus_constant (base, offset + 4));
rtx regl = gen_rtx_REG (SFmode, i);
rtx regr = gen_rtx_REG (SFmode, i + 1);
rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
RTX_FRAME_RELATED_P (setl) = 1;
RTX_FRAME_RELATED_P (setr) = 1;
vec = gen_rtvec (2, setl, setr);
= gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
gen_rtx_SEQUENCE (VOIDmode, vec),
offset += GET_MODE_SIZE (DFmode);
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
Handle case where DISP > 8k by using the add_high_const patterns.  */
load_reg (int reg, HOST_WIDE_INT disp, int base)
rtx dest = gen_rtx_REG (word_mode, reg);
rtx basereg = gen_rtx_REG (Pmode, base);
if (VAL_14_BITS_P (disp))
src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
rtx delta = GEN_INT (disp);
rtx tmpreg = gen_rtx_REG (Pmode, 1);
emit_move_insn (tmpreg, delta);
if (TARGET_DISABLE_INDEXING)
emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
src = gen_rtx_MEM (word_mode, tmpreg);
src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
rtx delta = GEN_INT (disp);
rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
rtx tmpreg = gen_rtx_REG (Pmode, 1);
emit_move_insn (tmpreg, high);
src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
emit_move_insn (dest, src);
/* Update the total code bytes output to the text section.  */
update_total_code_bytes (int nbytes)
if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
&& !IN_NAMED_SECTION_P (cfun->decl))
if (INSN_ADDRESSES_SET_P ())
unsigned long old_total = total_code_bytes;
total_code_bytes += nbytes;
/* Be prepared to handle overflows.  */
if (old_total > total_code_bytes)
total_code_bytes = -1;
total_code_bytes = -1;
/* This function generates the assembly code for function exit.
Args are as for output_function_prologue ().
The function epilogue should not depend on the current stack
pointer!  It should use the frame pointer only.  This is mandatory
because of alloca; we also take advantage of it to omit stack
adjustments before returning.  */
pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
rtx insn = get_last_insn ();
/* hppa_expand_epilogue does the dirty work now.  We just need
to output the assembler directives which denote the end
of a function.
To make debuggers happy, emit a nop if the epilogue was completely
eliminated due to a volatile call as the last insn in the
current function.  That way the return address (in %r2) will
always point to a valid instruction in the current function.  */
/* Get the last real insn.  */
if (GET_CODE (insn) == NOTE)
insn = prev_real_insn (insn);
/* If it is a sequence, then look inside.  */
if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
insn = XVECEXP (PATTERN (insn), 0, 0);
/* If insn is a CALL_INSN, then it must be a call to a volatile
function (otherwise there would be epilogue insns).  */
if (insn && GET_CODE (insn) == CALL_INSN)
fputs ("\tnop\n", file);
fputs ("\t.EXIT\n\t.PROCEND\n", file);
if (TARGET_SOM && TARGET_GAS)
/* We're done with this subspace except possibly for some additional
debug information.  Forget that we are in this subspace to ensure
that the next function is output in its own subspace.  */
cfun->machine->in_nsubspa = 2;
if (INSN_ADDRESSES_SET_P ())
insn = get_last_nonnote_insn ();
last_address += INSN_ADDRESSES (INSN_UID (insn));
last_address += insn_default_length (insn);
last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
& ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
/* Finally, update the total number of code bytes output so far.  */
update_total_code_bytes (last_address);
hppa_expand_epilogue (void)
HOST_WIDE_INT offset;
HOST_WIDE_INT ret_off = 0;
int merge_sp_adjust_with_load = 0;
/* We will use this often.  */
tmpreg = gen_rtx_REG (word_mode, 1);
/* Try to restore RP early to avoid load/use interlocks when
RP gets used in the return (bv) instruction.  This appears to still
be necessary even when we schedule the prologue and epilogue.  */
if (regs_ever_live [2] || current_function_calls_eh_return)
ret_off = TARGET_64BIT ? -16 : -20;
if (frame_pointer_needed)
load_reg (2, ret_off, FRAME_POINTER_REGNUM);
/* No frame pointer, and stack is smaller than 8k.  */
if (VAL_14_BITS_P (ret_off - actual_fsize))
load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
/* General register restores.  */
if (frame_pointer_needed)
offset = local_fsize;
/* If the current function calls __builtin_eh_return, then we need
to restore the saved EH data registers.  */
if (DO_FRAME_NOTES && current_function_calls_eh_return)
unsigned int i, regno;
regno = EH_RETURN_DATA_REGNO (i);
if (regno == INVALID_REGNUM)
load_reg (regno, offset, FRAME_POINTER_REGNUM);
offset += UNITS_PER_WORD;
for (i = 18; i >= 4; i--)
if (regs_ever_live[i] && ! call_used_regs[i])
load_reg (i, offset, FRAME_POINTER_REGNUM);
offset += UNITS_PER_WORD;
offset = local_fsize - actual_fsize;
/* If the current function calls __builtin_eh_return, then we need
to restore the saved EH data registers.  */
if (DO_FRAME_NOTES && current_function_calls_eh_return)
unsigned int i, regno;
regno = EH_RETURN_DATA_REGNO (i);
if (regno == INVALID_REGNUM)
/* Only for the first load.
merge_sp_adjust_with_load holds the register load
with which we will merge the sp adjustment.  */
if (merge_sp_adjust_with_load == 0
&& VAL_14_BITS_P (-actual_fsize))
merge_sp_adjust_with_load = regno;
load_reg (regno, offset, STACK_POINTER_REGNUM);
offset += UNITS_PER_WORD;
for (i = 18; i >= 3; i--)
if (regs_ever_live[i] && ! call_used_regs[i])
/* Only for the first load.
merge_sp_adjust_with_load holds the register load
with which we will merge the sp adjustment.  */
if (merge_sp_adjust_with_load == 0
&& VAL_14_BITS_P (-actual_fsize))
merge_sp_adjust_with_load = i;
load_reg (i, offset, STACK_POINTER_REGNUM);
offset += UNITS_PER_WORD;
/* Align pointer properly (doubleword boundary).  */
offset = (offset + 7) & ~7;
/* FP register restores.  */
/* Adjust the register to index off of.  */
if (frame_pointer_needed)
set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
/* Actually do the restores now.  */
for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
if (regs_ever_live[i]
|| (! TARGET_64BIT && regs_ever_live[i + 1]))
rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
rtx dest = gen_rtx_REG (DFmode, i);
emit_move_insn (dest, src);
/* Emit a blockage insn here to keep these insns from being moved to
an earlier spot in the epilogue, or into the main instruction stream.
This is necessary as we must not cut the stack back before all the
restores are finished.  */
emit_insn (gen_blockage ());
/* Reset stack pointer (and possibly frame pointer).  The stack
pointer is initially set to fp + 64 to avoid a race condition.  */
if (frame_pointer_needed)
rtx delta = GEN_INT (-64);
set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
/* If we were deferring a callee register restore, do it now.  */
else if (merge_sp_adjust_with_load)
rtx delta = GEN_INT (-actual_fsize);
rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
else if (actual_fsize != 0)
set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
/* If we haven't restored %r2 yet (no frame pointer, and a stack
frame greater than 8k), do so now.  */
load_reg (2, ret_off, STACK_POINTER_REGNUM);
if (DO_FRAME_NOTES && current_function_calls_eh_return)
rtx sa = EH_RETURN_STACKADJ_RTX;
emit_insn (gen_blockage ());
emit_insn (TARGET_64BIT
? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
: gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
hppa_pic_save_rtx (void)
return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0
/* Vector of funcdef numbers.  */
static VEC(int,heap) *funcdef_nos;
/* Output deferred profile counters.  */
output_deferred_profile_counters (void)
if (VEC_empty (int, funcdef_nos))
switch_to_section (data_section);
align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
targetm.asm_out.internal_label (asm_out_file, "LP", n);
assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
VEC_free (int, heap, funcdef_nos);
hppa_profile_hook (int label_no)
/* We use SImode for the address of the function in both 32 and
64-bit code to avoid having to provide DImode versions of the
lcla2 and load_offset_label_address insn patterns.  */
rtx reg = gen_reg_rtx (SImode);
rtx label_rtx = gen_label_rtx ();
rtx begin_label_rtx, call_insn;
char begin_label_name[16];
ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
emit_move_insn (arg_pointer_rtx,
gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4223 /* The address of the function is loaded into %r25 with an instruction-
4224 relative sequence that avoids the use of relocations. The sequence
4225 is split so that the load_offset_label_address instruction can
4226 occupy the delay slot of the call to _mcount. */
4228 emit_insn (gen_lcla2 (reg, label_rtx));
4230 emit_insn (gen_lcla1 (reg, label_rtx));
4232 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4233 reg, begin_label_rtx, label_rtx));
4235 #if !NO_DEFERRED_PROFILE_COUNTERS
4237 rtx count_label_rtx, addr, r24;
4238 char count_label_name[16];
4240 VEC_safe_push (int, heap, funcdef_nos, label_no);
4241 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4242 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4244 addr = force_reg (Pmode, count_label_rtx);
4245 r24 = gen_rtx_REG (Pmode, 24);
4246 emit_move_insn (r24, addr);
4249 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4250 gen_rtx_SYMBOL_REF (Pmode,
4252 GEN_INT (TARGET_64BIT ? 24 : 12)));
4254 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4259 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4260 gen_rtx_SYMBOL_REF (Pmode,
4262 GEN_INT (TARGET_64BIT ? 16 : 8)));
4266 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4267 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4269 /* Indicate the _mcount call cannot throw, nor will it execute a
4270 non-local goto. */
4271 REG_NOTES (call_insn)
4272 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4275 /* Fetch the return address for the frame COUNT steps up from
4276 the current frame, after the prologue. FRAMEADDR is the
4277 frame pointer of the COUNT frame.
4279 We want to ignore any export stub remnants here. To handle this,
4280 we examine the code at the return address, and if it is an export
4281 stub, we return a memory rtx for the stub return address stored
4282 at frame-24.
4284 The value returned is used in two different ways:
4286 1. To find a function's caller.
4288 2. To change the return address for a function.
4290 This function handles most instances of case 1; however, it will
4291 fail if there are two levels of stubs to execute on the return
4292 path. The only way I believe that can happen is if the return value
4293 needs a parameter relocation, which never happens for C code.
4295 This function handles most instances of case 2; however, it will
4296 fail if we did not originally have stub code on the return path
4297 but will need stub code on the new return path. This can happen if
4298 the caller & callee are both in the main program, but the new
4299 return location is in a shared library. */
4302 return_addr_rtx (int count, rtx frameaddr)
4312 rp = get_hard_reg_initial_val (Pmode, 2);
4314 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4317 saved_rp = gen_reg_rtx (Pmode);
4318 emit_move_insn (saved_rp, rp);
4320 /* Get pointer to the instruction stream. We have to mask out the
4321 privilege level from the two low order bits of the return address
4322 pointer here so that ins will point to the start of the first
4323 instruction that would have been executed if we returned. */
4324 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4325 label = gen_label_rtx ();
4327 /* Check the instruction stream at the normal return address for the
4330 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4331 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4332 0x00011820 | stub+16: mtsp r1,sr0
4333 0xe0400002 | stub+20: be,n 0(sr0,rp)
4335 If it is an export stub, then our return address is really in
4336 -24[frameaddr]. */
4338 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4339 NULL_RTX, SImode, 1);
4340 emit_jump_insn (gen_bne (label));
4342 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4343 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4344 emit_jump_insn (gen_bne (label));
4346 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4347 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4348 emit_jump_insn (gen_bne (label));
4350 /* 0xe0400002 must be specified as -532676606 so that it won't be
4351 rejected as an invalid immediate operand on 64-bit hosts. */
4352 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4353 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
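/* As a quick check on the constant: 0xe0400002 is 3762290690 when read
   as an unsigned 32-bit number, and 3762290690 - 4294967296 gives the
   signed value -532676606 used here.  */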
4355 /* If there is no export stub then just use the value saved from
4356 the return pointer register. */
4358 emit_jump_insn (gen_bne (label));
4360 /* Here we know that our return address points to an export
4361 stub. We don't want to return the address of the export stub,
4362 but rather the return address of the export stub. That return
4363 address is stored at -24[frameaddr]. */
4365 emit_move_insn (saved_rp,
4367 memory_address (Pmode,
4368 plus_constant (frameaddr,
4375 /* This is only valid once reload has completed because it depends on
4376 knowing exactly how much (if any) frame there is and...
4378 It's only valid if there is no frame marker to de-allocate and...
4380 It's only valid if %r2 hasn't been saved into the caller's frame
4381 (we're not profiling and %r2 isn't live anywhere). */
4383 hppa_can_use_return_insn_p (void)
4385 return (reload_completed
4386 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4387 && ! regs_ever_live[2]
4388 && ! frame_pointer_needed);
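/* Note the middle conjunct above: compute_frame_size (...) ? 0 : 1
   evaluates to 1 exactly when the computed frame size is zero, i.e.,
   there is no frame to deallocate on return.  */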
4392 emit_bcond_fp (enum rtx_code code, rtx operand0)
4394 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4395 gen_rtx_IF_THEN_ELSE (VOIDmode,
4396 gen_rtx_fmt_ee (code,
4398 gen_rtx_REG (CCFPmode, 0),
4400 gen_rtx_LABEL_REF (VOIDmode, operand0),
4406 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4408 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4409 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4412 /* Adjust the cost of a scheduling dependency. Return the new cost of
4413 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4416 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4418 enum attr_type attr_type;
4420 /* Don't adjust costs for a pa8000 chip; also, do not adjust any
4421 true dependencies as they are described with bypasses now. */
4422 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4425 if (! recog_memoized (insn))
4428 attr_type = get_attr_type (insn);
4430 switch (REG_NOTE_KIND (link))
4433 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4436 if (attr_type == TYPE_FPLOAD)
4438 rtx pat = PATTERN (insn);
4439 rtx dep_pat = PATTERN (dep_insn);
4440 if (GET_CODE (pat) == PARALLEL)
4442 /* This happens for the fldXs,mb patterns. */
4443 pat = XVECEXP (pat, 0, 0);
4445 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4446 /* If this happens, we have to extend this to schedule
4447 optimally. Return 0 for now. */
4450 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4452 if (! recog_memoized (dep_insn))
4454 switch (get_attr_type (dep_insn))
4461 case TYPE_FPSQRTSGL:
4462 case TYPE_FPSQRTDBL:
4463 /* A fpload can't be issued until one cycle before a
4464 preceding arithmetic operation has finished if
4465 the target of the fpload is any of the sources
4466 (or destination) of the arithmetic operation. */
4467 return insn_default_latency (dep_insn) - 1;
4474 else if (attr_type == TYPE_FPALU)
4476 rtx pat = PATTERN (insn);
4477 rtx dep_pat = PATTERN (dep_insn);
4478 if (GET_CODE (pat) == PARALLEL)
4480 /* This happens for the fldXs,mb patterns. */
4481 pat = XVECEXP (pat, 0, 0);
4483 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4484 /* If this happens, we have to extend this to schedule
4485 optimally. Return 0 for now. */
4488 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4490 if (! recog_memoized (dep_insn))
4492 switch (get_attr_type (dep_insn))
4496 case TYPE_FPSQRTSGL:
4497 case TYPE_FPSQRTDBL:
4498 /* An ALU flop can't be issued until two cycles before a
4499 preceding divide or sqrt operation has finished if
4500 the target of the ALU flop is any of the sources
4501 (or destination) of the divide or sqrt operation. */
4502 return insn_default_latency (dep_insn) - 2;
4510 /* For other anti dependencies, the cost is 0. */
4513 case REG_DEP_OUTPUT:
4514 /* Output dependency; DEP_INSN writes a register that INSN writes some
4516 if (attr_type == TYPE_FPLOAD)
4518 rtx pat = PATTERN (insn);
4519 rtx dep_pat = PATTERN (dep_insn);
4520 if (GET_CODE (pat) == PARALLEL)
4522 /* This happens for the fldXs,mb patterns. */
4523 pat = XVECEXP (pat, 0, 0);
4525 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4526 /* If this happens, we have to extend this to schedule
4527 optimally. Return 0 for now. */
4530 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4532 if (! recog_memoized (dep_insn))
4534 switch (get_attr_type (dep_insn))
4541 case TYPE_FPSQRTSGL:
4542 case TYPE_FPSQRTDBL:
4543 /* A fpload can't be issued until one cycle before a
4544 preceding arithmetic operation has finished if
4545 the target of the fpload is the destination of the
4546 arithmetic operation.
4548 Exception: For PA7100LC, PA7200 and PA7300, the cost
4549 is 3 cycles, unless they bundle together. We also
4550 pay the penalty if the second insn is a fpload. */
4551 return insn_default_latency (dep_insn) - 1;
4558 else if (attr_type == TYPE_FPALU)
4560 rtx pat = PATTERN (insn);
4561 rtx dep_pat = PATTERN (dep_insn);
4562 if (GET_CODE (pat) == PARALLEL)
4564 /* This happens for the fldXs,mb patterns. */
4565 pat = XVECEXP (pat, 0, 0);
4567 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4568 /* If this happens, we have to extend this to schedule
4569 optimally. Return 0 for now. */
4572 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4574 if (! recog_memoized (dep_insn))
4576 switch (get_attr_type (dep_insn))
4580 case TYPE_FPSQRTSGL:
4581 case TYPE_FPSQRTDBL:
4582 /* An ALU flop can't be issued until two cycles before a
4583 preceding divide or sqrt operation has finished if
4584 the target of the ALU flop is also the target of
4585 the divide or sqrt operation. */
4586 return insn_default_latency (dep_insn) - 2;
4594 /* For other output dependencies, the cost is 0. */
4602 /* Adjust scheduling priorities. We use this to try and keep addil
4603 and the next use of %r1 close together. */
4605 pa_adjust_priority (rtx insn, int priority)
4607 rtx set = single_set (insn);
4611 src = SET_SRC (set);
4612 dest = SET_DEST (set);
4613 if (GET_CODE (src) == LO_SUM
4614 && symbolic_operand (XEXP (src, 1), VOIDmode)
4615 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4618 else if (GET_CODE (src) == MEM
4619 && GET_CODE (XEXP (src, 0)) == LO_SUM
4620 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4621 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4624 else if (GET_CODE (dest) == MEM
4625 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4626 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4627 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4633 /* The 700 can only issue a single insn at a time.
4634 The 7XXX processors can issue two insns at a time.
4635 The 8000 can issue 4 insns at a time. */
4637 pa_issue_rate (void)
4641 case PROCESSOR_700: return 1;
4642 case PROCESSOR_7100: return 2;
4643 case PROCESSOR_7100LC: return 2;
4644 case PROCESSOR_7200: return 2;
4645 case PROCESSOR_7300: return 2;
4646 case PROCESSOR_8000: return 4;
4655 /* Return any length adjustment needed by INSN which already has its length
4656 computed as LENGTH. Return zero if no adjustment is necessary.
4658 For the PA: function calls, millicode calls, and backwards short
4659 conditional branches with unfilled delay slots need an adjustment by +1
4660 (to account for the NOP which will be inserted into the instruction stream).
4662 Also compute the length of an inline block move here as it is too
4663 complicated to express as a length attribute in pa.md. */
4665 pa_adjust_insn_length (rtx insn, int length)
4667 rtx pat = PATTERN (insn);
4669 /* Jumps inside switch tables which have unfilled delay slots need
4670 adjustment. */
4671 if (GET_CODE (insn) == JUMP_INSN
4672 && GET_CODE (pat) == PARALLEL
4673 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4675 /* Millicode insn with an unfilled delay slot. */
4676 else if (GET_CODE (insn) == INSN
4677 && GET_CODE (pat) != SEQUENCE
4678 && GET_CODE (pat) != USE
4679 && GET_CODE (pat) != CLOBBER
4680 && get_attr_type (insn) == TYPE_MILLI)
4682 /* Block move pattern. */
4683 else if (GET_CODE (insn) == INSN
4684 && GET_CODE (pat) == PARALLEL
4685 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4686 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4687 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4688 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4689 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4690 return compute_movmem_length (insn) - 4;
4691 /* Block clear pattern. */
4692 else if (GET_CODE (insn) == INSN
4693 && GET_CODE (pat) == PARALLEL
4694 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4695 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4696 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4697 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4698 return compute_clrmem_length (insn) - 4;
4699 /* Conditional branch with an unfilled delay slot. */
4700 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4702 /* Adjust a short backwards conditional with an unfilled delay slot. */
4703 if (GET_CODE (pat) == SET
4705 && ! forward_branch_p (insn))
4707 else if (GET_CODE (pat) == PARALLEL
4708 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4711 /* Adjust dbra insn with short backwards conditional branch with
4712 unfilled delay slot -- only for case where counter is in a
4713 general register. */
4714 else if (GET_CODE (pat) == PARALLEL
4715 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4716 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4717 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4719 && ! forward_branch_p (insn))
4727 /* Print operand X (an rtx) in assembler syntax to file FILE.
4728 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4729 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4732 print_operand (FILE *file, rtx x, int code)
4737 /* Output a 'nop' if there's nothing for the delay slot. */
4738 if (dbr_sequence_length () == 0)
4739 fputs ("\n\tnop", file);
4742 /* Output a nullification completer if there's nothing for the
4743 delay slot or nullification is requested. */
4744 if (dbr_sequence_length () == 0 ||
4745 (final_sequence &&
4746 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4750 /* Print out the second register name of a register pair.
4751 I.e., R (6) => 7. */
4752 fputs (reg_names[REGNO (x) + 1], file);
4755 /* A register or zero. */
4757 || (x == CONST0_RTX (DFmode))
4758 || (x == CONST0_RTX (SFmode)))
4760 fputs ("%r0", file);
4766 /* A register or zero (floating point). */
4768 || (x == CONST0_RTX (DFmode))
4769 || (x == CONST0_RTX (SFmode)))
4771 fputs ("%fr0", file);
4780 xoperands[0] = XEXP (XEXP (x, 0), 0);
4781 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4782 output_global_address (file, xoperands[1], 0);
4783 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4787 case 'C': /* Plain (C)ondition */
4789 switch (GET_CODE (x))
4792 fputs ("=", file); break;
4794 fputs ("<>", file); break;
4796 fputs (">", file); break;
4798 fputs (">=", file); break;
4800 fputs (">>=", file); break;
4802 fputs (">>", file); break;
4804 fputs ("<", file); break;
4806 fputs ("<=", file); break;
4808 fputs ("<<=", file); break;
4810 fputs ("<<", file); break;
4815 case 'N': /* Condition, (N)egated */
4816 switch (GET_CODE (x))
4819 fputs ("<>", file); break;
4821 fputs ("=", file); break;
4823 fputs ("<=", file); break;
4825 fputs ("<", file); break;
4827 fputs ("<<", file); break;
4829 fputs ("<<=", file); break;
4831 fputs (">=", file); break;
4833 fputs (">", file); break;
4835 fputs (">>", file); break;
4837 fputs (">>=", file); break;
4842 /* For floating point comparisons. Note that the output
4843 predicates are the complement of the desired mode. The
4844 conditions for GT, GE, LT, LE and LTGT cause an invalid
4845 operation exception if the result is unordered and this
4846 exception is enabled in the floating-point status register. */
4848 switch (GET_CODE (x))
4851 fputs ("!=", file); break;
4853 fputs ("=", file); break;
4855 fputs ("!>", file); break;
4857 fputs ("!>=", file); break;
4859 fputs ("!<", file); break;
4861 fputs ("!<=", file); break;
4863 fputs ("!<>", file); break;
4865 fputs ("!?<=", file); break;
4867 fputs ("!?<", file); break;
4869 fputs ("!?>=", file); break;
4871 fputs ("!?>", file); break;
4873 fputs ("!?=", file); break;
4875 fputs ("!?", file); break;
4877 fputs ("?", file); break;
4882 case 'S': /* Condition, operands are (S)wapped. */
4883 switch (GET_CODE (x))
4886 fputs ("=", file); break;
4888 fputs ("<>", file); break;
4890 fputs ("<", file); break;
4892 fputs ("<=", file); break;
4894 fputs ("<<=", file); break;
4896 fputs ("<<", file); break;
4898 fputs (">", file); break;
4900 fputs (">=", file); break;
4902 fputs (">>=", file); break;
4904 fputs (">>", file); break;
4909 case 'B': /* Condition, (B)oth swapped and negate. */
4910 switch (GET_CODE (x))
4913 fputs ("<>", file); break;
4915 fputs ("=", file); break;
4917 fputs (">=", file); break;
4919 fputs (">", file); break;
4921 fputs (">>", file); break;
4923 fputs (">>=", file); break;
4925 fputs ("<=", file); break;
4927 fputs ("<", file); break;
4929 fputs ("<<", file); break;
4931 fputs ("<<=", file); break;
4937 gcc_assert (GET_CODE (x) == CONST_INT);
4938 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4941 gcc_assert (GET_CODE (x) == CONST_INT);
4942 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4945 gcc_assert (GET_CODE (x) == CONST_INT);
4946 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4949 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4950 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4953 gcc_assert (GET_CODE (x) == CONST_INT);
4954 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4957 gcc_assert (GET_CODE (x) == CONST_INT);
4958 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
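/* The CONST_INT cases above print, in turn, the bitwise complement of
   the operand, the 64-bit and 32-bit shift-count complements (64 - n
   and 32 - n), the base-2 logarithm of a power-of-two operand, and the
   bit-position complements (63 - n and 31 - n) used when emitting
   shift, extract and deposit instructions.  */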
4961 if (GET_CODE (x) == CONST_INT)
4966 switch (GET_CODE (XEXP (x, 0)))
4970 if (ASSEMBLER_DIALECT == 0)
4971 fputs ("s,mb", file);
4973 fputs (",mb", file);
4977 if (ASSEMBLER_DIALECT == 0)
4978 fputs ("s,ma", file);
4980 fputs (",ma", file);
4983 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4984 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4986 if (ASSEMBLER_DIALECT == 0)
4989 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4990 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4992 if (ASSEMBLER_DIALECT == 0)
4993 fputs ("x,s", file);
4997 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5001 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5007 output_global_address (file, x, 0);
5010 output_global_address (file, x, 1);
5012 case 0: /* Don't do anything special */
5017 compute_zdepwi_operands (INTVAL (x), op);
5018 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5024 compute_zdepdi_operands (INTVAL (x), op);
5025 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5029 /* We can get here from a .vtable_inherit due to our
5030 CONSTANT_ADDRESS_P rejecting perfectly good constant
5031 addresses. */
5036 if (GET_CODE (x) == REG)
5038 fputs (reg_names [REGNO (x)], file);
5039 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5045 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5046 && (REGNO (x) & 1) == 0)
5049 else if (GET_CODE (x) == MEM)
5051 int size = GET_MODE_SIZE (GET_MODE (x));
5052 rtx base = NULL_RTX;
5053 switch (GET_CODE (XEXP (x, 0)))
5057 base = XEXP (XEXP (x, 0), 0);
5058 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5062 base = XEXP (XEXP (x, 0), 0);
5063 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5066 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5067 fprintf (file, "%s(%s)",
5068 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5069 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5070 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5071 fprintf (file, "%s(%s)",
5072 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5073 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5074 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5075 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5077 /* Because the REG_POINTER flag can get lost during reload,
5078 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5079 index and base registers in the combined move patterns. */
5080 rtx base = XEXP (XEXP (x, 0), 1);
5081 rtx index = XEXP (XEXP (x, 0), 0);
5083 fprintf (file, "%s(%s)",
5084 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5087 output_address (XEXP (x, 0));
5090 output_address (XEXP (x, 0));
5095 output_addr_const (file, x);
5098 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5101 output_global_address (FILE *file, rtx x, int round_constant)
5104 /* Imagine (high (const (plus ...))). */
5105 if (GET_CODE (x) == HIGH)
5108 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5109 output_addr_const (file, x);
5110 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5112 output_addr_const (file, x);
5113 fputs ("-$global$", file);
5115 else if (GET_CODE (x) == CONST)
5117 const char *sep = "";
5118 int offset = 0; /* assembler wants -$global$ at end */
5119 rtx base = NULL_RTX;
5121 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5124 base = XEXP (XEXP (x, 0), 0);
5125 output_addr_const (file, base);
5128 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5134 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5137 base = XEXP (XEXP (x, 0), 1);
5138 output_addr_const (file, base);
5141 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5147 /* How bogus. The compiler is apparently responsible for
5148 rounding the constant if it uses an LR field selector.
5150 The linker and/or assembler seem a better place since
5151 they have to do this kind of thing already.
5153 If we fail to do this, HP's optimizing linker may eliminate
5154 an addil, but not update the ldw/stw/ldo instruction that
5155 uses the result of the addil. */
5157 offset = ((offset + 0x1000) & ~0x1fff);
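/* This rounds OFFSET to the nearest multiple of 0x2000 (8k); for
   example, 0xfff rounds down to 0 while 0x1000 rounds up to 0x2000.  */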
5159 switch (GET_CODE (XEXP (x, 0)))
5172 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5180 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5181 fputs ("-$global$", file);
5183 fprintf (file, "%s%d", sep, offset);
5186 output_addr_const (file, x);
5189 /* Output boilerplate text to appear at the beginning of the file.
5190 There are several possible versions. */
5191 #define aputs(x) fputs(x, asm_out_file)
5193 pa_file_start_level (void)
5196 aputs ("\t.LEVEL 2.0w\n");
5197 else if (TARGET_PA_20)
5198 aputs ("\t.LEVEL 2.0\n");
5199 else if (TARGET_PA_11)
5200 aputs ("\t.LEVEL 1.1\n");
5202 aputs ("\t.LEVEL 1.0\n");
5206 pa_file_start_space (int sortspace)
5208 aputs ("\t.SPACE $PRIVATE$");
5211 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5212 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5213 "\n\t.SPACE $TEXT$");
5216 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5217 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5221 pa_file_start_file (int want_version)
5223 if (write_symbols != NO_DEBUG)
5225 output_file_directive (asm_out_file, main_input_filename);
5227 aputs ("\t.version\t\"01.01\"\n");
5232 pa_file_start_mcount (const char *aswhat)
5235 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5239 pa_elf_file_start (void)
5241 pa_file_start_level ();
5242 pa_file_start_mcount ("ENTRY");
5243 pa_file_start_file (0);
5247 pa_som_file_start (void)
5249 pa_file_start_level ();
5250 pa_file_start_space (0);
5251 aputs ("\t.IMPORT $global$,DATA\n"
5252 "\t.IMPORT $$dyncall,MILLICODE\n");
5253 pa_file_start_mcount ("CODE");
5254 pa_file_start_file (0);
5258 pa_linux_file_start (void)
5260 pa_file_start_file (1);
5261 pa_file_start_level ();
5262 pa_file_start_mcount ("CODE");
5266 pa_hpux64_gas_file_start (void)
5268 pa_file_start_level ();
5269 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5271 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5273 pa_file_start_file (1);
5277 pa_hpux64_hpas_file_start (void)
5279 pa_file_start_level ();
5280 pa_file_start_space (1);
5281 pa_file_start_mcount ("CODE");
5282 pa_file_start_file (0);
5286 /* Search the deferred plabel list for SYMBOL and return its internal
5287 label. If an entry for SYMBOL is not found, a new entry is created. */
5290 get_deferred_plabel (rtx symbol)
5292 const char *fname = XSTR (symbol, 0);
5295 /* See if we have already put this function on the list of deferred
5296 plabels. This list is generally small, so a linear search is not
5297 too ugly. If it proves too slow, replace it with something faster. */
5298 for (i = 0; i < n_deferred_plabels; i++)
5299 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5302 /* If the deferred plabel list is empty, or this entry was not found
5303 on the list, create a new entry on the list. */
5304 if (deferred_plabels == NULL || i == n_deferred_plabels)
5308 if (deferred_plabels == 0)
5309 deferred_plabels = (struct deferred_plabel *)
5310 ggc_alloc (sizeof (struct deferred_plabel));
5312 deferred_plabels = (struct deferred_plabel *)
5313 ggc_realloc (deferred_plabels,
5314 ((n_deferred_plabels + 1)
5315 * sizeof (struct deferred_plabel)));
5317 i = n_deferred_plabels++;
5318 deferred_plabels[i].internal_label = gen_label_rtx ();
5319 deferred_plabels[i].symbol = symbol;
5321 /* Gross. We have just implicitly taken the address of this
5322 function. Mark it in the same manner as assemble_name. */
5323 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5325 mark_referenced (id);
5328 return deferred_plabels[i].internal_label;
5332 output_deferred_plabels (void)
5336 /* If we have some deferred plabels, then we need to switch into the
5337 data or readonly data section, and align it to a 4 byte (8 byte in 64-bit mode) boundary
5338 before outputting the deferred plabels. */
5339 if (n_deferred_plabels)
5341 switch_to_section (flag_pic ? data_section : readonly_data_section);
5342 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5345 /* Now output the deferred plabels. */
5346 for (i = 0; i < n_deferred_plabels; i++)
5348 targetm.asm_out.internal_label (asm_out_file, "L",
5349 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5350 assemble_integer (deferred_plabels[i].symbol,
5351 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5355 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5356 /* Initialize optabs to point to HPUX long double emulation routines. */
5358 pa_hpux_init_libfuncs (void)
5360 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5361 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5362 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5363 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5364 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5365 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5366 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5367 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5368 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5370 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5371 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5372 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5373 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5374 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5375 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5376 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5378 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5379 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5380 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5381 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5383 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5384 ? "__U_Qfcnvfxt_quad_to_sgl"
5385 : "_U_Qfcnvfxt_quad_to_sgl");
5386 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5387 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5388 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5390 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5391 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5392 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5393 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5397 /* HP's millicode routines mean something special to the assembler.
5398 Keep track of which ones we have used. */
5400 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5401 static void import_milli (enum millicodes);
5402 static char imported[(int) end1000];
5403 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5404 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5405 #define MILLI_START 10
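/* ".IMPORT $$" is ten characters long, so MILLI_START is the offset at
   which the four-character millicode name is spliced into a copy of
   the import string below, yielding, e.g., ".IMPORT $$mulI,MILLICODE".  */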
5408 import_milli (enum millicodes code)
5410 char str[sizeof (import_string)];
5412 if (!imported[(int) code])
5414 imported[(int) code] = 1;
5415 strcpy (str, import_string);
5416 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5417 output_asm_insn (str, 0);
5421 /* The register constraints have put the operands and return value in
5422 the proper registers. */
5425 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5427 import_milli (mulI);
5428 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5431 /* Emit the rtl for doing a division by a constant. */
5433 /* Do magic division millicodes exist for this value? */
5434 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
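/* The nonzero entries are at indices 3, 5, 6, 7, 9, 10, 12, 14 and 15;
   these are the divisors for which $$divI_<n> and $$divU_<n> millicode
   entry points exist.  */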
5436 /* We'll use an array to keep track of the magic millicodes and
5437 whether or not we've used them already. [n][0] is signed, [n][1] is
5438 unsigned. */
5440 static int div_milli[16][2];
5443 emit_hpdiv_const (rtx *operands, int unsignedp)
5445 if (GET_CODE (operands[2]) == CONST_INT
5446 && INTVAL (operands[2]) > 0
5447 && INTVAL (operands[2]) < 16
5448 && magic_milli[INTVAL (operands[2])])
5450 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5452 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5456 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5457 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5459 gen_rtx_REG (SImode, 26),
5461 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5462 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5463 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5464 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5465 gen_rtx_CLOBBER (VOIDmode, ret))));
5466 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
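/* The PARALLEL above describes the millicode call's complete effect to
   the optimizers: the quotient is computed into %r29, while the two
   scratch operands, the argument registers %r25 and %r26, and the
   return link in RET are all clobbered.  */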
5473 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5477 /* If the divisor is a constant, try to use one of the special
5478 millicode routines. */
5479 if (GET_CODE (operands[0]) == CONST_INT)
5481 static char buf[100];
5482 divisor = INTVAL (operands[0]);
5483 if (!div_milli[divisor][unsignedp])
5485 div_milli[divisor][unsignedp] = 1;
5487 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5489 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5493 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5494 INTVAL (operands[0]));
5495 return output_millicode_call (insn,
5496 gen_rtx_SYMBOL_REF (SImode, buf));
5500 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5501 INTVAL (operands[0]));
5502 return output_millicode_call (insn,
5503 gen_rtx_SYMBOL_REF (SImode, buf));
5506 /* Divisor isn't a special constant. */
5511 import_milli (divU);
5512 return output_millicode_call (insn,
5513 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5517 import_milli (divI);
5518 return output_millicode_call (insn,
5519 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5524 /* Output a $$rem millicode to do mod. */
5527 output_mod_insn (int unsignedp, rtx insn)
5531 import_milli (remU);
5532 return output_millicode_call (insn,
5533 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5537 import_milli (remI);
5538 return output_millicode_call (insn,
5539 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5544 output_arg_descriptor (rtx call_insn)
5546 const char *arg_regs[4];
5547 enum machine_mode arg_mode;
5549 int i, output_flag = 0;
5552 /* We neither need nor want argument location descriptors for the
5553 64bit runtime environment or the ELF32 environment. */
5554 if (TARGET_64BIT || TARGET_ELF32)
5557 for (i = 0; i < 4; i++)
5560 /* Specify explicitly that no argument relocations should take place
5561 if using the portable runtime calling conventions. */
5562 if (TARGET_PORTABLE_RUNTIME)
5564 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5569 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5570 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5571 link; link = XEXP (link, 1))
5573 rtx use = XEXP (link, 0);
5575 if (! (GET_CODE (use) == USE
5576 && GET_CODE (XEXP (use, 0)) == REG
5577 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5580 arg_mode = GET_MODE (XEXP (use, 0));
5581 regno = REGNO (XEXP (use, 0));
5582 if (regno >= 23 && regno <= 26)
5584 arg_regs[26 - regno] = "GR";
5585 if (arg_mode == DImode)
5586 arg_regs[25 - regno] = "GR";
5588 else if (regno >= 32 && regno <= 39)
5590 if (arg_mode == SFmode)
5591 arg_regs[(regno - 32) / 2] = "FR";
5594 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5595 arg_regs[(regno - 34) / 2] = "FR";
5596 arg_regs[(regno - 34) / 2 + 1] = "FU";
5598 arg_regs[(regno - 34) / 2] = "FU";
5599 arg_regs[(regno - 34) / 2 + 1] = "FR";
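/* The index arithmetic above maps argument registers onto the four
   argument words ARGW0..ARGW3: general registers %r26..%r23 via
   26 - regno, and the FP argument registers via (regno - 32) / 2 or
   (regno - 34) / 2, with an eight-byte argument occupying two
   consecutive words.  */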
5604 fputs ("\t.CALL ", asm_out_file);
5605 for (i = 0; i < 4; i++)
5610 fputc (',', asm_out_file);
5611 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5614 fputc ('\n', asm_out_file);
5617 static enum reg_class
5618 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5619 enum machine_mode mode, secondary_reload_info *sri)
5621 int is_symbolic, regno;
5623 /* Handle the easy stuff first. */
5624 if (class == R1_REGS)
5630 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5636 /* If we have something like (mem (mem (...)), we can safely assume the
5637 inner MEM will end up in a general register after reloading, so there's
5638 no need for a secondary reload. */
5639 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5642 /* Trying to load a constant into a FP register during PIC code
5643 generation requires %r1 as a scratch register. */
5645 && (mode == SImode || mode == DImode)
5646 && FP_REG_CLASS_P (class)
5647 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5649 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5650 : CODE_FOR_reload_indi_r1);
5654 /* Profiling showed the PA port spends about 1.3% of its compilation
5655 time in true_regnum from calls inside pa_secondary_reload_class. */
5656 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5657 regno = true_regnum (x);
5659 /* Handle out of range displacement for integer mode loads/stores of
5660 FP registers. */
5661 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5662 && GET_MODE_CLASS (mode) == MODE_INT
5663 && FP_REG_CLASS_P (class))
5664 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5666 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5670 /* A SAR<->FP register copy requires a secondary register (GPR) as
5671 well as secondary memory. */
5672 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5673 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5674 || (class == SHIFT_REGS
5675 && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5677 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5681 /* Secondary reloads of symbolic operands require %r1 as a scratch
5682 register when we're generating PIC code and the operand isn't
5683 readonly. */
5684 if (GET_CODE (x) == HIGH)
5687 /* Profiling has shown GCC spends about 2.6% of its compilation
5688 time in symbolic_operand from calls inside pa_secondary_reload_class.
5689 So, we use an inline copy to avoid useless work. */
5690 switch (GET_CODE (x))
5695 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5702 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5703 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5704 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5705 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5712 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5714 gcc_assert (mode == SImode || mode == DImode);
5715 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5716 : CODE_FOR_reload_indi_r1);
5722 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5723 by invisible reference. As a GCC extension, we also pass anything
5724 with a zero or variable size by reference.
5726 The 64-bit runtime does not describe passing any types by invisible
5727 reference. The internals of GCC can't currently handle passing
5728 empty structures, and zero or variable length arrays when they are
5729 not passed entirely on the stack or by reference. Thus, as a GCC
5730 extension, we pass these types by reference. The HP compiler doesn't
5731 support these types, so hopefully there shouldn't be any compatibility
5732 issues. This may have to be revisited when HP releases a C99 compiler
5733 or updates the ABI. */
5736 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5737 enum machine_mode mode, tree type,
5738 bool named ATTRIBUTE_UNUSED)
5743 size = int_size_in_bytes (type);
5745 size = GET_MODE_SIZE (mode);
5750 return size <= 0 || size > 8;
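/* For example, a 16-byte struct is passed by reference here since its
   size exceeds 8, as is a variable-sized type, for which
   int_size_in_bytes returns -1.  */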
5754 function_arg_padding (enum machine_mode mode, tree type)
5757 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5759 /* Return none if justification is not required. */
5761 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5762 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5765 /* The directions set here are ignored when a BLKmode argument larger
5766 than a word is placed in a register. Different code is used for
5767 the stack and registers. This makes it difficult to have a
5768 consistent data representation for both the stack and registers.
5769 For both runtimes, the justification and padding for arguments on
5770 the stack and in registers should be identical. */
5772 /* The 64-bit runtime specifies left justification for aggregates. */
5775 /* The 32-bit runtime architecture specifies right justification.
5776 When the argument is passed on the stack, the argument is padded
5777 with garbage on the left. The HP compiler pads with zeros. */
5781 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5788 /* Do what is necessary for `va_start'. We look at the current function
5789 to determine if stdargs or varargs is used and fill in an initial
5790 va_list. A pointer to this constructor is returned. */
5793 hppa_builtin_saveregs (void)
5796 tree fntype = TREE_TYPE (current_function_decl);
5797 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5798 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5799 != void_type_node)))
5800 ? UNITS_PER_WORD : 0);
5803 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5805 offset = current_function_arg_offset_rtx;
5811 /* Adjust for varargs/stdarg differences. */
5813 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5815 offset = current_function_arg_offset_rtx;
5817 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5818 from the incoming arg pointer and growing to larger addresses. */
5819 for (i = 26, off = -64; i >= 19; i--, off += 8)
5820 emit_move_insn (gen_rtx_MEM (word_mode,
5821 plus_constant (arg_pointer_rtx, off)),
5822 gen_rtx_REG (word_mode, i));
5824 /* The incoming args pointer points just beyond the flushback area;
5825 normally this is not a serious concern. However, when we are doing
5826 varargs/stdargs we want to make the arg pointer point to the start
5827 of the incoming argument area. */
5828 emit_move_insn (virtual_incoming_args_rtx,
5829 plus_constant (arg_pointer_rtx, -64));
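/* The -64 matches the register save area just written: eight argument
   registers, %r26 down to %r19, at eight bytes each.  */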
5831 /* Now return a pointer to the first anonymous argument. */
5832 return copy_to_reg (expand_binop (Pmode, add_optab,
5833 virtual_incoming_args_rtx,
5834 offset, 0, 0, OPTAB_LIB_WIDEN));
5837 /* Store general registers on the stack. */
5838 dest = gen_rtx_MEM (BLKmode,
5839 plus_constant (current_function_internal_arg_pointer,
5841 set_mem_alias_set (dest, get_varargs_alias_set ());
5842 set_mem_align (dest, BITS_PER_WORD);
5843 move_block_from_reg (23, dest, 4);
5845 /* move_block_from_reg will emit code to store the argument registers
5846 individually as scalar stores.
5848 However, other insns may later load from the same addresses for
5849 a structure load (passing a struct to a varargs routine).
5851 The alias code assumes that such aliasing can never happen, so we
5852 have to keep memory referencing insns from moving up beyond the
5853 last argument register store. So we emit a blockage insn here. */
5854 emit_insn (gen_blockage ());
5856 return copy_to_reg (expand_binop (Pmode, add_optab,
5857 current_function_internal_arg_pointer,
5858 offset, 0, 0, OPTAB_LIB_WIDEN));
5862 hppa_va_start (tree valist, rtx nextarg)
5864 nextarg = expand_builtin_saveregs ();
5865 std_expand_builtin_va_start (valist, nextarg);
5869 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5873 /* Args grow upward. We can use the generic routines. */
5874 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5876 else /* !TARGET_64BIT */
5878 tree ptr = build_pointer_type (type);
5881 unsigned int size, ofs;
5884 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5888 ptr = build_pointer_type (type);
5890 size = int_size_in_bytes (type);
5891 valist_type = TREE_TYPE (valist);
5893 /* Args grow down. Not handled by generic routines. */
5895 u = fold_convert (valist_type, size_in_bytes (type));
5896 t = build2 (MINUS_EXPR, valist_type, valist, u);
5898 /* Copied from va-pa.h, but we probably don't need to align to
5899 word size, since we generate and preserve that invariant. */
5900 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5901 t = build2 (BIT_AND_EXPR, valist_type, t, u);
5903 t = build2 (MODIFY_EXPR, valist_type, valist, t);
5905 ofs = (8 - size) % 4;
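/* (8 - size) % 4 is the right-justification offset within a word:
   sizes 1, 2 and 3 yield offsets 3, 2 and 1, while word and doubleword
   sizes yield 0.  */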
5908 u = fold_convert (valist_type, size_int (ofs));
5909 t = build2 (PLUS_EXPR, valist_type, t, u);
5912 t = fold_convert (ptr, t);
5913 t = build_va_arg_indirect_ref (t);
5916 t = build_va_arg_indirect_ref (t);
5922 /* True if MODE is valid for the target. By "valid", we mean able to
5923 be manipulated in non-trivial ways. In particular, this means all
5924 the arithmetic is supported.
5926 Currently, TImode is not valid as the HP 64-bit runtime documentation
5927 doesn't document the alignment and calling conventions for this type.
5928 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5929 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
5932 pa_scalar_mode_supported_p (enum machine_mode mode)
5934 int precision = GET_MODE_PRECISION (mode);
5936 switch (GET_MODE_CLASS (mode))
5938 case MODE_PARTIAL_INT:
5940 if (precision == CHAR_TYPE_SIZE)
5942 if (precision == SHORT_TYPE_SIZE)
5944 if (precision == INT_TYPE_SIZE)
5946 if (precision == LONG_TYPE_SIZE)
5948 if (precision == LONG_LONG_TYPE_SIZE)
5953 if (precision == FLOAT_TYPE_SIZE)
5955 if (precision == DOUBLE_TYPE_SIZE)
5957 if (precision == LONG_DOUBLE_TYPE_SIZE)
5961 case MODE_DECIMAL_FLOAT:
5969 /* This routine handles all the normal conditional branch sequences we
5970 might need to generate. It handles compare immediate vs compare
5971 register, nullification of delay slots, varying length branches,
5972 negated branches, and all combinations of the above. It returns the
5973 output appropriate to emit the branch corresponding to all given
5974 parameters. */
5977 output_cbranch (rtx *operands, int negated, rtx insn)
5979 static char buf[100];
5981 int nullify = INSN_ANNULLED_BRANCH_P (insn);
5982 int length = get_attr_length (insn);
5985 /* A conditional branch to the following instruction (e.g. the delay slot)
5986 is asking for a disaster. This can happen when not optimizing and
5987 when jump optimization fails.
5989 While it is usually safe to emit nothing, this can fail if the
5990 preceding instruction is a nullified branch with an empty delay
5991 slot and the same branch target as this branch. We could check
5992 for this but jump optimization should eliminate nop jumps. It
5993 is always safe to emit a nop. */
5994 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5997 /* The doubleword form of the cmpib instruction doesn't have the LEU
5998 and GTU conditions while the cmpb instruction does. Since we accept
5999 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6000 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6001 operands[2] = gen_rtx_REG (DImode, 0);
6002 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6003 operands[1] = gen_rtx_REG (DImode, 0);
6005 /* If this is a long branch with its delay slot unfilled, set `nullify'
6006 as it can nullify the delay slot and save a nop. */
6007 if (length == 8 && dbr_sequence_length () == 0)
6010 /* If this is a short forward conditional branch which did not get
6011 its delay slot filled, the delay slot can still be nullified. */
6012 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6013 nullify = forward_branch_p (insn);
6015 /* A forward branch over a single nullified insn can be done with a
6016 comclr instruction. This avoids a single cycle penalty due to
6017 mis-predicted branch if we fall through (branch not taken). */
6019 && next_real_insn (insn) != 0
6020 && get_attr_length (next_real_insn (insn)) == 4
6021 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6027 /* All short conditional branches except backwards with an unfilled
6028 delay slot. */
6030 if (useskip)
6031 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6032 else
6033 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6034 if (GET_MODE (operands[1]) == DImode)
6037 strcat (buf, "%B3");
6039 strcat (buf, "%S3");
6041 strcat (buf, " %2,%r1,%%r0");
6043 strcat (buf, ",n %2,%r1,%0");
6045 strcat (buf, " %2,%r1,%0");
6048 /* All long conditionals. Note a short backward branch with an
6049 unfilled delay slot is treated just like a long backward branch
6050 with an unfilled delay slot. */
6052 /* Handle weird backwards branch with a filled delay slot
6053 which is nullified. */
6054 if (dbr_sequence_length () != 0
6055 && ! forward_branch_p (insn)
6058 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6059 if (GET_MODE (operands[1]) == DImode)
6062 strcat (buf, "%S3");
6064 strcat (buf, "%B3");
6065 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6067 /* Handle short backwards branch with an unfilled delay slot.
6068 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6069 taken and untaken branches. */
6070 else if (dbr_sequence_length () == 0
6071 && ! forward_branch_p (insn)
6072 && INSN_ADDRESSES_SET_P ()
6073 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6074 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
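/* The addresses from INSN_ADDRESSES are in bytes; the -8 reflects that
   PA branch displacements are measured from the address of the branch
   plus 8, and VAL_14_BITS_P tests the signed 14-bit byte reach of the
   comb instruction used below.  */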
6076 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6077 if (GET_MODE (operands[1]) == DImode)
6080 strcat (buf, "%B3 %2,%r1,%0%#");
6082 strcat (buf, "%S3 %2,%r1,%0%#");
6086 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6087 if (GET_MODE (operands[1]) == DImode)
6090 strcat (buf, "%S3");
6092 strcat (buf, "%B3");
6094 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6096 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6101 /* The reversed conditional branch must branch over one additional
6102 instruction if the delay slot is filled and needs to be extracted
6103 by output_lbranch. If the delay slot is empty or this is a
6104 nullified forward branch, the instruction after the reversed
6105 condition branch must be nullified. */
6106 if (dbr_sequence_length () == 0
6107 || (nullify && forward_branch_p (insn)))
6111 operands[4] = GEN_INT (length);
6116 operands[4] = GEN_INT (length + 4);
6119 /* Create a reversed conditional branch which branches around
6120 the following insns. */
6121 if (GET_MODE (operands[1]) != DImode)
6127 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6130 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6136 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6139 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6148 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6151 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6157 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6160 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6164 output_asm_insn (buf, operands);
6165 return output_lbranch (operands[0], insn, xdelay);
6170 /* This routine handles output of long unconditional branches that
6171 exceed the maximum range of a simple branch instruction. Since
6172 we don't have a register available for the branch, we save register
6173 %r1 in the frame marker, load the branch destination DEST into %r1,
6174 execute the branch, and restore %r1 in the delay slot of the branch.
6176 Since long branches may have an insn in the delay slot and the
6177 delay slot is used to restore %r1, we in general need to extract
6178 this insn and execute it before the branch. However, to facilitate
6179 use of this function by conditional branches, we also provide an
6180 option to not extract the delay insn so that it will be emitted
6181 after the long branch. So, if there is an insn in the delay slot,
6182 it is extracted if XDELAY is nonzero.
6184 The lengths of the various long-branch sequences are 20, 16 and 24
6185 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6188 output_lbranch (rtx dest, rtx insn, int xdelay)
6192 xoperands[0] = dest;
6194 /* First, free up the delay slot. */
6195 if (xdelay && dbr_sequence_length () != 0)
6197 /* We can't handle a jump in the delay slot. */
6198 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6200 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6203 /* Now delete the delay insn. */
6204 PUT_CODE (NEXT_INSN (insn), NOTE);
6205 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6206 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6209 /* Output an insn to save %r1. The runtime documentation doesn't
6210 specify whether the "Clean Up" slot in the callers frame can
6211 be clobbered by the callee. It isn't copied by HP's builtin
6212 alloca, so this suggests that it can be clobbered if necessary.
6213 The "Static Link" location is copied by HP builtin alloca, so
6214 we avoid using it. Using the cleanup slot might be a problem
6215 if we have to interoperate with languages that pass cleanup
6216 information. However, it should be possible to handle these
6217 situations with GCC's asm feature.
6219 The "Current RP" slot is reserved for the called procedure, so
6220 we try to use it when we don't have a frame of our own. It's
6221 rather unlikely that we won't have a frame when we need to emit
6222 a long branch.
6224 Really the way to go long term is a register scavenger; goto
6225 the target of the jump and find a register which we can use
6226 as a scratch to hold the value in %r1. Then, we wouldn't have
6227 to free up the delay slot or clobber a slot that may be needed
6228 for other purposes. */
6231 if (actual_fsize == 0 && !regs_ever_live[2])
6232 /* Use the return pointer slot in the frame marker. */
6233 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6235 /* Use the slot at -40 in the frame marker since HP builtin
6236 alloca doesn't copy it. */
6237 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6241 if (actual_fsize == 0 && !regs_ever_live[2])
6242 /* Use the return pointer slot in the frame marker. */
6243 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6245 /* Use the "Clean Up" slot in the frame marker. In GCC,
6246 the only other use of this location is for copying a
6247 floating point double argument from a floating-point
6248 register to two general registers. The copy is done
6249 as an "atomic" operation when outputting a call, so it
6250 won't interfere with our using the location here. */
6251 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6254 if (TARGET_PORTABLE_RUNTIME)
6256 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6257 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6258 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6262 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6263 if (TARGET_SOM || !TARGET_GAS)
6265 xoperands[1] = gen_label_rtx ();
6266 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6267 targetm.asm_out.internal_label (asm_out_file, "L",
6268 CODE_LABEL_NUMBER (xoperands[1]));
6269 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6273 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6274 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6276 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6279 /* Now output a very long branch to the original target. */
6280 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6282 /* Now restore the value of %r1 in the delay slot. */
6285 if (actual_fsize == 0 && !regs_ever_live[2])
6286 return "ldd -16(%%r30),%%r1";
6288 return "ldd -40(%%r30),%%r1";
6292 if (actual_fsize == 0 && !regs_ever_live[2])
6293 return "ldw -20(%%r30),%%r1";
6295 return "ldw -12(%%r30),%%r1";
6299 /* This routine handles all the branch-on-bit conditional branch sequences we
6300 might need to generate. It handles nullification of delay slots,
6301 varying length branches, negated branches and all combinations of the
6302 above. It returns the appropriate output template to emit the branch. */
6305 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6307 static char buf[100];
6309 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6310 int length = get_attr_length (insn);
6313 /* A conditional branch to the following instruction (e.g. the delay slot) is
6314 asking for a disaster. I do not think this can happen as this pattern
6315 is only used when optimizing; jump optimization should eliminate the
6316 jump. But be prepared just in case. */
6318 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6321 /* If this is a long branch with its delay slot unfilled, set `nullify'
6322 as it can nullify the delay slot and save a nop. */
6323 if (length == 8 && dbr_sequence_length () == 0)
6326 /* If this is a short forward conditional branch which did not get
6327 its delay slot filled, the delay slot can still be nullified. */
6328 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6329 nullify = forward_branch_p (insn);
6331 /* A forward branch over a single nullified insn can be done with an
6332 extrs instruction. This avoids a single cycle penalty due to
6333 mis-predicted branch if we fall through (branch not taken). */
6336 && next_real_insn (insn) != 0
6337 && get_attr_length (next_real_insn (insn)) == 4
6338 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6345 /* All short conditional branches except backwards with an unfilled
6346 delay slot. */
6348 if (useskip)
6349 strcpy (buf, "{extrs,|extrw,s,}");
6350 else
6351 strcpy (buf, "bb,");
6352 if (useskip && GET_MODE (operands[0]) == DImode)
6353 strcpy (buf, "extrd,s,*");
6354 else if (GET_MODE (operands[0]) == DImode)
6355 strcpy (buf, "bb,*");
6356 if ((which == 0 && negated)
6357 || (which == 1 && ! negated))
6362 strcat (buf, " %0,%1,1,%%r0");
6363 else if (nullify && negated)
6364 strcat (buf, ",n %0,%1,%3");
6365 else if (nullify && ! negated)
6366 strcat (buf, ",n %0,%1,%2");
6367 else if (! nullify && negated)
6368 strcat (buf, "%0,%1,%3");
6369 else if (! nullify && ! negated)
6370 strcat (buf, " %0,%1,%2");
6373 /* All long conditionals. Note a short backward branch with an
6374 unfilled delay slot is treated just like a long backward branch
6375 with an unfilled delay slot. */
6377 /* Handle weird backwards branch with a filled delay slot
6378 which is nullified. */
6379 if (dbr_sequence_length () != 0
6380 && ! forward_branch_p (insn)
6383 strcpy (buf, "bb,");
6384 if (GET_MODE (operands[0]) == DImode)
6386 if ((which == 0 && negated)
6387 || (which == 1 && ! negated))
6392 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6394 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6396 /* Handle short backwards branch with an unfilled delay slot.
6397 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6398 taken and untaken branches. */
6399 else if (dbr_sequence_length () == 0
6400 && ! forward_branch_p (insn)
6401 && INSN_ADDRESSES_SET_P ()
6402 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6403 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6405 strcpy (buf, "bb,");
6406 if (GET_MODE (operands[0]) == DImode)
6408 if ((which == 0 && negated)
6409 || (which == 1 && ! negated))
6414 strcat (buf, " %0,%1,%3%#");
6416 strcat (buf, " %0,%1,%2%#");
6420 if (GET_MODE (operands[0]) == DImode)
6421 strcpy (buf, "extrd,s,*");
6422 else
6423 strcpy (buf, "{extrs,|extrw,s,}");
6424 if ((which == 0 && negated)
6425 || (which == 1 && ! negated))
6429 if (nullify && negated)
6430 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6431 else if (nullify && ! negated)
6432 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6434 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6436 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6441 /* The reversed conditional branch must branch over one additional
6442 instruction if the delay slot is filled and needs to be extracted
6443 by output_lbranch. If the delay slot is empty or this is a
6444 nullified forward branch, the instruction after the reversed
6445 condition branch must be nullified. */
6446 if (dbr_sequence_length () == 0
6447 || (nullify && forward_branch_p (insn)))
6451 operands[4] = GEN_INT (length);
6456 operands[4] = GEN_INT (length + 4);
6459 if (GET_MODE (operands[0]) == DImode)
6460 strcpy (buf, "bb,*");
6461 else
6462 strcpy (buf, "bb,");
6463 if ((which == 0 && negated)
6464 || (which == 1 && !negated))
6469 strcat (buf, ",n %0,%1,.+%4");
6471 strcat (buf, " %0,%1,.+%4");
6472 output_asm_insn (buf, operands);
6473 return output_lbranch (negated ? operands[3] : operands[2],
6479 /* This routine handles all the branch-on-variable-bit conditional branch
6480 sequences we might need to generate. It handles nullification of delay
6481 slots, varying length branches, negated branches and all combinations
6482 of the above. It returns the appropriate output template to emit the
6483 branch. */
6486 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6488 static char buf[100];
6490 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6491 int length = get_attr_length (insn);
6494 /* A conditional branch to the following instruction (e.g. the delay slot) is
6495 asking for a disaster. I do not think this can happen as this pattern
6496 is only used when optimizing; jump optimization should eliminate the
6497 jump. But be prepared just in case. */
6499 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6502 /* If this is a long branch with its delay slot unfilled, set `nullify'
6503 as it can nullify the delay slot and save a nop. */
6504 if (length == 8 && dbr_sequence_length () == 0)
6505 nullify = 1;
6507 /* If this is a short forward conditional branch which did not get
6508 its delay slot filled, the delay slot can still be nullified. */
6509 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6510 nullify = forward_branch_p (insn);
6512 /* A forward branch over a single nullified insn can be done with an
6513 extrs instruction. This avoids a single cycle penalty due to a
6514 mis-predicted branch if we fall through (branch not taken). */
6517 && next_real_insn (insn) != 0
6518 && get_attr_length (next_real_insn (insn)) == 4
6519 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6526 /* All short conditional branches except backwards with an unfilled
6527 delay slot. */
6529 if (useskip)
6530 strcpy (buf, "{vextrs,|extrw,s,}");
6531 else
6532 strcpy (buf, "{bvb,|bb,}");
6533 if (useskip && GET_MODE (operands[0]) == DImode)
6534 strcpy (buf, "extrd,s,*");
6535 else if (GET_MODE (operands[0]) == DImode)
6536 strcpy (buf, "bb,*");
6537 if ((which == 0 && negated)
6538 || (which == 1 && ! negated))
6543 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6544 else if (nullify && negated)
6545 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6546 else if (nullify && ! negated)
6547 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6548 else if (! nullify && negated)
6549 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6550 else if (! nullify && ! negated)
6551 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6554 /* All long conditionals. Note a short backward branch with an
6555 unfilled delay slot is treated just like a long backward branch
6556 with an unfilled delay slot. */
6558 /* Handle weird backwards branch with a filled delay slot
6559 which is nullified. */
6560 if (dbr_sequence_length () != 0
6561 && ! forward_branch_p (insn)
6562 && nullify)
6564 strcpy (buf, "{bvb,|bb,}");
6565 if (GET_MODE (operands[0]) == DImode)
6566 strcat (buf, "*");
6567 if ((which == 0 && negated)
6568 || (which == 1 && ! negated))
6573 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6575 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6577 /* Handle short backwards branch with an unfilled delay slot.
6578 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6579 taken and untaken branches. */
6580 else if (dbr_sequence_length () == 0
6581 && ! forward_branch_p (insn)
6582 && INSN_ADDRESSES_SET_P ()
6583 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6584 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6586 strcpy (buf, "{bvb,|bb,}");
6587 if (GET_MODE (operands[0]) == DImode)
6588 strcat (buf, "*");
6589 if ((which == 0 && negated)
6590 || (which == 1 && ! negated))
6595 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6597 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6601 strcpy (buf, "{vextrs,|extrw,s,}");
6602 if (GET_MODE (operands[0]) == DImode)
6603 strcpy (buf, "extrd,s,*");
6604 if ((which == 0 && negated)
6605 || (which == 1 && ! negated))
6609 if (nullify && negated)
6610 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6611 else if (nullify && ! negated)
6612 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6614 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6616 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6621 /* The reversed conditional branch must branch over one additional
6622 instruction if the delay slot is filled and needs to be extracted
6623 by output_lbranch. If the delay slot is empty or this is a
6624 nullified forward branch, the instruction after the reversed
6625 condition branch must be nullified. */
6626 if (dbr_sequence_length () == 0
6627 || (nullify && forward_branch_p (insn)))
6631 operands[4] = GEN_INT (length);
6636 operands[4] = GEN_INT (length + 4);
6639 if (GET_MODE (operands[0]) == DImode)
6640 strcpy (buf, "bb,*");
6641 else
6642 strcpy (buf, "{bvb,|bb,}");
6643 if ((which == 0 && negated)
6644 || (which == 1 && !negated))
6649 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6651 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6652 output_asm_insn (buf, operands);
6653 return output_lbranch (negated ? operands[3] : operands[2],
6659 /* Return the output template for emitting a dbra type insn.
6661 Note it may perform some output operations on its own before
6662 returning the final output string. */
6664 output_dbra (rtx *operands, rtx insn, int which_alternative)
6666 int length = get_attr_length (insn);
6668 /* A conditional branch to the following instruction (e.g. the delay slot) is
6669 asking for a disaster. Be prepared! */
6671 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6673 if (which_alternative == 0)
6674 return "ldo %1(%0),%0";
6675 else if (which_alternative == 1)
6677 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6678 output_asm_insn ("ldw -16(%%r30),%4", operands);
6679 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6680 return "{fldws|fldw} -16(%%r30),%0";
6684 output_asm_insn ("ldw %0,%4", operands);
6685 return "ldo %1(%4),%4\n\tstw %4,%0";
6689 if (which_alternative == 0)
6691 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6694 /* If this is a long branch with its delay slot unfilled, set `nullify'
6695 as it can nullify the delay slot and save a nop. */
6696 if (length == 8 && dbr_sequence_length () == 0)
6697 nullify = 1;
6699 /* If this is a short forward conditional branch which did not get
6700 its delay slot filled, the delay slot can still be nullified. */
6701 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6702 nullify = forward_branch_p (insn);
6708 return "addib,%C2,n %1,%0,%3";
6710 return "addib,%C2 %1,%0,%3";
6713 /* Handle weird backwards branch with a filled delay slot
6714 which is nullified. */
6715 if (dbr_sequence_length () != 0
6716 && ! forward_branch_p (insn)
6717 && nullify)
6718 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6719 /* Handle short backwards branch with an unfilled delay slot.
6720 Using an addb;nop rather than addi;bl saves 1 cycle for both
6721 taken and untaken branches. */
6722 else if (dbr_sequence_length () == 0
6723 && ! forward_branch_p (insn)
6724 && INSN_ADDRESSES_SET_P ()
6725 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6726 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6727 return "addib,%C2 %1,%0,%3%#";
6729 /* Handle normal cases. */
6731 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6733 return "addi,%N2 %1,%0,%0\n\tb %3";
6736 /* The reversed conditional branch must branch over one additional
6737 instruction if the delay slot is filled and needs to be extracted
6738 by output_lbranch. If the delay slot is empty or this is a
6739 nullified forward branch, the instruction after the reversed
6740 condition branch must be nullified. */
6741 if (dbr_sequence_length () == 0
6742 || (nullify && forward_branch_p (insn)))
6746 operands[4] = GEN_INT (length);
6751 operands[4] = GEN_INT (length + 4);
6755 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6757 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6759 return output_lbranch (operands[3], insn, xdelay);
6763 /* Deal with gross reload from FP register case. */
6764 else if (which_alternative == 1)
6766 /* Move loop counter from FP register to MEM then into a GR,
6767 increment the GR, store the GR into MEM, and finally reload
6768 the FP register from MEM from within the branch's delay slot. */
6769 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6771 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6773 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6774 else if (length == 28)
6775 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6778 operands[5] = GEN_INT (length - 16);
6779 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6780 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6781 return output_lbranch (operands[3], insn, 0);
6784 /* Deal with gross reload from memory case. */
6787 /* Reload loop counter from memory; the store back to memory
6788 happens in the branch's delay slot. */
6789 output_asm_insn ("ldw %0,%4", operands);
6791 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6792 else if (length == 16)
6793 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6796 operands[5] = GEN_INT (length - 4);
6797 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6798 return output_lbranch (operands[3], insn, 0);
6803 /* Return the output template for emitting a movb type insn.
6805 Note it may perform some output operations on its own before
6806 returning the final output string. */
6808 output_movb (rtx *operands, rtx insn, int which_alternative,
6809 int reverse_comparison)
6811 int length = get_attr_length (insn);
6813 /* A conditional branch to the following instruction (e.g. the delay slot) is
6814 asking for a disaster. Be prepared! */
6816 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6818 if (which_alternative == 0)
6819 return "copy %1,%0";
6820 else if (which_alternative == 1)
6822 output_asm_insn ("stw %1,-16(%%r30)", operands);
6823 return "{fldws|fldw} -16(%%r30),%0";
6825 else if (which_alternative == 2)
6826 return "stw %1,%0";
6827 else
6828 return "mtsar %r1";
6831 /* Support the second variant. */
6832 if (reverse_comparison)
6833 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6835 if (which_alternative == 0)
6837 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6840 /* If this is a long branch with its delay slot unfilled, set `nullify'
6841 as it can nullify the delay slot and save a nop. */
6842 if (length == 8 && dbr_sequence_length () == 0)
6843 nullify = 1;
6845 /* If this is a short forward conditional branch which did not get
6846 its delay slot filled, the delay slot can still be nullified. */
6847 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6848 nullify = forward_branch_p (insn);
6854 return "movb,%C2,n %1,%0,%3";
6856 return "movb,%C2 %1,%0,%3";
6859 /* Handle weird backwards branch with a filled delay slot
6860 which is nullified. */
6861 if (dbr_sequence_length () != 0
6862 && ! forward_branch_p (insn)
6863 && nullify)
6864 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6866 /* Handle short backwards branch with an unfilled delay slot.
6867 Using a movb;nop rather than or;bl saves 1 cycle for both
6868 taken and untaken branches. */
6869 else if (dbr_sequence_length () == 0
6870 && ! forward_branch_p (insn)
6871 && INSN_ADDRESSES_SET_P ()
6872 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6873 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6874 return "movb,%C2 %1,%0,%3%#";
6875 /* Handle normal cases. */
6877 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6879 return "or,%N2 %1,%%r0,%0\n\tb %3";
6882 /* The reversed conditional branch must branch over one additional
6883 instruction if the delay slot is filled and needs to be extracted
6884 by output_lbranch. If the delay slot is empty or this is a
6885 nullified forward branch, the instruction after the reversed
6886 condition branch must be nullified. */
6887 if (dbr_sequence_length () == 0
6888 || (nullify && forward_branch_p (insn)))
6892 operands[4] = GEN_INT (length);
6897 operands[4] = GEN_INT (length + 4);
6901 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
6903 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
6905 return output_lbranch (operands[3], insn, xdelay);
6908 /* Deal with gross reload for FP destination register case. */
6909 else if (which_alternative == 1)
6911 /* Move source register to MEM, perform the branch test, then
6912 finally load the FP register from MEM from within the branch's
6913 delay slot. */
6914 output_asm_insn ("stw %1,-16(%%r30)", operands);
6916 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6917 else if (length == 16)
6918 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6921 operands[4] = GEN_INT (length - 4);
6922 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
6923 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6924 return output_lbranch (operands[3], insn, 0);
6927 /* Deal with gross reload from memory case. */
6928 else if (which_alternative == 2)
6930 /* Reload loop counter from memory; the store back to memory
6931 happens in the branch's delay slot. */
6933 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6934 else if (length == 12)
6935 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6938 operands[4] = GEN_INT (length);
6939 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
6941 return output_lbranch (operands[3], insn, 0);
6944 /* Handle SAR as a destination. */
6948 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6949 else if (length == 12)
6950 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6953 operands[4] = GEN_INT (length);
6954 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
6956 return output_lbranch (operands[3], insn, 0);
6961 /* Copy any FP arguments in INSN into integer registers. */
6963 copy_fp_args (rtx insn)
6968 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6970 int arg_mode, regno;
6971 rtx use = XEXP (link, 0);
6973 if (! (GET_CODE (use) == USE
6974 && GET_CODE (XEXP (use, 0)) == REG
6975 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6978 arg_mode = GET_MODE (XEXP (use, 0));
6979 regno = REGNO (XEXP (use, 0));
6981 /* Is it a floating point register? */
6982 if (regno >= 32 && regno <= 39)
6984 /* Copy the FP register into an integer register via memory. */
6985 if (arg_mode == SFmode)
6987 xoperands[0] = XEXP (use, 0);
6988 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6989 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6990 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6994 xoperands[0] = XEXP (use, 0);
6995 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6996 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6997 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6998 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
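/* The register arithmetic above maps the FP argument registers onto
   the general argument registers: SFmode values go to %r26 down to
   %r23, while DFmode values go to general register pairs starting at
   %r25, with the two halves reloaded by separate ldw's.  */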
7004 /* Compute length of the FP argument copy sequence for INSN. */
7006 length_fp_args (rtx insn)
7011 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7013 int arg_mode, regno;
7014 rtx use = XEXP (link, 0);
7016 if (! (GET_CODE (use) == USE
7017 && GET_CODE (XEXP (use, 0)) == REG
7018 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7021 arg_mode = GET_MODE (XEXP (use, 0));
7022 regno = REGNO (XEXP (use, 0));
7024 /* Is it a floating point register? */
7025 if (regno >= 32 && regno <= 39)
7027 if (arg_mode == SFmode)
7037 /* Return the attribute length for the millicode call instruction INSN.
7038 The length must match the code generated by output_millicode_call.
7039 We include the delay slot in the returned length as it is better to
7040 overestimate the length than to underestimate it. */
7043 attr_length_millicode_call (rtx insn)
7045 unsigned long distance = -1;
7046 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7048 if (INSN_ADDRESSES_SET_P ())
7050 distance = (total + insn_current_reference_address (insn));
7051 if (distance < total)
7057 if (!TARGET_LONG_CALLS && distance < 7600000)
7062 else if (TARGET_PORTABLE_RUNTIME)
7066 if (!TARGET_LONG_CALLS && distance < 240000)
7069 if (TARGET_LONG_ABS_CALL && !flag_pic)
7076 /* INSN is a function call. It may have an unconditional jump
7077 in its delay slot.
7079 CALL_DEST is the routine we are calling. */
7082 output_millicode_call (rtx insn, rtx call_dest)
7084 int attr_length = get_attr_length (insn);
7085 int seq_length = dbr_sequence_length ();
7090 xoperands[0] = call_dest;
7091 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7093 /* Handle the common case where we are sure that the branch will
7094 reach the beginning of the $CODE$ subspace. The within-reach
7095 form of the $$sh_func_adrs call has a length of 28. Because
7096 it has an attribute type of multi, it never has a nonzero
7097 sequence length. The length of the $$sh_func_adrs is the same
7098 as certain out-of-reach PIC calls to other routines. */
7099 if (!TARGET_LONG_CALLS
7100 && ((seq_length == 0
7101 && (attr_length == 12
7102 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7103 || (seq_length != 0 && attr_length == 8)))
7105 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7111 /* It might seem that one insn could be saved by accessing
7112 the millicode function using the linkage table. However,
7113 this doesn't work in shared libraries and other dynamically
7114 loaded objects. Using a pc-relative sequence also avoids
7115 problems related to the implicit use of the gp register. */
7116 output_asm_insn ("b,l .+8,%%r1", xoperands);
7120 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7121 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7125 xoperands[1] = gen_label_rtx ();
7126 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7127 targetm.asm_out.internal_label (asm_out_file, "L",
7128 CODE_LABEL_NUMBER (xoperands[1]));
7129 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7132 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7134 else if (TARGET_PORTABLE_RUNTIME)
7136 /* Pure portable runtime doesn't allow be/ble; we also don't
7137 have PIC support in the assembler/linker, so this sequence
7138 is needed. */
7140 /* Get the address of our target into %r1. */
7141 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7142 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7144 /* Get our return address into %r31. */
7145 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7146 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7148 /* Jump to our target address in %r1. */
7149 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7153 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7155 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7157 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7161 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7162 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7164 if (TARGET_SOM || !TARGET_GAS)
7166 /* The HP assembler can generate relocations for the
7167 difference of two symbols. GAS can do this for a
7168 millicode symbol but not an arbitrary external
7169 symbol when generating SOM output. */
7170 xoperands[1] = gen_label_rtx ();
7171 targetm.asm_out.internal_label (asm_out_file, "L",
7172 CODE_LABEL_NUMBER (xoperands[1]));
7173 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7174 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7178 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7179 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7183 /* Jump to our target address in %r1. */
7184 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7188 if (seq_length == 0)
7189 output_asm_insn ("nop", xoperands);
7191 /* We are done if there isn't a jump in the delay slot. */
7192 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7195 /* This call has an unconditional jump in its delay slot. */
7196 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7198 /* See if the return address can be adjusted. Use the containing
7199 sequence insn's address. */
7200 if (INSN_ADDRESSES_SET_P ())
7202 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7203 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7204 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7206 if (VAL_14_BITS_P (distance))
7208 xoperands[1] = gen_label_rtx ();
7209 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7210 targetm.asm_out.internal_label (asm_out_file, "L",
7211 CODE_LABEL_NUMBER (xoperands[1]));
7214 /* ??? This branch may not reach its target. */
7215 output_asm_insn ("nop\n\tb,n %0", xoperands);
7218 /* ??? This branch may not reach its target. */
7219 output_asm_insn ("nop\n\tb,n %0", xoperands);
7221 /* Delete the jump. */
7222 PUT_CODE (NEXT_INSN (insn), NOTE);
7223 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7224 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7229 /* Return the attribute length of the call instruction INSN. The SIBCALL
7230 flag indicates whether INSN is a regular call or a sibling call. The
7231 length returned must be longer than the code actually generated by
7232 output_call. Since branch shortening is done before delay branch
7233 sequencing, there is no way to determine whether or not the delay
7234 slot will be filled during branch shortening. Even when the delay
7235 slot is filled, we may have to add a nop if the delay slot contains
7236 a branch that can't reach its target. Thus, we always have to include
7237 the delay slot in the length estimate. This used to be done in
7238 pa_adjust_insn_length but we do it here now as some sequences always
7239 fill the delay slot and we can save four bytes in the estimate for
7240 these sequences. */
7243 attr_length_call (rtx insn, int sibcall)
7249 rtx pat = PATTERN (insn);
7250 unsigned long distance = -1;
7252 if (INSN_ADDRESSES_SET_P ())
7254 unsigned long total;
7256 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7257 distance = (total + insn_current_reference_address (insn));
7258 if (distance < total)
7262 /* Determine if this is a local call. */
7263 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7264 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7266 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7268 call_decl = SYMBOL_REF_DECL (call_dest);
7269 local_call = call_decl && targetm.binds_local_p (call_decl);
7271 /* pc-relative branch. */
7272 if (!TARGET_LONG_CALLS
7273 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7274 || distance < 240000))
7277 /* 64-bit plabel sequence. */
7278 else if (TARGET_64BIT && !local_call)
7279 length += sibcall ? 28 : 24;
7281 /* non-pic long absolute branch sequence. */
7282 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7285 /* long pc-relative branch sequence. */
7286 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7287 || (TARGET_64BIT && !TARGET_GAS)
7288 || (TARGET_GAS && !TARGET_SOM
7289 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7293 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7297 /* 32-bit plabel sequence. */
7303 length += length_fp_args (insn);
7313 if (!TARGET_NO_SPACE_REGS)
7321 /* INSN is a function call. It may have an unconditional jump
7322 in its delay slot.
7324 CALL_DEST is the routine we are calling. */
7327 output_call (rtx insn, rtx call_dest, int sibcall)
7329 int delay_insn_deleted = 0;
7330 int delay_slot_filled = 0;
7331 int seq_length = dbr_sequence_length ();
7332 tree call_decl = SYMBOL_REF_DECL (call_dest);
7333 int local_call = call_decl && targetm.binds_local_p (call_decl);
7336 xoperands[0] = call_dest;
7338 /* Handle the common case where we're sure that the branch will reach
7339 the beginning of the "$CODE$" subspace. This is the beginning of
7340 the current function if we are in a named section. */
7341 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7343 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7344 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7348 if (TARGET_64BIT && !local_call)
7350 /* ??? As far as I can tell, the HP linker doesn't support the
7351 long pc-relative sequence described in the 64-bit runtime
7352 architecture. So, we use a slightly longer indirect call. */
7353 xoperands[0] = get_deferred_plabel (call_dest);
7354 xoperands[1] = gen_label_rtx ();
7356 /* If this isn't a sibcall, we put the load of %r27 into the
7357 delay slot. We can't do this in a sibcall as we don't
7358 have a second call-clobbered scratch register available. */
7360 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7363 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7366 /* Now delete the delay insn. */
7367 PUT_CODE (NEXT_INSN (insn), NOTE);
7368 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7369 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7370 delay_insn_deleted = 1;
7373 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7374 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7375 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7379 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7380 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7381 output_asm_insn ("bve (%%r1)", xoperands);
7385 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7386 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7387 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7388 delay_slot_filled = 1;
7393 int indirect_call = 0;
7395 /* Emit a long call. There are several different sequences
7396 of increasing length and complexity. In most cases,
7397 they don't allow an instruction in the delay slot. */
7398 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7399 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7400 && !(TARGET_GAS && !TARGET_SOM
7401 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7406 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7408 && (!TARGET_PA_20 || indirect_call))
7410 /* A non-jump insn in the delay slot. By definition we can
7411 emit this insn before the call (and in fact before argument
7412 relocating). */
7413 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7416 /* Now delete the delay insn. */
7417 PUT_CODE (NEXT_INSN (insn), NOTE);
7418 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7419 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7420 delay_insn_deleted = 1;
7423 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7425 /* This is the best sequence for making long calls in
7426 non-pic code. Unfortunately, GNU ld doesn't provide
7427 the stub needed for external calls, and GAS's support
7428 for this with the SOM linker is buggy. It is safe
7429 to use this for local calls. */
7430 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7432 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7436 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7439 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7441 output_asm_insn ("copy %%r31,%%r2", xoperands);
7442 delay_slot_filled = 1;
7447 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7448 || (TARGET_64BIT && !TARGET_GAS))
7450 /* The HP assembler and linker can handle relocations
7451 for the difference of two symbols. GAS and the HP
7452 linker can't do this when one of the symbols is
7453 external. */
7454 xoperands[1] = gen_label_rtx ();
7455 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7456 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7457 targetm.asm_out.internal_label (asm_out_file, "L",
7458 CODE_LABEL_NUMBER (xoperands[1]));
7459 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7461 else if (TARGET_GAS && !TARGET_SOM
7462 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7464 /* GAS currently can't generate the relocations that
7465 are needed for the SOM linker under HP-UX using this
7466 sequence. The GNU linker doesn't generate the stubs
7467 that are needed for external calls on TARGET_ELF32
7468 with this sequence. For now, we have to use a
7469 longer plabel sequence when using GAS. */
7470 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7471 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7473 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7478 /* Emit a long plabel-based call sequence. This is
7479 essentially an inline implementation of $$dyncall.
7480 We don't actually try to call $$dyncall as this is
7481 as difficult as calling the function itself. */
7482 xoperands[0] = get_deferred_plabel (call_dest);
7483 xoperands[1] = gen_label_rtx ();
7485 /* Since the call is indirect, FP arguments in registers
7486 need to be copied to the general registers. Then, the
7487 argument relocation stub will copy them back. */
7489 copy_fp_args (insn);
7493 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7494 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7495 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7499 output_asm_insn ("addil LR'%0-$global$,%%r27",
7501 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7505 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7506 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7507 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7508 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7510 if (!sibcall && !TARGET_PA_20)
7512 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7513 if (TARGET_NO_SPACE_REGS)
7514 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7516 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7523 output_asm_insn ("bve (%%r1)", xoperands);
7528 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7529 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7530 delay_slot_filled = 1;
7533 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7538 if (!TARGET_NO_SPACE_REGS)
7539 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7544 if (TARGET_NO_SPACE_REGS)
7545 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7547 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7551 if (TARGET_NO_SPACE_REGS)
7552 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7554 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7557 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7559 output_asm_insn ("copy %%r31,%%r2", xoperands);
7560 delay_slot_filled = 1;
7567 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7568 output_asm_insn ("nop", xoperands);
7570 /* We are done if there isn't a jump in the delay slot. */
7572 || delay_insn_deleted
7573 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7576 /* A sibcall should never have a branch in the delay slot. */
7577 gcc_assert (!sibcall);
7579 /* This call has an unconditional jump in its delay slot. */
7580 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7582 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7584 /* See if the return address can be adjusted. Use the containing
7585 sequence insn's address. */
7586 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7587 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7588 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7590 if (VAL_14_BITS_P (distance))
7592 xoperands[1] = gen_label_rtx ();
7593 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7594 targetm.asm_out.internal_label (asm_out_file, "L",
7595 CODE_LABEL_NUMBER (xoperands[1]));
7598 output_asm_insn ("nop\n\tb,n %0", xoperands);
7601 output_asm_insn ("b,n %0", xoperands);
7603 /* Delete the jump. */
7604 PUT_CODE (NEXT_INSN (insn), NOTE);
7605 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7606 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7611 /* Return the attribute length of the indirect call instruction INSN.
7612 The length must match the code generated by output_indirect_call.
7613 The returned length includes the delay slot. Currently, the delay
7614 slot of an indirect call sequence is not exposed and it is used by
7615 the sequence itself. */
7618 attr_length_indirect_call (rtx insn)
7620 unsigned long distance = -1;
7621 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7623 if (INSN_ADDRESSES_SET_P ())
7625 distance = (total + insn_current_reference_address (insn));
7626 if (distance < total)
7633 if (TARGET_FAST_INDIRECT_CALLS
7634 || (!TARGET_PORTABLE_RUNTIME
7635 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7636 || distance < 240000)))
7642 if (TARGET_PORTABLE_RUNTIME)
7645 /* Out of reach, can use ble. */
7650 output_indirect_call (rtx insn, rtx call_dest)
7656 xoperands[0] = call_dest;
7657 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7658 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7662 /* First the special case for kernels, level 0 systems, etc. */
7663 if (TARGET_FAST_INDIRECT_CALLS)
7664 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7666 /* Now the normal case -- we can reach $$dyncall directly or
7667 we're sure that we can get there via a long-branch stub.
7669 No need to check target flags as the length uniquely identifies
7670 the remaining cases. */
7671 if (attr_length_indirect_call (insn) == 8)
7673 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7674 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7675 variant of the B,L instruction can't be used on the SOM target. */
7676 if (TARGET_PA_20 && !TARGET_SOM)
7677 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7679 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7682 /* Long millicode call, but we are not generating PIC or portable runtime
7683 code. */
7684 if (attr_length_indirect_call (insn) == 12)
7685 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7687 /* Long millicode call for portable runtime. */
7688 if (attr_length_indirect_call (insn) == 20)
7689 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7691 /* We need a long PIC call to $$dyncall. */
7692 xoperands[0] = NULL_RTX;
7693 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7694 if (TARGET_SOM || !TARGET_GAS)
7696 xoperands[0] = gen_label_rtx ();
7697 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7698 targetm.asm_out.internal_label (asm_out_file, "L",
7699 CODE_LABEL_NUMBER (xoperands[0]));
7700 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7704 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7705 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7708 output_asm_insn ("blr %%r0,%%r2", xoperands);
7709 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7713 /* Return the total length of the save and restore instructions needed for
7714 the data linkage table pointer (i.e., the PIC register) across the call
7715 instruction INSN. No-return calls do not require a save and restore.
7716 In addition, we may be able to avoid the save and restore for calls
7717 within the same translation unit. */
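/* On this port the PIC register is %r19; a call that may leave the
   translation unit may need a store before the call and a load after
   it, while a no-return call (checked first below) needs neither.  */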
7720 attr_length_save_restore_dltp (rtx insn)
7722 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7728 /* In HPUX 8.0's shared library scheme, special relocations are needed
7729 for function labels if they might be passed to a function
7730 in a shared library (because shared libraries don't live in code
7731 space), and special magic is needed to construct their address. */
7734 hppa_encode_label (rtx sym)
7736 const char *str = XSTR (sym, 0);
7737 int len = strlen (str) + 1;
7740 p = newstr = alloca (len + 1);
7741 *p++ = '@';
7742 strcpy (p, str);
7744 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
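/* For example, after hppa_encode_label the function symbol "foo" is
   renamed "@foo"; pa_strip_name_encoding below undoes this when the
   plain name is needed.  */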
7748 pa_encode_section_info (tree decl, rtx rtl, int first)
7750 default_encode_section_info (decl, rtl, first);
7752 if (first && TEXT_SPACE_P (decl))
7754 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7755 if (TREE_CODE (decl) == FUNCTION_DECL)
7756 hppa_encode_label (XEXP (rtl, 0));
7760 /* This is sort of the inverse of pa_encode_section_info. */
7763 pa_strip_name_encoding (const char *str)
7765 str += (*str == '@');
7766 str += (*str == '*');
7771 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7773 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7776 /* Returns 1 if OP is a function label involved in a simple addition
7777 with a constant. Used to keep certain patterns from matching
7778 during instruction combination. */
7780 is_function_label_plus_const (rtx op)
7782 /* Strip off any CONST. */
7783 if (GET_CODE (op) == CONST)
7786 return (GET_CODE (op) == PLUS
7787 && function_label_operand (XEXP (op, 0), Pmode)
7788 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7791 /* Output assembly code for a thunk to FUNCTION. */
7794 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7795 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7798 static unsigned int current_thunk_number;
7799 int val_14 = VAL_14_BITS_P (delta);
7804 xoperands[0] = XEXP (DECL_RTL (function), 0);
7805 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7806 xoperands[2] = GEN_INT (delta);
7808 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7809 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7811 /* Output the thunk. We know that the function is in the same
7812 translation unit (i.e., the same space) as the thunk, and that
7813 thunks are output after their method. Thus, we don't need an
7814 external branch to reach the function. With SOM and GAS,
7815 functions and thunks are effectively in different sections.
7816 Thus, we can always use an IA-relative branch and the linker
7817 will add a long branch stub if necessary.
7819 However, we have to be careful when generating PIC code on the
7820 SOM port to ensure that the sequence does not transfer to an
7821 import stub for the target function as this could clobber the
7822 return value saved at SP-24. This would also apply to the
7823 32-bit linux port if the multi-space model is implemented. */
7824 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7825 && !(flag_pic && TREE_PUBLIC (function))
7826 && (TARGET_GAS || last_address < 262132))
7827 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7828 && ((targetm.have_named_sections
7829 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7830 /* The GNU 64-bit linker has rather poor stub management.
7831 So, we use a long branch from thunks that aren't in
7832 the same section as the target function. */
7834 && (DECL_SECTION_NAME (thunk_fndecl)
7835 != DECL_SECTION_NAME (function)))
7836 || ((DECL_SECTION_NAME (thunk_fndecl)
7837 == DECL_SECTION_NAME (function))
7838 && last_address < 262132)))
7839 || (!targetm.have_named_sections && last_address < 262132))))
7842 output_asm_insn ("addil L'%2,%%r26", xoperands);
7844 output_asm_insn ("b %0", xoperands);
7848 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7853 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7857 else if (TARGET_64BIT)
7859 /* We only have one call-clobbered scratch register, so we can't
7860 make use of the delay slot if delta doesn't fit in 14 bits. */
7863 output_asm_insn ("addil L'%2,%%r26", xoperands);
7864 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7867 output_asm_insn ("b,l .+8,%%r1", xoperands);
7871 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7872 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7876 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7877 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7882 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7883 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7888 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7892 else if (TARGET_PORTABLE_RUNTIME)
7894 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7895 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7898 output_asm_insn ("addil L'%2,%%r26", xoperands);
7900 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7904 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7909 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7913 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7915 /* The function is accessible from outside this module. The only
7916 way to avoid an import stub between the thunk and function is to
7917 call the function directly with an indirect sequence similar to
7918 that used by $$dyncall. This is possible because $$dyncall acts
7919 as the import stub in an indirect call. */
7920 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7921 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7922 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7923 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7924 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7925 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7926 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7927 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7928 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7932 output_asm_insn ("addil L'%2,%%r26", xoperands);
7938 output_asm_insn ("bve (%%r22)", xoperands);
7941 else if (TARGET_NO_SPACE_REGS)
7943 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7948 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7949 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7950 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7955 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7957 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7961 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7963 if (TARGET_SOM || !TARGET_GAS)
7965 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7966 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7970 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7971 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7975 output_asm_insn ("addil L'%2,%%r26", xoperands);
7977 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7981 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7986 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7993 output_asm_insn ("addil L'%2,%%r26", xoperands);
7995 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7996 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8000 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8005 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8010 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8012 if (TARGET_SOM && TARGET_GAS)
8014 /* We're done with this subspace except possibly for some additional
8015 debug information. Forget that we are in this subspace to ensure
8016 that the next function is output in its own subspace. */
8018 cfun->machine->in_nsubspa = 2;
8021 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8023 switch_to_section (data_section);
8024 output_asm_insn (".align 4", xoperands);
8025 ASM_OUTPUT_LABEL (file, label);
8026 output_asm_insn (".word P'%0", xoperands);
8029 current_thunk_number++;
8030 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8031 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8032 last_address += nbytes;
8033 update_total_code_bytes (nbytes);
8036 /* Only direct calls to static functions are allowed to be sibling (tail)
8037 call optimized.
8039 This restriction is necessary because some linker generated stubs will
8040 store return pointers into rp' in some cases which might clobber a
8041 live value already in rp'.
8043 In a sibcall the current function and the target function share stack
8044 space. Thus if the path to the current function and the path to the
8045 target function save a value in rp', they save the value into the
8046 same stack slot, which has undesirable consequences.
8048 Because of the deferred binding nature of shared libraries any function
8049 with external scope could be in a different load module and thus require
8050 rp' to be saved when calling that function. So sibcall optimizations
8051 can only be safe for static functions.
8053 Note that GCC never needs return value relocations, so we don't have to
8054 worry about static calls with return value relocations (which require
8055 saving rp').
8057 It is safe to perform a sibcall optimization when the target function
8058 will never return. */
8060 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8062 if (TARGET_PORTABLE_RUNTIME)
8065 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8066 single subspace mode and the call is not indirect. As far as I know,
8067 there is no operating system support for the multiple subspace mode.
8068 It might be possible to support indirect calls if we didn't use
8069 $$dyncall (see the indirect sequence generated in output_call). */
8071 return (decl != NULL_TREE);
8073 /* Sibcalls are not ok because the arg pointer register is not a fixed
8074 register. This prevents the sibcall optimization from occurring. In
8075 addition, there are problems with stub placement using GNU ld. This
8076 is because a normal sibcall branch uses a 17-bit relocation while
8077 a regular call branch uses a 22-bit relocation. As a result, more
8078 care needs to be taken in the placement of long-branch stubs. */
8082 /* Sibcalls are only ok within a translation unit. */
8083 return (decl && !TREE_PUBLIC (decl));
8086 /* ??? Addition is not commutative on the PA due to the weird implicit
8087 space register selection rules for memory addresses. Therefore, we
8088 don't consider a + b == b + a, as this might be inside a MEM. */
8090 pa_commutative_p (rtx x, int outer_code)
8092 return (COMMUTATIVE_P (x)
8093 && (TARGET_NO_SPACE_REGS
8094 || (outer_code != UNKNOWN && outer_code != MEM)
8095 || GET_CODE (x) != PLUS));
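/* Illustration: inside a MEM the space register is selected from the
   base operand, so (mem (plus (reg A) (reg B))) and
   (mem (plus (reg B) (reg A))) may address different spaces when space
   registers are in use; hence PLUS is only treated as commutative in
   the safe cases above.  */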
8098 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8099 use in fmpyadd instructions. */
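/* Operand layout assumed by the checks below: operands[0] is the
   multiply result with inputs operands[1] and operands[2], and
   operands[3] is the add result with inputs operands[4] and
   operands[5]; the two operations execute in parallel in a single
   fmpyadd.  */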
8101 fmpyaddoperands (rtx *operands)
8103 enum machine_mode mode = GET_MODE (operands[0]);
8105 /* Must be a floating point mode. */
8106 if (mode != SFmode && mode != DFmode)
8109 /* All modes must be the same. */
8110 if (! (mode == GET_MODE (operands[1])
8111 && mode == GET_MODE (operands[2])
8112 && mode == GET_MODE (operands[3])
8113 && mode == GET_MODE (operands[4])
8114 && mode == GET_MODE (operands[5])))
8117 /* All operands must be registers. */
8118 if (! (GET_CODE (operands[1]) == REG
8119 && GET_CODE (operands[2]) == REG
8120 && GET_CODE (operands[3]) == REG
8121 && GET_CODE (operands[4]) == REG
8122 && GET_CODE (operands[5]) == REG))
8125 /* Only 2 real operands to the addition. One of the input operands must
8126 be the same as the output operand. */
8127 if (! rtx_equal_p (operands[3], operands[4])
8128 && ! rtx_equal_p (operands[3], operands[5]))
8131 /* Inout operand of add cannot conflict with any operands from multiply. */
8132 if (rtx_equal_p (operands[3], operands[0])
8133 || rtx_equal_p (operands[3], operands[1])
8134 || rtx_equal_p (operands[3], operands[2]))
8137 /* multiply cannot feed into addition operands. */
8138 if (rtx_equal_p (operands[4], operands[0])
8139 || rtx_equal_p (operands[5], operands[0]))
8142 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8144 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8145 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8146 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8147 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8148 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8149 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8152 /* Passed. Operands are suitable for fmpyadd. */
8156 #if !defined(USE_COLLECT2)
8158 pa_asm_out_constructor (rtx symbol, int priority)
8160 if (!function_label_operand (symbol, VOIDmode))
8161 hppa_encode_label (symbol);
8163 #ifdef CTORS_SECTION_ASM_OP
8164 default_ctor_section_asm_out_constructor (symbol, priority);
8166 # ifdef TARGET_ASM_NAMED_SECTION
8167 default_named_section_asm_out_constructor (symbol, priority);
8169 default_stabs_asm_out_constructor (symbol, priority);
8175 pa_asm_out_destructor (rtx symbol, int priority)
8177 if (!function_label_operand (symbol, VOIDmode))
8178 hppa_encode_label (symbol);
8180 #ifdef DTORS_SECTION_ASM_OP
8181 default_dtor_section_asm_out_destructor (symbol, priority);
8183 # ifdef TARGET_ASM_NAMED_SECTION
8184 default_named_section_asm_out_destructor (symbol, priority);
8186 default_stabs_asm_out_destructor (symbol, priority);
8192 /* This function places uninitialized global data in the bss section.
8193 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8194 function on the SOM port to prevent uninitialized global data from
8195 being placed in the data section. */
8198 pa_asm_output_aligned_bss (FILE *stream,
8200 unsigned HOST_WIDE_INT size,
8203 switch_to_section (bss_section);
8204 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8206 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8207 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8210 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8211 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8214 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8215 ASM_OUTPUT_LABEL (stream, name);
8216 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8219 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8220 that doesn't allow the alignment of global common storage to be directly
8221 specified. The SOM linker aligns common storage based on the rounded
8222 value of the NUM_BYTES parameter in the .comm directive. It's not
8223 possible to use the .align directive as it doesn't affect the alignment
8224 of the label associated with a .comm directive. */
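/* For example (illustrative), a 4-byte object requiring 16-byte
   alignment is emitted as ".comm 16": the rounded NUM_BYTES value,
   rather than an .align directive, supplies the alignment.  */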
8227 pa_asm_output_aligned_common (FILE *stream,
8229 unsigned HOST_WIDE_INT size,
8232 unsigned int max_common_align;
8234 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8235 if (align > max_common_align)
8237 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8238 "for global common data. Using %u",
8239 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8240 align = max_common_align;
8243 switch_to_section (bss_section);
8245 assemble_name (stream, name);
8246 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8247 MAX (size, align / BITS_PER_UNIT));
8250 /* We can't use .comm for local common storage as the SOM linker effectively
8251 treats the symbol as universal and uses the same storage for local symbols
8252 with the same name in different object files. The .block directive
8253 reserves an uninitialized block of storage. However, it's not common
8254 storage. Fortunately, GCC never requests common storage with the same
8255 name in any given translation unit. */
8258 pa_asm_output_aligned_local (FILE *stream,
8260 unsigned HOST_WIDE_INT size,
8263 switch_to_section (bss_section);
8264 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8267 fprintf (stream, "%s", LOCAL_ASM_OP);
8268 assemble_name (stream, name);
8269 fprintf (stream, "\n");
8272 ASM_OUTPUT_LABEL (stream, name);
8273 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8276 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8277 use in fmpysub instructions. */
8279 fmpysuboperands (rtx *operands)
8281 enum machine_mode mode = GET_MODE (operands[0]);
8283 /* Must be a floating point mode. */
8284 if (mode != SFmode && mode != DFmode)
8287 /* All modes must be the same. */
8288 if (! (mode == GET_MODE (operands[1])
8289 && mode == GET_MODE (operands[2])
8290 && mode == GET_MODE (operands[3])
8291 && mode == GET_MODE (operands[4])
8292 && mode == GET_MODE (operands[5])))
8295 /* All operands must be registers. */
8296 if (! (GET_CODE (operands[1]) == REG
8297 && GET_CODE (operands[2]) == REG
8298 && GET_CODE (operands[3]) == REG
8299 && GET_CODE (operands[4]) == REG
8300 && GET_CODE (operands[5]) == REG))
8303 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8304 operation, so operands[4] must be the same as operands[3]. */
8305 if (! rtx_equal_p (operands[3], operands[4]))
8308 /* multiply cannot feed into subtraction. */
8309 if (rtx_equal_p (operands[5], operands[0]))
8312 /* Inout operand of sub cannot conflict with any operands from multiply. */
8313 if (rtx_equal_p (operands[3], operands[0])
8314 || rtx_equal_p (operands[3], operands[1])
8315 || rtx_equal_p (operands[3], operands[2]))
8318 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8320 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8321 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8322 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8323 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8324 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8325 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8328 /* Passed. Operands are suitable for fmpysub. */
8332 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8333 constants for shadd instructions. */
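/* These values correspond to the sh1add, sh2add and sh3add
   instructions, which compute (x << 1) + y, (x << 2) + y and
   (x << 3) + y, i.e. scale factors of 2, 4 and 8.  */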
8335 shadd_constant_p (int val)
8337 if (val == 2 || val == 4 || val == 8)
8343 /* Return 1 if OP is valid as a base or index register in a
8344 REG+REG address. */
8347 borx_reg_operand (rtx op, enum machine_mode mode)
8349 if (GET_CODE (op) != REG)
8352 /* We must reject virtual registers as the only expressions that
8353 can be instantiated are REG and REG+CONST. */
8354 if (op == virtual_incoming_args_rtx
8355 || op == virtual_stack_vars_rtx
8356 || op == virtual_stack_dynamic_rtx
8357 || op == virtual_outgoing_args_rtx
8358 || op == virtual_cfa_rtx)
8361 /* While it's always safe to index off the frame pointer, it's not
8362 profitable to do so when the frame pointer is being eliminated. */
8363 if (!reload_completed
8364 && flag_omit_frame_pointer
8365 && !current_function_calls_alloca
8366 && op == frame_pointer_rtx)
8369 return register_operand (op, mode);
8372 /* Return 1 if this operand is anything other than a hard register. */
8375 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8377 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8380 /* Return 1 if INSN branches forward. Should be using insn_addresses
8381 to avoid walking through all the insns... */
8383 forward_branch_p (rtx insn)
8385 rtx label = JUMP_LABEL (insn);
8392 insn = NEXT_INSN (insn);
8395 return (insn == label);
8398 /* Return 1 if OP is an equality comparison, else return 0. */
8400 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8402 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8405 /* Return 1 if INSN is in the delay slot of a call instruction. */
8407 jump_in_call_delay (rtx insn)
8410 if (GET_CODE (insn) != JUMP_INSN)
8413 if (PREV_INSN (insn)
8414 && PREV_INSN (PREV_INSN (insn))
8415 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8417 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8419 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8420 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8427 /* Output an unconditional move and branch insn. */
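/* In the templates below, "%I1" expands to "i" when operand 1 is a
   CONST_INT (selecting the immediate form, e.g. movib instead of
   movb), and the ",tr" completer makes the combined move-and-branch
   unconditional.  */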
8430 output_parallel_movb (rtx *operands, rtx insn)
8432 int length = get_attr_length (insn);
8434 /* These are the cases in which we win. */
8436 return "mov%I1b,tr %1,%0,%2";
8438 /* None of the following cases win, but they don't lose either. */
8441 if (dbr_sequence_length () == 0)
8443 /* Nothing in the delay slot, fake it by putting the combined
8444 insn (the copy or add) in the delay slot of a bl. */
8445 if (GET_CODE (operands[1]) == CONST_INT)
8446 return "b %2\n\tldi %1,%0";
8448 return "b %2\n\tcopy %1,%0";
8452 /* Something in the delay slot, but we've got a long branch. */
8453 if (GET_CODE (operands[1]) == CONST_INT)
8454 return "ldi %1,%0\n\tb %2";
8456 return "copy %1,%0\n\tb %2";
8460 if (GET_CODE (operands[1]) == CONST_INT)
8461 output_asm_insn ("ldi %1,%0", operands);
8463 output_asm_insn ("copy %1,%0", operands);
8464 return output_lbranch (operands[2], insn, 1);
/* Output an unconditional add and branch insn.  */

const char *
output_parallel_addb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
        /* Nothing in the delay slot, fake it by putting the combined
           insn (the copy or add) in the delay slot of a bl.  */
        return "b %3\n\tadd%I1 %1,%0,%0";
      else
        /* Something in the delay slot, but we've got a long branch.  */
        return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return output_lbranch (operands[3], insn, 1);
}
/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

int
following_call (rtx insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && GET_CODE (insn) == NOTE)
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((GET_CODE (insn) == CALL_INSN
           && get_attr_type (insn) != TYPE_DYNCALL)
          || (GET_CODE (insn) == INSN
              && GET_CODE (PATTERN (insn)) != SEQUENCE
              && GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER
              && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.

   We want the delay slots of branches within jump tables to be filled.
   None of the compiler passes at the moment even has the notion that a
   PA jump table doesn't contain addresses, but instead contains actual
   instructions!

   Because we actually jump into the table, the addresses of each entry
   must stay constant in relation to the beginning of the table (which
   itself must stay constant relative to the instruction to jump into
   it).  I don't believe we can guarantee earlier passes of the compiler
   will adhere to those rules.

   So, late in the compilation process we find all the jump tables, and
   expand them into real code -- e.g. each entry in the jump table vector
   will get an appropriate label followed by a jump to the final target.

   Reorg and the final jump pass can then optimize these branches and
   fill their delay slots.  We end up with smaller, more efficient code.

   The jump instructions within the table are special; we must be able
   to identify them during assembly output (if the jumps don't get filled
   we need to emit a nop rather than nullifying the delay slot).  We
   identify jumps in switch tables by using insns with the attribute
   type TYPE_BTABLE_BRANCH.

   We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
   insns.  This serves two purposes: first, it prevents jump.c from
   noticing that the last N entries in the table jump to the instruction
   immediately after the table and deleting the jumps.  Second, those
   insns mark where we should emit .begin_brtab and .end_brtab directives
   when using GAS (this allows for better link time optimizations).  */
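
/* Illustrative sketch, not part of the original file: after this pass,
   a jump table such as (addr_vec [L1 L2 L3]) has been replaced by real
   code of roughly the form

       Lt0:  b L1        ; TYPE_BTABLE_BRANCH, delay slot may be filled
             nop         ; emitted only if the slot stays empty
       Lt1:  b L2
             nop
       Lt2:  b L3
             nop

   bracketed by begin_brtab/end_brtab marker insns, so that every entry
   keeps the same fixed size relative to the start of the table.  */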
static void
pa_reorg (void)
{
  rtx insn;

  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();

  /* This is fairly cheap, so always run it if optimizing.  */
  if (optimize > 0 && !TARGET_BIG_SWITCH)
    {
      /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          rtx pattern, tmp, location, label;
          unsigned int length, i;

          /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode.  */
          if (GET_CODE (insn) != JUMP_INSN
              || (GET_CODE (PATTERN (insn)) != ADDR_VEC
                  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
            continue;

          /* Emit marker for the beginning of the branch table.  */
          emit_insn_before (gen_begin_brtab (), insn);

          pattern = PATTERN (insn);
          location = PREV_INSN (insn);
          length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);

          for (i = 0; i < length; i++)
            {
              /* Emit a label before each jump to keep jump.c from
                 removing this code.  */
              tmp = gen_label_rtx ();
              LABEL_NUSES (tmp) = 1;
              emit_label_after (tmp, location);
              location = NEXT_INSN (location);

              if (GET_CODE (pattern) == ADDR_VEC)
                label = XEXP (XVECEXP (pattern, 0, i), 0);
              else
                label = XEXP (XVECEXP (pattern, 1, i), 0);

              tmp = gen_short_jump (label);

              /* Emit the jump itself.  */
              tmp = emit_jump_insn_after (tmp, location);
              JUMP_LABEL (tmp) = label;
              LABEL_NUSES (label)++;
              location = NEXT_INSN (location);

              /* Emit a BARRIER after the jump.  */
              emit_barrier_after (location);
              location = NEXT_INSN (location);
            }

          /* Emit marker for the end of the branch table.  */
          emit_insn_before (gen_end_brtab (), location);
          location = NEXT_INSN (location);
          emit_barrier_after (location);

          /* Delete the ADDR_VEC or ADDR_DIFF_VEC.  */
          delete_insn (insn);
        }
    }
  else
    {
      /* Still need brtab marker insns.  FIXME: the presence of these
         markers disables output of the branch table to readonly memory,
         and any alignment directives that might be needed.  Possibly,
         the begin_brtab insn should be output before the label for the
         table.  This doesn't matter at the moment since the tables are
         always output in the text section.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
        {
          /* Find an ADDR_VEC insn.  */
          if (GET_CODE (insn) != JUMP_INSN
              || (GET_CODE (PATTERN (insn)) != ADDR_VEC
                  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
            continue;

          /* Now generate markers for the beginning and end of the
             branch table.  */
          emit_insn_before (gen_begin_brtab (), insn);
          emit_insn_after (gen_end_brtab (), insn);
        }
    }
}
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

   * addb can add two registers or a register and a small integer
   and jump to a nearby (+-8k) location.  Normally the jump to the
   nearby location is conditional on the result of the add, but by
   using the "true" condition we can make the jump unconditional.
   Thus addb can perform two independent operations in one insn.

   * movb is similar to addb in that it can perform a reg->reg
   or small immediate->reg copy and jump to a nearby (+-8k) location.

   * fmpyadd and fmpysub can perform an FP multiply and either an
   FP add or FP sub if the operands of the multiply and add/sub are
   independent (there are other minor restrictions).  Note both
   the fmpy and fadd/fsub can in theory move to better spots according
   to data dependencies, but for now we require the fmpy stay at a
   fixed location.

   * Many of the memory operations can perform pre & post updates
   of index registers.  GCC's pre/post increment/decrement addressing
   is far too simple to take advantage of all the possibilities.  This
   pass may not be suitable since those insns may not be independent.

   * comclr can compare two ints or an int and a register, nullify
   the following instruction and zero some other register.  This
   is more difficult to use as it's harder to find an insn which
   will generate a comclr than finding something like an unconditional
   branch.  (conditional moves & long branches create comclr insns).

   * Most arithmetic operations can conditionally skip the next
   instruction.  They can be viewed as "perform this operation
   and conditionally jump to this nearby location" (where nearby
   is an insn away).  These are difficult to use due to the
   branch length restrictions.  */
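
/* Illustrative sketch, not part of the original file: a typical
   anchor/floater pair combined by this pass is a register copy
   followed by an unconditional branch,

       copy %r4,%r5         ; floater (PA_COMBINE_TYPE_ADDMOVE)
       b    somewhere       ; anchor  (PA_COMBINE_TYPE_UNCOND_BRANCH)

   which output_parallel_movb renders as the single insn

       movb,tr %r4,%r5,somewhere

   using the always-true condition to make the branch unconditional.  */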
static void
pa_combine_instructions (void)
{
  rtx anchor, new;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  new = make_insn_raw (new);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
         Also ignore any special USE insns.  */
      if ((GET_CODE (anchor) != INSN
           && GET_CODE (anchor) != JUMP_INSN
           && GET_CODE (anchor) != CALL_INSN)
          || GET_CODE (PATTERN (anchor)) == USE
          || GET_CODE (PATTERN (anchor)) == CLOBBER
          || GET_CODE (PATTERN (anchor)) == ADDR_VEC
          || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
        continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
          || anchor_attr == PA_COMBINE_TYPE_FADDSUB
          || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
              && ! forward_branch_p (anchor)))
        {
          rtx floater;

          for (floater = PREV_INSN (anchor);
               floater;
               floater = PREV_INSN (floater))
            {
              if (GET_CODE (floater) == NOTE
                  || (GET_CODE (floater) == INSN
                      && (GET_CODE (PATTERN (floater)) == USE
                          || GET_CODE (PATTERN (floater)) == CLOBBER)))
                continue;

              /* Anything except a regular INSN will stop our search.  */
              if (GET_CODE (floater) != INSN
                  || GET_CODE (PATTERN (floater)) == ADDR_VEC
                  || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
                {
                  floater = NULL_RTX;
                  break;
                }

              /* See if FLOATER is suitable for combination with the
                 anchor.  */
              floater_attr = get_attr_pa_combine_type (floater);
              if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                      && floater_attr == PA_COMBINE_TYPE_FMPY))
                {
                  /* If ANCHOR and FLOATER can be combined, then we're
                     done with this pass.  */
                  if (pa_can_combine_p (new, anchor, floater, 0,
                                        SET_DEST (PATTERN (floater)),
                                        XEXP (SET_SRC (PATTERN (floater)), 0),
                                        XEXP (SET_SRC (PATTERN (floater)), 1)))
                    break;
                }

              else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
                       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
                {
                  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
                    {
                      if (pa_can_combine_p (new, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)), 0),
                                            XEXP (SET_SRC (PATTERN (floater)), 1)))
                        break;
                    }
                  else
                    {
                      if (pa_can_combine_p (new, anchor, floater, 0,
                                            SET_DEST (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater)),
                                            SET_SRC (PATTERN (floater))))
                        break;
                    }
                }
            }

          /* If we didn't find anything on the backwards scan try forwards.  */
          if (!floater
              && (anchor_attr == PA_COMBINE_TYPE_FMPY
                  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
            {
              for (floater = anchor; floater; floater = NEXT_INSN (floater))
                {
                  if (GET_CODE (floater) == NOTE
                      || (GET_CODE (floater) == INSN
                          && (GET_CODE (PATTERN (floater)) == USE
                              || GET_CODE (PATTERN (floater)) == CLOBBER)))
                    continue;

                  /* Anything except a regular INSN will stop our search.  */
                  if (GET_CODE (floater) != INSN
                      || GET_CODE (PATTERN (floater)) == ADDR_VEC
                      || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
                    {
                      floater = NULL_RTX;
                      break;
                    }

                  /* See if FLOATER is suitable for combination with the
                     anchor.  */
                  floater_attr = get_attr_pa_combine_type (floater);
                  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
                       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
                      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                          && floater_attr == PA_COMBINE_TYPE_FMPY))
                    {
                      /* If ANCHOR and FLOATER can be combined, then we're
                         done with this pass.  */
                      if (pa_can_combine_p (new, anchor, floater, 1,
                                            SET_DEST (PATTERN (floater)),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  0),
                                            XEXP (SET_SRC (PATTERN (floater)),
                                                  1)))
                        break;
                    }
                }
            }

          /* FLOATER will be nonzero if we found a suitable floating
             insn for combination with ANCHOR.  */
          if (floater
              && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
                  || anchor_attr == PA_COMBINE_TYPE_FMPY))
            {
              /* Emit the new instruction and delete the old anchor.  */
              emit_insn_before (gen_rtx_PARALLEL
                                (VOIDmode,
                                 gen_rtvec (2, PATTERN (anchor),
                                            PATTERN (floater))),
                                anchor);

              PUT_CODE (anchor, NOTE);
              NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
              NOTE_SOURCE_FILE (anchor) = 0;

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
              delete_insn (floater);

              continue;
            }
          else if (floater
                   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
            {
              rtx temp;
              /* Emit the new_jump instruction and delete the old anchor.  */
              temp
                = emit_jump_insn_before (gen_rtx_PARALLEL
                                         (VOIDmode,
                                          gen_rtvec (2, PATTERN (anchor),
                                                     PATTERN (floater))),
                                         anchor);

              JUMP_LABEL (temp) = JUMP_LABEL (anchor);
              PUT_CODE (anchor, NOTE);
              NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
              NOTE_SOURCE_FILE (anchor) = 0;

              /* Emit a special USE insn for FLOATER, then delete
                 the floating insn.  */
              emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
              delete_insn (floater);
              continue;
            }
        }
    }
}
static int
pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
                  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx start, end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
  INSN_CODE (new) = -1;
  insn_code_number = recog_memoized (new);
  if (insn_code_number < 0
      || (extract_insn (new), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There are up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */
int
insn_refs_are_delayed (rtx insn)
{
  return ((GET_CODE (insn) == INSN
           && GET_CODE (PATTERN (insn)) != SEQUENCE
           && GET_CODE (PATTERN (insn)) != USE
           && GET_CODE (PATTERN (insn)) != CLOBBER
           && get_attr_type (insn) == TYPE_MILLI));
}
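
/* Illustrative sketch, not part of the original file: treating argument
   references as delayed lets reorg transform, e.g.,

       ldi 10,%r25          ; set up a millicode argument
       bl $$mulI,%r31       ; millicode multiply, result in %r29

   into

       bl $$mulI,%r31
       ldi 10,%r25          ; argument setup now in the delay slot

   The delay slot insn must never touch the return pointer itself,
   since the bl clobbers it.  */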
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else
   TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

rtx
function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
        {
          /* Aggregates with a size less than or equal to 128 bits are
             returned in GR 28(-29).  They are left justified.  The pad
             bits are undefined.  Larger aggregates are returned in
             memory.  */
          rtx loc[2];
          int i, offset = 0;
          int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, 28 + i),
                                          GEN_INT (offset));
              offset += 8;
            }

          return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
        }
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
        {
          /* Aggregates 5 to 8 bytes in size are returned in general
             registers r28-r29 in the same manner as other non
             floating-point objects.  The data is right-justified and
             zero-extended to 64 bits.  This is opposite to the normal
             justification used on big endian targets and requires
             special treatment.  */
          rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                       gen_rtx_REG (DImode, 28), const0_rtx);
          return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
        }
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && TYPE_PRECISION (valtype) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
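
/* Illustrative sketch, not part of the original file: some concrete
   results of function_value under the 32-bit ABI:

       short         -> (reg:SI 28)   promoted to word_mode
       double        -> (reg:DF 32)   fr4, unless TARGET_SOFT_FLOAT
       8-byte struct -> (parallel [(expr_list (reg:DI 28) (const_int 0))])

   while on PA64 a 16-byte struct becomes a two-element PARALLEL of
   (reg:DI 28) and (reg:DI 29).  */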
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
rtx
function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
              int named ATTRIBUTE_UNUSED)
{
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
        return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
        alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
        return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

         Remember, gprs grow towards smaller register numbers while
         fprs grow to higher register numbers.  Also remember that
         although FP regs are 32-bit addressable, we pretend that
         the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
         treatment.  */
      if (arg_size > 1
          || mode == BLKmode
          || (type && (AGGREGATE_TYPE_P (type)
                       || TREE_CODE (type) == COMPLEX_TYPE
                       || TREE_CODE (type) == VECTOR_TYPE)))
        {
          /* Double-extended precision (80-bit), quad-precision (128-bit)
             and aggregates including complex numbers are aligned on
             128-bit boundaries.  The first eight 64-bit argument slots
             are associated one-to-one, with general registers r26
             through r19, and also with floating-point registers fr4
             through fr11.  Arguments larger than one word are always
             passed in general registers.

             Using a PARALLEL with a word mode register results in left
             justified data on a big-endian target.  */

          rtx loc[8];
          int i, offset = 0, ub = arg_size;

          /* Align the base register.  */
          gpr_reg_base -= alignment;

          ub = MIN (ub, max_arg_words - cum->words - alignment);
          for (i = 0; i < ub; i++)
            {
              loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
                                          gen_rtx_REG (DImode, gpr_reg_base),
                                          GEN_INT (offset));
              gpr_reg_base -= 1;
              offset += 8;
            }

          return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
        }
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
         which registers we must use.  */
      if (arg_size > 1)
        {
          if (cum->words)
            {
              gpr_reg_base = 23;
              fpr_reg_base = 38;
            }
          else
            {
              gpr_reg_base = 25;
              fpr_reg_base = 34;
            }

          /* Structures 5 to 8 bytes in size are passed in the general
             registers in the same manner as other non floating-point
             objects.  The data is right-justified and zero-extended
             to 64 bits.  This is opposite to the normal justification
             used on big endian targets and requires special treatment.
             We now define BLOCK_REG_PADDING to pad these objects.
             Aggregates, complex and vector types are passed in the same
             manner as structures.  */
          if (mode == BLKmode
              || (type && (AGGREGATE_TYPE_P (type)
                           || TREE_CODE (type) == COMPLEX_TYPE
                           || TREE_CODE (type) == VECTOR_TYPE)))
            {
              rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (DImode, gpr_reg_base),
                                           const0_rtx);
              return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
            }
        }
      else
        {
          /* We have a single word (32 bits).  A simple computation
             will get us the register #s we need.  */
          gpr_reg_base = 26 - cum->words;
          fpr_reg_base = 32 + 2 * cum->words;
        }
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
          is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
          pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
          registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
          sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
         calls with the 32 bit ABI and the HP assembler since there is no
         way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
          && !TARGET_GAS
          && !cum->incoming
          && cum->indirect
          && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
        = gen_rtx_PARALLEL
            (mode,
             gen_rtvec (2,
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, fpr_reg_base),
                                           const0_rtx),
                        gen_rtx_EXPR_LIST (VOIDmode,
                                           gen_rtx_REG (mode, gpr_reg_base),
                                           const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
          /* Indirect calls in the normal 32bit ABI require all arguments
             to be passed in general registers.  */
          || (!TARGET_PORTABLE_RUNTIME
              && !TARGET_64BIT
              && !TARGET_ELF32
              && cum->indirect)
          /* If the parameter is not a scalar floating-point parameter,
             then it belongs in GPRs.  */
          || GET_MODE_CLASS (mode) != MODE_FLOAT
          /* Structure with single SFmode field belongs in GPR.  */
          || (type && AGGREGATE_TYPE_P (type)))
        retval = gen_rtx_REG (mode, gpr_reg_base);
      else
        retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
                      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
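
/* Worked example, not part of the original file: with max_arg_words == 8,
   a three-word argument starting at cum->words == 6 (so offset == 0)
   fails both tests above -- 6 + 0 + 3 > 8 and 6 + 0 < 8 -- so the
   argument is split: (8 - 6 - 0) * UNITS_PER_WORD bytes, i.e. two words,
   are passed in registers and the remaining word goes on the stack.  */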
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
        {
          /* We only want to emit a .nsubspa directive once at the
             start of the function.  */
          cfun->machine->in_nsubspa = 1;

          /* Create a new subspace for the text.  This provides
             better stub placement and one-only functions.  */
          if (cfun->decl
              && DECL_ONE_ONLY (cfun->decl)
              && !DECL_WEAK (cfun->decl))
            {
              output_section_asm_op ("\t.SPACE $TEXT$\n"
                                     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
                                     "ACCESS=44,SORT=24,COMDAT");
              return;
            }
        }
      else
        {
          /* There isn't a current function or the body of the current
             function has been completed.  So, we are changing to the
             text section to output debugging information.  Thus, we
             need to forget that we are in the text section so that
             varasm.c will call us when text_section is selected again.  */
          gcc_assert (!cfun || !cfun->machine
                      || cfun->machine->in_nsubspa == 2);
          in_section = NULL;
        }
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
                           "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
                           "\t.SPACE $TEXT$\n"
                           "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
                           "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
                           som_output_comdat_data_section_asm_op,
                           "\t.SPACE $PRIVATE$\n"
                           "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
                           "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     complain.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
                   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
          || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
          && DECL_ONE_ONLY (exp)
          && !DECL_WEAK (exp))
        return som_one_only_readonly_data_section;
      else
        return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
           && TREE_CODE (exp) == VAR_DECL
           && DECL_ONE_ONLY (exp)
           && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
                     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
          || int_size_in_bytes (type) <= 0);
}
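
/* Worked example, not part of the original file: under the 32-bit SOM
   ABI an 8-byte struct is returned in registers (8 <= 8) while a
   12-byte struct is returned in memory; on PA64 the threshold is 16
   bytes.  Variable-sized and empty types always return true, since
   int_size_in_bytes yields -1 or 0 for them.  */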
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol pointers.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}

/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
          && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
        ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif