/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "tree.h"
#include "output.h"
#include "except.h"
#include "expr.h"
#include "optabs.h"
#include "reload.h"
#include "integrate.h"
#include "function.h"
#include "toplev.h"
#include "ggc.h"
#include "recog.h"
#include "predict.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */
int
hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
{
  enum machine_mode store_mode;
  enum machine_mode other_mode;
  rtx set;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)
    return 0;

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);
  if (!set)
    return 0;

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
}
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#else
#define DO_FRAME_NOTES 0
#endif
#endif
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
     ATTRIBUTE_UNUSED;
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
     ATTRIBUTE_UNUSED;
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
#endif
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#endif
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
#endif
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				 tree, bool);
static struct machine_function * pa_init_machine_status (void);
static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
					   enum machine_mode,
					   secondary_reload_info *);
/* The following extra sections are only used for SOM.  */
static GTY(()) section *som_readonly_data_section;
static GTY(()) section *som_one_only_readonly_data_section;
static GTY(()) section *som_one_only_data_section;

/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
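/* The default is the most recent standard the configured HP-UX target
   supports: UNIX 98 for 11.11, UNIX 95 for 10.10, and UNIX 93
   otherwise.  The -munix= option overrides it; see pa_handle_option.  */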
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())
{
  rtx internal_label;
  rtx symbol;
};
static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
  deferred_plabels;
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#else
#define TARGET_ASM_FILE_END output_deferred_plabels
#endif
#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
#endif

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
#endif

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD pa_secondary_reload

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

static void
fix_range (const char *const_str)
{
  int i, first, last;
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG2}* where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */
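  /* For example, -mfixed-range=%fr20-%fr23,%fr28-%fr31 marks two blocks
     of floating point registers as fixed; ranges are comma separated
     and each endpoint may be a register name or number.  */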
  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  while (1)
    {
      dash = strchr (str, '-');
      if (!dash)
	{
	  warning (0, "value of -mfixed-range must have form REG1-REG2");
	  return;
	}
      *dash = '\0';

      comma = strchr (dash + 1, ',');
      if (comma)
	*comma = '\0';

      first = decode_reg_name (str);
      if (first < 0)
	{
	  warning (0, "unknown register name: %s", str);
	  return;
	}

      last = decode_reg_name (dash + 1);
      if (last < 0)
	{
	  warning (0, "unknown register name: %s", dash + 1);
	  return;
	}

      *dash = '-';

      if (first > last)
	{
	  warning (0, "%s-%s is an empty range", str, dash + 1);
	  return;
	}

      for (i = first; i <= last; ++i)
	fixed_regs[i] = call_used_regs[i] = 1;

      if (!comma)
	break;

      *comma = ',';
      str = comma + 1;
    }

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
    if (!fixed_regs[i])
      break;

  if (i > FP_REG_LAST)
    target_flags |= MASK_DISABLE_FPREGS;
}
/* Implement TARGET_HANDLE_OPTION.  */

static bool
pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_mnosnake:
    case OPT_mpa_risc_1_0:
    case OPT_march_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);
      return true;

    case OPT_msnake:
    case OPT_mpa_risc_1_1:
    case OPT_march_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;
      return true;

    case OPT_mpa_risc_2_0:
    case OPT_march_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;
      return true;

    case OPT_mschedule_:
      if (strcmp (arg, "8000") == 0)
	pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
	pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
	pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
	pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
	pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
	pa_cpu = PROCESSOR_7300;
      else
	return false;
      return true;

    case OPT_mfixed_range_:
      fix_range (arg);
      return true;

#if TARGET_HPUX
    case OPT_munix_93:
      flag_pa_unix = 1993;
      return true;
#endif

#if TARGET_HPUX_10_10
    case OPT_munix_95:
      flag_pa_unix = 1995;
      return true;
#endif

#if TARGET_HPUX_11_11
    case OPT_munix_98:
      flag_pa_unix = 1998;
      return true;
#endif

    default:
      return true;
    }
}
void
override_options (void)
{
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    {
      warning (0, "PIC code generation is not supported in the portable runtime model");
    }

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    {
      warning (0, "PIC code generation is not compatible with fast indirect calls");
    }

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
    {
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;
    }

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)
    flag_pic = 2;

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
  if (!TARGET_GAS)
    {
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }

  init_machine_status = pa_init_machine_status;
}
static void
pa_init_builtins (void)
{
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
    built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
    = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
#endif
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
{
  return ggc_alloc_cleared (sizeof (machine_function));
}
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

static void
copy_reg_pointer (rtx to, rtx from)
{
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
}
/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */
int
symbolic_expression_p (rtx x)
{
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)
    x = XEXP (x, 0);

  return (symbolic_operand (x, VOIDmode));
}
/* Accept any constant that can be moved in one instruction into a
   general register.  */
int
cint_ok_for_move (HOST_WIDE_INT intval)
{
  /* OK if ldo, ldil, or zdepi, can be used.  */
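  /* Per the constraint letters in pa.h: 'J' accepts a 14-bit signed
     immediate (ldo), 'N' a constant that a single ldil can load, and
     'K' a constant that zdepi can synthesize.  */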
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
	  || CONST_OK_FOR_LETTER_P (intval, 'N')
	  || CONST_OK_FOR_LETTER_P (intval, 'K'));
}
/* Return truth value of whether OP can be used as an operand in an
   adddi3 insn.  */
int
adddi3_operand (rtx op, enum machine_mode mode)
{
  return (register_operand (op, mode)
	  || (GET_CODE (op) == CONST_INT
	      && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
}
/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5 bit signed number to a given field
   length, then places this field anywhere in a zero.  */
int
zdepi_cint_p (unsigned HOST_WIDE_INT x)
{
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
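  /* Once the four low immediate bits are stripped, a valid X leaves
     only a contiguous run of ones (the sign extension).  Adding the
     least significant set bit collapses such a run into a single
     carry bit, so T must come out as a power of two (or zero).  */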
  lsb_mask = x & -x;
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
}
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit patterns like these:
   0....01....1
   1....10....0
   1..10..01..1  */
int
and_mask_p (unsigned HOST_WIDE_INT mask)
{
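  /* Complementing MASK turns each accepted pattern into one contiguous
     run of ones; adding the run's least significant bit then leaves at
     most a single bit set.  */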
  mask = ~mask;
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
/* True iff depi can be used to compute (reg | MASK).  */
int
ior_mask_p (unsigned HOST_WIDE_INT mask)
{
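  /* Same trick as in and_mask_p, minus the complement: MASK itself
     must be one contiguous run of ones.  */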
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

rtx
legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
{
  rtx pic_ref = orig;

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
    {
      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support
	 multiple word sizes.

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;
      return reg;
    }
  if (GET_CODE (orig) == SYMBOL_REF)
    {
      rtx insn, tmp_reg;

      gcc_assert (reg);

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
      pic_ref
	= gen_const_mem (Pmode,
			 gen_rtx_LO_SUM (Pmode, tmp_reg,
					 gen_rtx_UNSPEC (Pmode,
							 gen_rtvec (1, orig),
							 UNSPEC_DLTIND14R)));

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

      return reg;
    }
  else if (GET_CODE (orig) == CONST)
    {
      rtx base;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (reg);
      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	{
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
	}
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
    }

  return pic_ref;
}
static GTY(()) rtx gen_tls_tga;

static rtx
gen_tls_get_addr (void)
{
  if (!gen_tls_tga)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");
  return gen_tls_tga;
}

static rtx
hppa_tls_call (rtx arg)
{
  rtx ret;

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

  return ret;
}
static rtx
legitimize_tls_address (rtx addr)
{
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);
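  /* The dynamic models materialize an argument for a __tls_get_addr
     call; the exec models read the thread pointer and add the symbol's
     offset to it.  */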
  switch (model)
    {
    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);
      break;

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      start_sequence ();
      emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      insn = get_insns ();
      end_sequence ();
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
					  UNSPEC_TLSLDBASE));
      emit_insn (gen_tld_offset_load (ret, addr, t2));
      break;

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
      break;

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
      break;

    default:
      gcc_unreachable ();
    }

  return ret;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

   into:

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	else
	  Y = (<large int> & ~mask)		Round down.
	Z = X + Y
	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use
   0x1f as the mask.

   MODE_INT references allow displacements which fit in 14 bits, so use
   0x3fff as the mask.

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into
   a register.  */
rtx
hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
{
  rtx orig = x;

  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
  else if (flag_pic)
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)
    x = XEXP (x, 0);

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
    {
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
    }

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
    {
      rtx int_part, ptr_reg;
      int newoffset;
      int offset = INTVAL (XEXP (x, 1));
      int mask;

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
      else
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
	 them.  */
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	{
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
	  rtx tmp_reg
	    = force_reg (Pmode,
			 gen_rtx_HIGH (Pmode, const_part));
	  ptr_reg
	    = force_reg (Pmode,
			 gen_rtx_LO_SUM (Pmode,
					 tmp_reg, const_part));
	}
      else
	{
	  if (! VAL_14_BITS_P (newoffset))
	    int_part = force_reg (Pmode, GEN_INT (newoffset));
	  else
	    int_part = GEN_INT (newoffset);

	  ptr_reg = force_reg (Pmode,
			       gen_rtx_PLUS (Pmode,
					     force_reg (Pmode, XEXP (x, 0)),
					     int_part));
	}
      return plus_constant (ptr_reg, offset - newoffset);
    }
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
    {
      int val = INTVAL (XEXP (XEXP (x, 0), 1));
      rtx reg1, reg2;

      reg1 = XEXP (x, 1);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,
					     gen_rtx_MULT (Pmode,
							   reg2,
							   GEN_INT (val)),
					     reg1));
    }
  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))
    {
      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);
      reg2 = XEXP (x, 1);
      base = NULL_RTX;
      idx = NULL_RTX;

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	{
	  base = reg1;
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
			      gen_rtx_MULT (Pmode,
					    XEXP (XEXP (XEXP (x, 0), 0), 0),
					    XEXP (XEXP (XEXP (x, 0), 0), 1)),
			      XEXP (x, 1));
	}
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	{
	  base = reg2;
	  orig_base = XEXP (x, 1);
	  idx = XEXP (x, 0);
	}

      if (base == 0)
	return orig;

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	{
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	  return
	    force_reg
	      (Pmode, gen_rtx_PLUS (Pmode,
				    gen_rtx_MULT (Pmode, reg1,
						  XEXP (XEXP (idx, 0), 1)),
				    base));
	}

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	{
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));
	  rtx reg1, reg2;

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,
							       reg2,
							       GEN_INT (val)),
						 reg1));
	}

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),
				      reg2));

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
    }
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange the
     terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
    {
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
	y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	{
	  /* See if this looks like
		(plus (mult (reg) (shadd_const))
		      (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));
	      rtx reg1, reg2;

	      reg1 = XEXP (x, 1);
	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
							    reg2,
							    GEN_INT (val)),
					      reg1));
	    }
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	    {
	      regx1
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
							regx2, regx1));
	      return
		force_reg (Pmode,
			   gen_rtx_PLUS (Pmode,
					 gen_rtx_MULT (Pmode, regx2,
						       XEXP (XEXP (x, 0), 1)),
					 force_reg (Pmode, XEXP (y, 0))));
	    }
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	    {
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */
	      return orig;
	    }
	  else
	    {
	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
						 regx1, regy2));
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
	    }
	}
    }

  return orig;
}
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

static int
hppa_address_cost (rtx X)
{
  switch (GET_CODE (X))
    {
    case REG:
    case PLUS:
    case LO_SUM:
      return 1;
    case HIGH:
      return 2;
    default:
      return 4;
    }
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
{
  switch (code)
    {
    case CONST_INT:
      if (INTVAL (x) == 0)
	*total = 0;
      else if (INT_14_BITS (x))
	*total = 1;
      else
	*total = 2;
      return true;

    case HIGH:
      *total = 2;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 4;
      return true;

    case CONST_DOUBLE:
      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)
	*total = 0;
      else
	*total = 8;
      return true;
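      /* An FP multiply takes roughly three cycles.  Integer multiplies
	 are done with xmpyu in the FP unit when PA 1.1 FP hardware is
	 usable, and with a $$mulI millicode call otherwise.  */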
    case MULT:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
      else
	*total = COSTS_N_INSNS (20);
      return true;

    case DIV:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	{
	  *total = COSTS_N_INSNS (14);
	  return true;
	}
      /* FALLTHRU */

    case UDIV:
    case MOD:
    case UMOD:
      *total = COSTS_N_INSNS (60);
      return true;

    case PLUS: /* this includes shNadd insns */
    case MINUS:
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      return false;
    }
}
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */
static inline rtx
force_mode (enum machine_mode mode, rtx orig)
{
  if (mode == GET_MODE (orig))
    return orig;

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));
}
/* Return 1 if *X is a thread-local symbol.  */

static int
pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return PA_SYMBOL_REF_TLS_P (*x);
}

/* Return 1 if X contains a thread-local symbol.  */

bool
pa_tls_referenced_p (rtx x)
{
  if (!TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
}
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

int
emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
{
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];
  register rtx tem;
  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
    {
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);
    }

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    operand1
      = replace_equiv_address (operand1,
			       copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
  if (scratch_reg
      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);
    }

  if (scratch_reg
      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
    {
     /* We must not alter SUBREG_BYTE (operand1) since that would confuse
	the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
    }

  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = replace_equiv_address (operand0, tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = replace_equiv_address (operand1, tem);
  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
  if (scratch_reg
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      ? SFmode : DFmode),
				     XEXP (XEXP (operand1, 0), 0))))))
    {
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      replace_equiv_address (operand1, scratch_reg)));
      return 1;
    }
  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				       ? SFmode : DFmode),
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					  ? SFmode : DFmode),
					 XEXP (XEXP (operand0, 0), 0)))))
    {
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	{
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand0, 0)),
					  Pmode,
					  XEXP (XEXP (operand0, 0), 0),
					  scratch_reg));
	}
      else
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode,
			      replace_equiv_address (operand0, scratch_reg),
			      operand1));
      return 1;
    }
  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))
    {
      rtx const_mem, xoperands[2];

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
	 to us.  */
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      const_mem = force_const_mem (mode, operand1);
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (const_mem, 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      replace_equiv_address (const_mem, scratch_reg)));
      return 1;
    }
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
    {
      /* D might not fit in 14 bits either; for such cases load D into
	 scratch reg.  */
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	{
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  Pmode,
					  XEXP (XEXP (operand1, 0), 0),
					  scratch_reg));

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	     operand0.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg,
			  replace_equiv_address (operand1, scratch_reg));
	}
      else
	{
	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);
	}

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
	 point.  */
      emit_move_insn (operand0, scratch_reg);
      return 1;
    }
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
    {
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	{
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	    {
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);
	    }

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	    {
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		 are ignored.  */
	      if (decl
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
		{
		  tree type;

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		     tree operand 1.  */
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))
		    {
		      int align;

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
			       : BITS_PER_UNIT);
		      mark_reg_pointer (operand0, align);
		    }
		}
	    }

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
    }
  else if (GET_CODE (operand0) == MEM)
    {
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	{
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
	  return 1;
	}
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	{
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
	  return 1;
	}
      if (! (reload_in_progress || reload_completed))
	{
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
	}
    }
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))
    {
      int ishighonly = 0;

      if (GET_CODE (operand1) == HIGH)
	{
	  ishighonly = 1;
	  operand1 = XEXP (operand1, 0);
	}
      if (symbolic_operand (operand1, mode))
	{
	  /* Argh.  The assembler and linker can't handle arithmetic
	     involving plabels.

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	    {
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		{
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		}
	      else if (flag_pic)
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		{
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		}
	      else
		{
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);
		}

	      /* Get the address of the memory location.  PIC-ify it if
		 necessary.  */
	      temp = XEXP (temp, 0);
	      if (flag_pic)
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
		 register.  */
	      operands[1] = temp;
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
		 register.  */
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      return 1;
	    }

	  if (flag_pic)
	    {
	      rtx temp;
	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
		 in 14 bits.  */
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  && flag_pic)
		{
		  rtx const_mem = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
							mode, temp);
		  operands[1] = replace_equiv_address (const_mem, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		}
	      else
		{
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
		}
	    }
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */
	  else
	    {
	      rtx temp, set;

	      if (reload_in_progress || reload_completed)
		{
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		}
	      else
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);

	      if (ishighonly)
		set = gen_rtx_SET (mode, operand0, temp);
	      else
		set = gen_rtx_SET (VOIDmode,
				   operand0,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      temp,
				      gen_rtx_HIGH (mode, operand1)));
	      emit_insn (set);
	    }

	  return 1;
	}
      else if (pa_tls_referenced_p (operand1))
	{
	  rtx tmp = operand1;
	  rtx addend = NULL;

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	    {
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);
	    }

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	  if (addend)
	    {
	      tmp = gen_rtx_PLUS (mode, tmp, addend);
	      tmp = force_operand (tmp, operands[0]);
	    }
	  operands[1] = tmp;
	}
      else if (GET_CODE (operand1) != CONST_INT
	       || !cint_ok_for_move (INTVAL (operand1)))
	{
	  rtx insn, temp;
	  rtx op1 = operand1;
	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;
	  int insert = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	  if (TARGET_64BIT
	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
	    {
	      HOST_WIDE_INT nval;

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we can
		 use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
	      if (value != nval)
		{
#if HOST_BITS_PER_WIDE_INT > 32
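		  /* Written as ~(~value >> 32) because right-shifting a
		     negative HOST_WIDE_INT is implementation-defined.  */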
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
#endif
		  insert = 1;
		  value = nval;
		  operand1 = GEN_INT (nval);
		}
	    }

	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	  else
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      && !insert)
	    {
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
		 high part.  */
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;

	      if (low >= 0x2000)
		{
		  if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
		    high += 0x2000;
		  else
		    high += 0x4000;
		}

	      low = value - high;

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
	      operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	    }
	  else
	    {
	      emit_insn (gen_rtx_SET (VOIDmode, temp,
				      gen_rtx_HIGH (mode, operand1)));
	      operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
	    }

	  insn = emit_move_insn (operands[0], operands[1]);
	  /* Now insert the most significant 32 bits of the value
	     into the register.  When we don't have a second register
	     available, it could take up to nine instructions to load
	     a 64-bit integer constant.  Prior to reload, we force
	     constants that would take more than three instructions
	     to load to the constant pool.  During and after reload,
	     we have to handle all possible values.  */
	  if (insert)
	    {
	      /* Use a HIGH/LO_SUM/INSV sequence if we have a second
		 register and the value to be inserted is outside the
		 range that can be loaded with three depdi instructions.  */
	      if (temp != operand0 && (insv >= 16384 || insv < -16384))
		{
		  operand1 = GEN_INT (insv);

		  emit_insn (gen_rtx_SET (VOIDmode, temp,
					  gen_rtx_HIGH (mode, operand1)));
		  emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
		  emit_insn (gen_insv (operand0, GEN_INT (32),
				       const0_rtx, temp));
		}
	      else
		{
		  int len = 5, pos = 27;

		  /* Insert the bits using the depdi instruction.  */
		  while (pos >= 0)
		    {
		      HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
		      HOST_WIDE_INT sign = v5 < 0;

		      /* Left extend the insertion.  */
		      insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
		      while (pos > 0 && (insv & 1) == sign)
			{
			  insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
			  len += 1;
			  pos -= 1;
			}

		      emit_insn (gen_insv (operand0, GEN_INT (len),
					   GEN_INT (pos), GEN_INT (v5)));

		      len = pos > 0 && pos < 5 ? pos : 5;
		      pos -= len;
		    }
		}
	    }

	  REG_NOTES (insn)
	    = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));

	  return 1;
	}
    }

  /* Now have insn-emit do whatever it normally does.  */
  return 0;
}
/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

int
reloc_needed (tree exp)
{
  int reloc = 0;

  switch (TREE_CODE (exp))
    {
    case ADDR_EXPR:
      return 1;

    case PLUS_EXPR:
    case MINUS_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= reloc_needed (TREE_OPERAND (exp, 1));
      break;

    case NOP_EXPR:
    case CONVERT_EXPR:
    case NON_LVALUE_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      break;

    case CONSTRUCTOR:
      {
	tree value;
	unsigned HOST_WIDE_INT ix;

	FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
	  if (value)
	    reloc |= reloc_needed (value);
      }
      break;

    case ERROR_MARK:
      break;

    default:
      break;
    }
  return reloc;
}
/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,
   will be true.  */

int
read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
{
  if (GET_CODE (operand) == CONST)
    operand = XEXP (XEXP (operand, 0), 0);
  if (flag_pic)
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
    }
  else
    {
      if (GET_CODE (operand) == SYMBOL_REF)
	return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
    }
  return 1;
}
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */
const char *
singlemove_string (rtx *operands)
{
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
    return "ldw %1,%0";
  if (GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      long i;
      REAL_VALUE_TYPE d;

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
	 bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);

      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
    }
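  /* Output templates of the form {A|B} let output_asm_insn pick an
     alternative via ASSEMBLER_DIALECT: the first is the PA 1.x
     mnemonic, the second the PA 2.0 one.  */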
  if (GET_CODE (operands[1]) == CONST_INT)
    {
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
	return "ldi %1,%0";
      else if ((intval & 0x7ff) == 0)
	return "ldil L'%1,%0";
      else if (zdepi_cint_p (intval))
	return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
      else
	return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
    }
  return "copy %1,%0";
}
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */

compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;

      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32; len++)
        if ((imm & (1 << len)) == 0)

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;

/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */

compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
  HOST_WIDE_INT lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
           ? 4 : HOST_BITS_PER_WIDE_INT - lsb);

      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
        if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
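/* Illustration (standalone host program, not part of this file): the
   32-bit variant above as a self-contained function.  The final stores
   into OP[] are elided in the excerpt; the values used here (immediate,
   position counted from bit 31, field length) are reconstructed from
   the depwi,z operand order and should be read as an assumption.
   IMM must satisfy zdepi_cint_p for the result to be meaningful.  */
#include <stdio.h>

static void
zdepwi_operands (unsigned imm, unsigned *op)
{
  int lsb, len;

  /* Find the least significant set bit, shifting IMM down to it.  */
  for (lsb = 0; lsb < 32; lsb++)
    {
      if ((imm & 1) != 0)
        break;
      imm >>= 1;
    }

  if ((imm & 0x10) == 0)
    /* The 5-bit field is non-negative: four payload bits suffice.  */
    len = (lsb <= 28) ? 4 : 32 - lsb;
  else
    {
      /* Negative field: measure the run of ones...  */
      for (len = 5; len < 32; len++)
        if ((imm & (1u << len)) == 0)
          break;

      /* ...and sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
    }

  op[0] = imm;          /* immediate to deposit */
  op[1] = 31 - lsb;     /* position, counted from the left */
  op[2] = len;          /* field length */
}

int
main (void)
{
  unsigned op[3];
  zdepwi_operands (0x00f00000, op);
  printf ("depwi,z %d,%u,%u\n", (int) op[0], op[1], op[2]);
  return 0;
}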
/* Output assembler code to perform a doubleword move insn
   with operands OPERANDS.  */

output_move_double (rtx *operands)
  enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;

  rtx addreg0 = 0, addreg1 = 0;

  /* First classify both operands.  */

  if (REG_P (operands[0]))
  else if (offsettable_memref_p (operands[0]))
  else if (GET_CODE (operands[0]) == MEM)

  if (REG_P (operands[1]))
  else if (CONSTANT_P (operands[1]))
  else if (offsettable_memref_p (operands[1]))
  else if (GET_CODE (operands[1]) == MEM)

  /* Check for the cases that the operand constraints are not
     supposed to allow to happen.  */
  gcc_assert (optype0 == REGOP || optype1 == REGOP);

  /* Handle copies between general and floating registers.  */

  if (optype0 == REGOP && optype1 == REGOP
      && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
      if (FP_REG_P (operands[0]))
          output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
          output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
          return "{fldds|fldd} -16(%%sp),%0";

          output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
          output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
          return "{ldws|ldw} -12(%%sp),%R0";
  /* Handle auto decrementing and incrementing loads and stores
     specifically, since the structure of the function doesn't work
     for them without major modification.  Do this better once this
     port knows about the PA's general inc/dec addressing.
     (This was written by tege.  Chide him if it doesn't work.)  */
  if (optype0 == MEMOP)
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[0], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));

          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to
             save a register file writeback)  */
          if (GET_CODE (addr) == POST_INC)
            return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
          return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);

          operands[0] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[1]) == REG
                      && GET_CODE (operands[0]) == REG);

          gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
          /* No overlap between high target register and address
             register.  (We do this in a non-obvious way to save a
             register file writeback)  */
          if (GET_CODE (addr) == PRE_INC)
            return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
          return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
  if (optype1 == MEMOP)
      /* We have to output the address syntax ourselves, since print_operand
         doesn't deal with the addresses we want to use.  Fix this later.  */

      rtx addr = XEXP (operands[1], 0);
      if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == POST_INC)
                return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
              return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";

          /* This is an undefined situation.  We should load into the
             address register *and* update that register.  Probably
             we don't need to handle this at all.  */
          if (GET_CODE (addr) == POST_INC)
            return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
          return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
      else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          operands[1] = XEXP (addr, 0);
          gcc_assert (GET_CODE (operands[0]) == REG
                      && GET_CODE (operands[1]) == REG);

          if (!reg_overlap_mentioned_p (high_reg, addr))
              /* No overlap between high target register and address
                 register.  (We do this in a non-obvious way to
                 save a register file writeback)  */
              if (GET_CODE (addr) == PRE_INC)
                return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
              return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";

          /* This is an undefined situation.  We should load into the
             address register *and* update that register.  Probably
             we don't need to handle this at all.  */
          if (GET_CODE (addr) == PRE_INC)
            return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
          return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
      else if (GET_CODE (addr) == PLUS
               && GET_CODE (XEXP (addr, 0)) == MULT)
          rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);

          if (!reg_overlap_mentioned_p (high_reg, addr))
              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
              return "ldw 4(%0),%R0\n\tldw 0(%0),%0";

              xoperands[0] = high_reg;
              xoperands[1] = XEXP (addr, 1);
              xoperands[2] = XEXP (XEXP (addr, 0), 0);
              xoperands[3] = XEXP (XEXP (addr, 0), 1);
              output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
              return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";

  /* If an operand is an unoffsettable memory ref, find a register
     we can increment temporarily to make it refer to the second word.  */

  if (optype0 == MEMOP)
    addreg0 = find_addr_reg (XEXP (operands[0], 0));

  if (optype1 == MEMOP)
    addreg1 = find_addr_reg (XEXP (operands[1], 0));

  /* Ok, we can do one word at a time.
     Normally we do the low-numbered word first.

     In either case, set up in LATEHALF the operands to use
     for the high-numbered word and in some cases alter the
     operands in OPERANDS to be suitable for the low-numbered word.  */

  if (optype0 == REGOP)
    latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
  else if (optype0 == OFFSOP)
    latehalf[0] = adjust_address (operands[0], SImode, 4);
    latehalf[0] = operands[0];

  if (optype1 == REGOP)
    latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
  else if (optype1 == OFFSOP)
    latehalf[1] = adjust_address (operands[1], SImode, 4);
  else if (optype1 == CNSTOP)
    split_double (operands[1], &operands[1], &latehalf[1]);
    latehalf[1] = operands[1];

  /* If the first move would clobber the source of the second one,
     do them in the other order.

     This can happen in two cases:

        mem -> register where the first half of the destination register
        is the same register used in the memory's address.  Reload
        can create such insns.

        mem in this case will be either register indirect or register
        indirect plus a valid offset.

        register -> register move where REGNO(dst) == REGNO(src + 1)
        someone (Tim/Tege?) claimed this can happen for parameter loads.

     Handle mem -> register case first.  */
  if (optype0 == REGOP
      && (optype1 == MEMOP || optype1 == OFFSOP)
      && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
      /* Do the late half first.  */
      output_asm_insn ("ldo 4(%0),%0", &addreg1);
      output_asm_insn (singlemove_string (latehalf), latehalf);

      output_asm_insn ("ldo -4(%0),%0", &addreg1);
      return singlemove_string (operands);

  /* Now handle register -> register case.  */
  if (optype0 == REGOP && optype1 == REGOP
      && REGNO (operands[0]) == REGNO (operands[1]) + 1)
      output_asm_insn (singlemove_string (latehalf), latehalf);
      return singlemove_string (operands);

  /* Normal case: do the two words, low-numbered first.  */

  output_asm_insn (singlemove_string (operands), operands);

  /* Make any unoffsettable addresses point at high-numbered word.  */
    output_asm_insn ("ldo 4(%0),%0", &addreg0);
    output_asm_insn ("ldo 4(%0),%0", &addreg1);

  output_asm_insn (singlemove_string (latehalf), latehalf);

  /* Undo the adds we just did.  */
    output_asm_insn ("ldo -4(%0),%0", &addreg0);
    output_asm_insn ("ldo -4(%0),%0", &addreg1);
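/* Illustration (standalone host program, not part of this file): the
   ordering rule above, reduced to register numbers.  The split must
   emit the high-numbered half first exactly when the low move would
   clobber the source of the high move.  */
#include <stdio.h>

static void
split_reg_move (int dst, int src)
{
  if (dst == src + 1)
    {
      /* 'copy rSRC,rDST' would overwrite the second half's source.  */
      printf ("copy %%r%d,%%r%d\n", src + 1, dst + 1);  /* late half first */
      printf ("copy %%r%d,%%r%d\n", src, dst);
    }
  else
    {
      printf ("copy %%r%d,%%r%d\n", src, dst);          /* low half first */
      printf ("copy %%r%d,%%r%d\n", src + 1, dst + 1);
    }
}

int
main (void)
{
  split_reg_move (5, 4);        /* overlapping pair */
  split_reg_move (8, 4);        /* disjoint pair */
  return 0;
}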
output_fp_move_double (rtx *operands)
  if (FP_REG_P (operands[0]))
      if (FP_REG_P (operands[1])
          || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
        output_asm_insn ("fcpy,dbl %f1,%0", operands);
        output_asm_insn ("fldd%F1 %1,%0", operands);
  else if (FP_REG_P (operands[1]))
      output_asm_insn ("fstd%F0 %1,%0", operands);

      gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));

      /* This is a pain.  You have to be prepared to deal with an
         arbitrary address here including pre/post increment/decrement.

         so avoid this in the MD.  */
      gcc_assert (GET_CODE (operands[0]) == REG);

      xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
      xoperands[0] = operands[0];
      output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);

/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.  */

find_addr_reg (rtx addr)
  while (GET_CODE (addr) == PLUS)
      if (GET_CODE (XEXP (addr, 0)) == REG)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);

  gcc_assert (GET_CODE (addr) == REG);
/* Emit code to perform a block move.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is the source pointer as a REG, clobbered.
   OPERANDS[2] is a register for temporary storage.
   OPERANDS[3] is a register for temporary storage.
   OPERANDS[4] is the size as a CONST_INT
   OPERANDS[5] is the alignment safe to use, as a CONST_INT.
   OPERANDS[6] is another temporary register.  */

output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
  int align = INTVAL (operands[5]);
  unsigned long n_bytes = INTVAL (operands[4]);
  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */

      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 16);
      output_asm_insn ("ldi %4,%2", operands);

      output_asm_insn ("ldd,ma 8(%1),%3", operands);
      output_asm_insn ("ldd,ma 8(%1),%6", operands);
      output_asm_insn ("std,ma %3,8(%0)", operands);
      output_asm_insn ("addib,>= -16,%2,.-12", operands);
      output_asm_insn ("std,ma %6,8(%0)", operands);
      /* Handle the residual.  There could be up to 15 bytes of
         residual to copy!  */
      if (n_bytes % 16 != 0)
          operands[4] = GEN_INT (n_bytes % 8);
          if (n_bytes % 16 >= 8)
            output_asm_insn ("ldd,ma 8(%1),%3", operands);
          if (n_bytes % 8 != 0)
            output_asm_insn ("ldd 0(%1),%6", operands);
          if (n_bytes % 16 >= 8)
            output_asm_insn ("std,ma %3,8(%0)", operands);
          if (n_bytes % 8 != 0)
            output_asm_insn ("stdby,e %6,%4(%0)", operands);

      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 8);
      output_asm_insn ("ldi %4,%2", operands);

      output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
      output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
      output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
      output_asm_insn ("addib,>= -8,%2,.-12", operands);
      output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);

      /* Handle the residual.  There could be up to 7 bytes of
         residual to copy!  */
      if (n_bytes % 8 != 0)
          operands[4] = GEN_INT (n_bytes % 4);
          if (n_bytes % 8 >= 4)
            output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
          if (n_bytes % 4 != 0)
            output_asm_insn ("ldw 0(%1),%6", operands);
          if (n_bytes % 8 >= 4)
            output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
          if (n_bytes % 4 != 0)
            output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);

      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 4);
      output_asm_insn ("ldi %4,%2", operands);

      output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
      output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
      output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
      output_asm_insn ("addib,>= -4,%2,.-12", operands);
      output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 4 != 0)
          if (n_bytes % 4 >= 2)
            output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
          if (n_bytes % 2 != 0)
            output_asm_insn ("ldb 0(%1),%6", operands);
          if (n_bytes % 4 >= 2)
            output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
          if (n_bytes % 2 != 0)
            output_asm_insn ("stb %6,0(%0)", operands);

      /* Pre-adjust the loop counter.  */
      operands[4] = GEN_INT (n_bytes - 2);
      output_asm_insn ("ldi %4,%2", operands);

      output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
      output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
      output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
      output_asm_insn ("addib,>= -2,%2,.-12", operands);
      output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 2 != 0)
          output_asm_insn ("ldb 0(%1),%3", operands);
          output_asm_insn ("stb %3,0(%0)", operands);
/* Count the number of insns necessary to handle this block move.

   Basic structure is the same as output_block_move, except that we
   count insns rather than emit them.  */

compute_movmem_length (rtx insn)
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
  unsigned int n_insns = 0;
  /* We can't move more than a word at a time because the PA
     has no integer move insns wider than a word.  (Could use fp mem ops?)  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic copying loop.  */

  if (n_bytes % (2 * align) != 0)
      if ((n_bytes % (2 * align)) >= align)

      if ((n_bytes % align) != 0)

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
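/* Illustration (standalone host program, not part of this file): the
   length computation above as one function.  The counts added by the
   elided lines — a six-insn copy loop plus two insns per residual
   step — follow the full version of this function; treat them as an
   assumption of this sketch.  */
#include <stdio.h>

static unsigned
movmem_length (unsigned long n_bytes, unsigned long align, int target_64bit)
{
  unsigned n_insns = 0;

  if (align > (target_64bit ? 8ul : 4ul))
    align = target_64bit ? 8 : 4;

  n_insns += 6;                         /* the basic copying loop */

  if (n_bytes % (2 * align) != 0)
    {
      if ((n_bytes % (2 * align)) >= align)
        n_insns += 2;                   /* one extra load/store pair */
      if ((n_bytes % align) != 0)
        n_insns += 2;                   /* sub-word residual */
    }

  return n_insns * 4;                   /* each insn is 4 bytes */
}

int
main (void)
{
  printf ("%u bytes of insns\n", movmem_length (37, 4, 0));
  return 0;
}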
/* Emit code to perform a block clear.

   OPERANDS[0] is the destination pointer as a REG, clobbered.
   OPERANDS[1] is a register for temporary storage.
   OPERANDS[2] is the size as a CONST_INT
   OPERANDS[3] is the alignment safe to use, as a CONST_INT.  */

output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
  int align = INTVAL (operands[3]);
  unsigned long n_bytes = INTVAL (operands[2]);
  /* We can't clear more than a word at a time because the PA
     has no integer move insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* Note that we know each loop below will execute at least twice
     (else we would have open-coded the copy).  */

      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 16);
      output_asm_insn ("ldi %2,%1", operands);

      output_asm_insn ("std,ma %%r0,8(%0)", operands);
      output_asm_insn ("addib,>= -16,%1,.-4", operands);
      output_asm_insn ("std,ma %%r0,8(%0)", operands);
      /* Handle the residual.  There could be up to 15 bytes of
         residual to clear!  */
      if (n_bytes % 16 != 0)
          operands[2] = GEN_INT (n_bytes % 8);
          if (n_bytes % 16 >= 8)
            output_asm_insn ("std,ma %%r0,8(%0)", operands);
          if (n_bytes % 8 != 0)
            output_asm_insn ("stdby,e %%r0,%2(%0)", operands);

      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 8);
      output_asm_insn ("ldi %2,%1", operands);

      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
      output_asm_insn ("addib,>= -8,%1,.-4", operands);
      output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);

      /* Handle the residual.  There could be up to 7 bytes of
         residual to clear!  */
      if (n_bytes % 8 != 0)
          operands[2] = GEN_INT (n_bytes % 4);
          if (n_bytes % 8 >= 4)
            output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
          if (n_bytes % 4 != 0)
            output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);

      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 4);
      output_asm_insn ("ldi %2,%1", operands);

      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
      output_asm_insn ("addib,>= -4,%1,.-4", operands);
      output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 4 != 0)
          if (n_bytes % 4 >= 2)
            output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
          if (n_bytes % 2 != 0)
            output_asm_insn ("stb %%r0,0(%0)", operands);

      /* Pre-adjust the loop counter.  */
      operands[2] = GEN_INT (n_bytes - 2);
      output_asm_insn ("ldi %2,%1", operands);

      output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
      output_asm_insn ("addib,>= -2,%1,.-4", operands);
      output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);

      /* Handle the residual.  */
      if (n_bytes % 2 != 0)
        output_asm_insn ("stb %%r0,0(%0)", operands);
/* Count the number of insns necessary to handle this block clear.

   Basic structure is the same as output_block_clear, except that we
   count insns rather than emit them.  */
compute_clrmem_length (rtx insn)
  rtx pat = PATTERN (insn);
  unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
  unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
  unsigned int n_insns = 0;
  /* We can't clear more than a word at a time because the PA
     has no integer move insns wider than a word.  */
  if (align > (TARGET_64BIT ? 8 : 4))
    align = (TARGET_64BIT ? 8 : 4);

  /* The basic loop.  */

  if (n_bytes % (2 * align) != 0)
      if ((n_bytes % (2 * align)) >= align)

      if ((n_bytes % align) != 0)

  /* Lengths are expressed in bytes now; each insn is 4 bytes.  */
output_and (rtx *operands)
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < 32; ls0++)
        if ((mask & (1 << ls0)) == 0)

      for (ls1 = ls0; ls1 < 32; ls1++)
        if ((mask & (1 << ls1)) != 0)

      for (ms0 = ls1; ms0 < 32; ms0++)
        if ((mask & (1 << ms0)) == 0)

      gcc_assert (ms0 == 32);

          operands[2] = GEN_INT (len);
          return "{extru|extrw,u} %1,31,%2,%0";

          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "{depi|depwi} 0,%2,%3,%0";

  return "and %1,%2,%0";
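/* Illustration (standalone host program, not part of this file): the
   mask analysis above.  A 32-bit AND collapses to one extru or depwi
   exactly when the zero bits of the mask form a single contiguous
   block; the insn predicates guarantee that in the compiler, so the
   fallback here stands in for the gcc_assert.  */
#include <stdio.h>

static const char *
and_mask_form (unsigned mask, int *p, int *len)
{
  int ls0, ls1, ms0;

  for (ls0 = 0; ls0 < 32; ls0++)        /* first zero bit */
    if ((mask & (1u << ls0)) == 0)
      break;
  for (ls1 = ls0; ls1 < 32; ls1++)      /* first one bit above it */
    if ((mask & (1u << ls1)) != 0)
      break;
  for (ms0 = ls1; ms0 < 32; ms0++)      /* must stay ones to the top */
    if ((mask & (1u << ms0)) == 0)
      return "and";                     /* zeros not contiguous */

  if (ls1 == 32)                        /* ones only at the bottom */
    {
      *len = ls0;
      return "extru";                   /* extract the low LS0 bits */
    }

  *p = 31 - ls0;                        /* otherwise clear the zero block */
  *len = ls1 - ls0;
  return "depwi 0";
}

int
main (void)
{
  int p = 0, len = 0;
  const char *form = and_mask_form (0xffff00ffu, &p, &len);
  printf ("%s p=%d len=%d\n", form, p, len);
  return 0;
}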
/* Return a string to perform a bitwise-and of operands[1] with operands[2]
   storing the result in operands[0].  */

output_64bit_and (rtx *operands)
  if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
      unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
      int ls0, ls1, ms0, p, len;

      for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)

      for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)

      for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
        if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)

      gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);

      if (ls1 == HOST_BITS_PER_WIDE_INT)

          operands[2] = GEN_INT (len);
          return "extrd,u %1,63,%2,%0";

          /* We could use this `depi' for the case above as well, but `depi'
             requires one more register file access than an `extru'.  */

          operands[2] = GEN_INT (p);
          operands[3] = GEN_INT (len);
          return "depdi 0,%2,%3,%0";

  return "and %1,%2,%0";
output_ior (rtx *operands)
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < 32; bs0++)
    if ((mask & (1 << bs0)) != 0)

  for (bs1 = bs0; bs1 < 32; bs1++)
    if ((mask & (1 << bs1)) == 0)

  gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "{depi|depwi} -1,%2,%3,%0";
/* Return a string to perform a bitwise inclusive-or of operands[1] with
   operands[2], storing the result in operands[0].  */
output_64bit_ior (rtx *operands)
  unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
  int bs0, bs1, p, len;

  if (INTVAL (operands[2]) == 0)
    return "copy %1,%0";

  for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)

  for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
    if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)

  gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
              || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);

  operands[2] = GEN_INT (p);
  operands[3] = GEN_INT (len);
  return "depdi -1,%2,%3,%0";
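/* Illustration (standalone host program, not part of this file): the
   IOR analysis above.  The insn predicates only allow masks that are a
   single contiguous run of ones, so the result is always one depwi
   (or depdi) of -1 into that run.  */
#include <stdio.h>

static void
ior_mask_form (unsigned mask, int *p, int *len)
{
  int bs0, bs1;

  for (bs0 = 0; bs0 < 32; bs0++)        /* first one bit */
    if ((mask & (1u << bs0)) != 0)
      break;
  for (bs1 = bs0; bs1 < 32; bs1++)      /* first zero bit above it */
    if ((mask & (1u << bs1)) == 0)
      break;

  *p = 31 - bs0;                        /* position from the left */
  *len = bs1 - bs0;                     /* run length */
}

int
main (void)
{
  int p, len;
  ior_mask_form (0x00ffff00u, &p, &len);
  printf ("depwi -1,%d,%d\n", p, len);  /* sets bits 8..23 */
  return 0;
}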
/* Target hook for assembling integer objects.  This code handles
   aligned SI and DI integers specially since function references
   must be preceded by P%.  */

pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
  if (size == UNITS_PER_WORD
      && function_label_operand (x, VOIDmode))
      fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
      output_addr_const (asm_out_file, x);
      fputc ('\n', asm_out_file);

  return default_assemble_integer (x, size, aligned_p);
/* Output an ascii string.  */

output_ascii (FILE *file, const char *p, int size)
  unsigned char partial_output[16];     /* Max space 4 chars can occupy.  */

  /* The HP assembler can only take strings of 256 characters at one
     time.  This is a limitation on input line length, *not* the
     length of the string.  Sigh.  Even worse, it seems that the
     restriction is in number of input characters (see \xnn &
     \whatever).  So we have to do this very carefully.  */

  fputs ("\t.STRING \"", file);

  for (i = 0; i < size; i += 4)
      for (io = 0, co = 0; io < MIN (4, size - i); io++)
          register unsigned int c = (unsigned char) p[i + io];

          if (c == '\"' || c == '\\')
            partial_output[co++] = '\\';
          if (c >= ' ' && c < 0177)
            partial_output[co++] = c;

              partial_output[co++] = '\\';
              partial_output[co++] = 'x';
              hexd = c / 16 - 0 + '0';
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;
              hexd = c % 16 - 0 + '0';
                hexd -= '9' - 'a' + 1;
              partial_output[co++] = hexd;

      if (chars_output + co > 243)
          fputs ("\"\n\t.STRING \"", file);

      fwrite (partial_output, 1, (size_t) co, file);

  fputs ("\"\n", file);
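/* Illustration (standalone host program, not part of this file): the
   chunking discipline above.  The HP assembler limits input line
   length, not string length, so the emitter counts characters as
   written — escapes included — and opens a fresh .STRING directive
   before the 243-character budget would be exceeded.  */
#include <stdio.h>

static void
emit_string (FILE *file, const unsigned char *p, int size)
{
  int i, chars_output = 0;

  fputs ("\t.STRING \"", file);
  for (i = 0; i < size; i++)
    {
      char buf[8];
      int co = 0;
      unsigned c = p[i];

      if (c == '"' || c == '\\')
        {
          buf[co++] = '\\';
          buf[co++] = (char) c;
        }
      else if (c >= ' ' && c < 0177)
        buf[co++] = (char) c;
      else
        co = sprintf (buf, "\\x%02x", c);       /* non-printing: hex escape */

      if (chars_output + co > 243)              /* start a new input line */
        {
          fputs ("\"\n\t.STRING \"", file);
          chars_output = 0;
        }
      fwrite (buf, 1, (size_t) co, file);
      chars_output += co;
    }
  fputs ("\"\n", file);
}

int
main (void)
{
  emit_string (stdout, (const unsigned char *) "a \"quoted\"\tstring", 17);
  return 0;
}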
/* Try to rewrite floating point comparisons & branches to avoid
   useless add,tr insns.

   CHECK_NOTES is nonzero if we should examine REG_DEAD notes
   to see if FPCC is dead.  CHECK_NOTES is nonzero for the
   first attempt to remove useless add,tr insns.  It is zero
   for the second pass as reorg sometimes leaves bogus REG_DEAD

   When CHECK_NOTES is zero we can only eliminate add,tr insns
   when there's a 1:1 correspondence between fcmp and ftest/fbranch

remove_useless_addtr_insns (int check_notes)
  static int pass = 0;

  /* This is fairly cheap, so always run it when optimizing.  */

      int fbranch_count = 0;

      /* Walk all the insns in this function looking for fcmp & fbranch
         instructions.  Keep track of how many of each we find.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))

          /* Ignore anything that isn't an INSN or a JUMP_INSN.  */
          if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)

          /* If the destination is CCFP, then we've found an fcmp insn.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)

          tmp = PATTERN (insn);
          /* If this is an fbranch instruction, bump the fbranch counter.  */
          if (GET_CODE (tmp) == SET
              && SET_DEST (tmp) == pc_rtx
              && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
              && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
              && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
              && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)

      /* Find all floating point compare + branch insns.  If possible,
         reverse the comparison & the branch to avoid add,tr insns.  */
      for (insn = get_insns (); insn; insn = next_insn (insn))

          /* Ignore anything that isn't an INSN.  */
          if (GET_CODE (insn) != INSN)

          tmp = PATTERN (insn);

          /* It must be a set.  */
          if (GET_CODE (tmp) != SET)

          /* The destination must be CCFP, which is register zero.  */
          tmp = SET_DEST (tmp);
          if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)

          /* INSN should be a set of CCFP.

             See if the result of this insn is used in a reversed FP
             conditional branch.  If so, reverse our condition and
             the branch.  Doing so avoids useless add,tr insns.  */
          next = next_insn (insn);

              /* Jumps, calls and labels stop our search.  */
              if (GET_CODE (next) == JUMP_INSN
                  || GET_CODE (next) == CALL_INSN
                  || GET_CODE (next) == CODE_LABEL)

              /* As does another fcmp insn.  */
              if (GET_CODE (next) == INSN
                  && GET_CODE (PATTERN (next)) == SET
                  && GET_CODE (SET_DEST (PATTERN (next))) == REG
                  && REGNO (SET_DEST (PATTERN (next))) == 0)

              next = next_insn (next);

          /* Is NEXT_INSN a branch?  */
              && GET_CODE (next) == JUMP_INSN)
              rtx pattern = PATTERN (next);
              /* If it is a reversed fp conditional branch (e.g. it uses
                 add,tr) and CCFP dies, then reverse our conditional and
                 the branch to avoid the add,tr.  */
              if (GET_CODE (pattern) == SET
                  && SET_DEST (pattern) == pc_rtx
                  && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
                  && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
                  && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
                  && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
                  && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
                  && (fcmp_count == fbranch_count
                      && find_regno_note (next, REG_DEAD, 0))))
                  /* Reverse the branch.  */
                  tmp = XEXP (SET_SRC (pattern), 1);
                  XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
                  XEXP (SET_SRC (pattern), 2) = tmp;
                  INSN_CODE (next) = -1;

                  /* Reverse our condition.  */
                  tmp = PATTERN (insn);
                  PUT_CODE (XEXP (tmp, 1),
                            (reverse_condition_maybe_unordered
                             (GET_CODE (XEXP (tmp, 1)))));
/* You may have trouble believing this, but this is the 32 bit HP-PA

   Variable arguments (optional; any number may be allocated)

   SP-(4*(N+9))         arg word N

   Fixed arguments (must be allocated; may remain unused)

   SP-32                External Data Pointer (DP)
   SP-24                External/stub RP (RP')
   SP-8                 Calling Stub RP (RP'')
   SP-0                 Stack Pointer (points to next available address)

/* This function saves registers as follows.  Registers marked with ' are
   this function's registers (as opposed to the previous function's).
   If a frame_pointer isn't needed, r4 is saved as a general register;
   the space for the frame pointer is still allocated, though, to keep

   SP (FP')             Previous FP
   SP + 4               Alignment filler (sigh)
   SP + 8               Space for locals reserved here.

   SP + n               All call saved register used.

   SP + o               All call saved fp registers used.

   SP + p (SP')         points to next available address.
/* Global variables set by output_function_prologue().  */
/* Size of frame.  Need to know this to emit return insns from
static HOST_WIDE_INT actual_fsize, local_fsize;
static int save_fregs;

/* Emit RTL to store REG at the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.

   Note in DISP > 8k case, we will leave the high part of the address
   in %r1.  There is code in expand_hppa_{prologue,epilogue} that knows this.  */

store_reg (int reg, HOST_WIDE_INT disp, int base)
  rtx insn, dest, src, basereg;

  src = gen_rtx_REG (word_mode, reg);
  basereg = gen_rtx_REG (Pmode, base);
  if (VAL_14_BITS_P (disp))
      dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
      insn = emit_move_insn (dest, src);
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));

            = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                 gen_rtx_SET (VOIDmode, tmpreg,
                                              gen_rtx_PLUS (Pmode, basereg, delta)),
          RTX_FRAME_RELATED_P (insn) = 1;

      dest = gen_rtx_MEM (word_mode, tmpreg);
      insn = emit_move_insn (dest, src);

      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
      insn = emit_move_insn (dest, src);

            = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                 gen_rtx_SET (VOIDmode,
                                              gen_rtx_MEM (word_mode,
                                                           gen_rtx_PLUS (word_mode, basereg,

    RTX_FRAME_RELATED_P (insn) = 1;
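/* Illustration (standalone host program, not part of this file): the
   displacement split implied above.  A displacement that fits 14 bits
   is used directly; otherwise it is split into a left part for
   ldil/addil and a low 11-bit right part for the lo_sum, such that
   left + right == disp.  Positive displacements only; the compiler
   also copes with negative and 64-bit cases.  */
#include <stdio.h>

static void
split_disp (long disp, long *left, long *right)
{
  *right = disp & 0x7ff;        /* low 11 bits: the R'disp field */
  *left = disp - *right;        /* the rest: the L'disp field */
}

int
main (void)
{
  long l, r, disp = 123456;
  split_disp (disp, &l, &r);
  printf ("L'%ld + R'%ld = %ld\n", l, r, l + r);
  return 0;
}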
/* Emit RTL to store REG at the memory location specified by BASE and then
   add MOD to BASE.  MOD must be <= 8k.  */

store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
  rtx insn, basereg, srcreg, delta;

  gcc_assert (VAL_14_BITS_P (mod));

  basereg = gen_rtx_REG (Pmode, base);
  srcreg = gen_rtx_REG (word_mode, reg);
  delta = GEN_INT (mod);

  insn = emit_insn (gen_post_store (basereg, srcreg, delta));

      RTX_FRAME_RELATED_P (insn) = 1;

      /* RTX_FRAME_RELATED_P must be set on each frame related set
         in a parallel with more than one element.  */
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
      RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
/* Emit RTL to set REG to the value specified by BASE+DISP.  Handle case
   where DISP > 8k by using the add_high_const patterns.  NOTE indicates
   whether to add a frame note or not.

   In the DISP > 8k case, we leave the high part of the address in %r1.
   There is code in expand_hppa_{prologue,epilogue} that knows about this.  */

set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
  if (VAL_14_BITS_P (disp))
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             plus_constant (gen_rtx_REG (Pmode, base), disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_PLUS (Pmode, tmpreg, basereg));

            = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                 gen_rtx_SET (VOIDmode, tmpreg,
                                              gen_rtx_PLUS (Pmode, basereg, delta)),

      rtx basereg = gen_rtx_REG (Pmode, base);
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg,
                      gen_rtx_PLUS (Pmode, basereg,
                                    gen_rtx_HIGH (Pmode, delta)));
      insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
                             gen_rtx_LO_SUM (Pmode, tmpreg, delta));

  if (DO_FRAME_NOTES && note)
    RTX_FRAME_RELATED_P (insn) = 1;
compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
  /* The code in hppa_expand_prologue and hppa_expand_epilogue must
     be consistent with the rounding and size calculation done here.
     Change them at the same time.  */

  /* We do our own stack alignment.  First, round the size of the
     stack locals up to a word boundary.  */
  size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);

  /* Space for previous frame pointer + filler.  If any frame is
     allocated, we need to add in the STARTING_FRAME_OFFSET.  We
     waste some space here for the sake of HP compatibility.  The
     first slot is only used when the frame pointer is needed.  */
  if (size || frame_pointer_needed)
    size += STARTING_FRAME_OFFSET;

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (DO_FRAME_NOTES && current_function_calls_eh_return)
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)

      size += i * UNITS_PER_WORD;

  /* Account for space used by the callee general register saves.  */
  for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
    if (regs_ever_live[i])
      size += UNITS_PER_WORD;

  /* Account for space used by the callee floating point register saves.  */
  for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
    if (regs_ever_live[i]
        || (!TARGET_64BIT && regs_ever_live[i + 1]))

        /* We always save both halves of the FP register, so always
           increment the frame size by 8 bytes.  */

  /* If any of the floating registers are saved, account for the
     alignment needed for the floating point register save block.  */
      size = (size + 7) & ~7;

  /* The various ABIs include space for the outgoing parameters in the
     size of the current function's stack frame.  We don't need to align
     for the outgoing arguments as their alignment is set by the final
     rounding for the frame as a whole.  */
  size += current_function_outgoing_args_size;

  /* Allocate space for the fixed frame marker.  This space must be
     allocated for any function that makes calls or allocates
  if (!current_function_is_leaf || size)
    size += TARGET_64BIT ? 48 : 32;

  /* Finally, round to the preferred stack boundary.  */
  return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
          & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
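/* Illustration (standalone host program, not part of this file): the
   rounding skeleton of compute_frame_size, with the register-save
   scans reduced to byte counts passed in.  The constants model the
   32-bit case — 4-byte words, an 8-byte FP slot plus filler, a 32-byte
   frame marker, and a 64-byte preferred stack boundary — and should be
   read as assumptions of this sketch.  */
#include <stdio.h>

static long
frame_size (long locals, long gr_save_bytes, long fp_save_bytes,
            long outgoing_args, int leaf)
{
  long size = (locals + 3) & ~3L;       /* round locals to a word */

  if (size)
    size += 8;                          /* previous FP slot + filler */

  size += gr_save_bytes;
  if (fp_save_bytes)
    size = ((size + 7) & ~7L) + fp_save_bytes;  /* align the FP block */

  size += outgoing_args;
  if (!leaf || size)
    size += 32;                         /* fixed frame marker */

  return (size + 63) & ~63L;            /* preferred stack boundary */
}

int
main (void)
{
  printf ("%ld\n", frame_size (100, 5 * 4, 0, 16, 0));
  return 0;
}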
/* Generate the assembly code for function entry.  FILE is a stdio
   stream to output the code to.  SIZE is an int: how many units of
   temporary storage to allocate.

   Refer to the array `regs_ever_live' to determine which registers to
   save; `regs_ever_live[I]' is nonzero if register number I is ever
   used in the function.  This function is responsible for knowing
   which registers should not be saved even if used.  */

/* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
   of memory.  If any fpu reg is used in the function, we allocate
   such a block here, at the bottom of the frame, just in case it's needed.

   If this function is a leaf procedure, then we may choose not
   to do a "save" insn.  The decision about whether or not
   to do this is made in regclass.c.  */

pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
  /* The function's label and associated .PROC must never be
     separated and must be output *after* any profiling declarations
     to avoid changing spaces/subspaces within a procedure.  */
  ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
  fputs ("\t.PROC\n", file);

  /* hppa_expand_prologue does the dirty work now.  We just need
     to output the assembler directives which denote the start
  fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
  if (regs_ever_live[2])
    fputs (",CALLS,SAVE_RP", file);
    fputs (",NO_CALLS", file);

  /* The SAVE_SP flag is used to indicate that register %r3 is stored
     at the beginning of the frame and that it is used as the frame
     pointer for the frame.  We do this because our current frame
     layout doesn't conform to that specified in the HP runtime
     documentation and we need a way to indicate to programs such as
     GDB where %r3 is saved.  The SAVE_SP flag was chosen because it
     isn't used by HP compilers but is supported by the assembler.
     However, SAVE_SP is supposed to indicate that the previous stack
     pointer has been saved in the frame marker.  */
  if (frame_pointer_needed)
    fputs (",SAVE_SP", file);

  /* Pass on information about the number of callee register saves
     performed in the prologue.

     The compiler is supposed to pass the highest register number
     saved, the assembler then has to adjust that number before
     entering it into the unwind descriptor (to account for any
     caller saved registers with lower register numbers than the
     first callee saved register).  */
    fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);

    fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);

  fputs ("\n\t.ENTRY\n", file);

  remove_useless_addtr_insns (0);
hppa_expand_prologue (void)
  int merge_sp_adjust_with_store = 0;
  HOST_WIDE_INT size = get_frame_size ();
  HOST_WIDE_INT offset;

  /* Compute total size for frame pointer, filler, locals and rounding to
     the next word boundary.  Similar code appears in compute_frame_size
     and must be changed in tandem with this code.  */
  local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
  if (local_fsize || frame_pointer_needed)
    local_fsize += STARTING_FRAME_OFFSET;

  actual_fsize = compute_frame_size (size, &save_fregs);

  /* Compute a few things we will use often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Save RP first.  The calling conventions manual states RP will
     always be stored into the caller's frame at sp - 20 or sp - 16
     depending on which ABI is in use.  */
  if (regs_ever_live[2] || current_function_calls_eh_return)
    store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);

  /* Allocate the local frame and set up the frame pointer if needed.  */
  if (actual_fsize != 0)
      if (frame_pointer_needed)
          /* Copy the old frame pointer temporarily into %r1.  Set up the
             new stack pointer, then store away the saved old frame pointer
             into the stack at sp and at the same time update the stack
             pointer by actual_fsize bytes.  Two versions, first
             handles small (<8k) frames.  The second handles large (>=8k)
          insn = emit_move_insn (tmpreg, frame_pointer_rtx);
            RTX_FRAME_RELATED_P (insn) = 1;

          insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
            RTX_FRAME_RELATED_P (insn) = 1;

          if (VAL_14_BITS_P (actual_fsize))
            store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
              /* It is incorrect to store the saved frame pointer at *sp,
                 then increment sp (writes beyond the current stack boundary).

                 So instead use stwm to store at *sp and post-increment the
                 stack pointer as an atomic operation.  Then increment sp to
                 finish allocating the new frame.  */
              HOST_WIDE_INT adjust1 = 8192 - 64;
              HOST_WIDE_INT adjust2 = actual_fsize - adjust1;

              store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
              set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,

          /* We set SAVE_SP in frames that need a frame pointer.  Thus,
             we need to store the previous stack pointer (frame pointer)
             into the frame marker on targets that use the HP unwind
             library.  This allows the HP unwind library to be used to
             unwind GCC frames.  However, we are not fully compatible
             with the HP library because our frame layout differs from
             that specified in the HP runtime specification.

             We don't want a frame note on this instruction as the frame
             marker moves during dynamic stack allocation.

             This instruction also serves as a blockage to prevent
             register spills from being scheduled before the stack
             pointer is raised.  This is necessary as we store
             registers using the frame pointer as a base register,
             and the frame pointer is set before sp is raised.  */
          if (TARGET_HPUX_UNWIND_LIBRARY)
              rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
                                       GEN_INT (TARGET_64BIT ? -8 : -4));

              emit_move_insn (gen_rtx_MEM (word_mode, addr),

          emit_insn (gen_blockage ());

      /* no frame pointer needed.  */

          /* In some cases we can perform the first callee register save
             and allocating the stack frame at the same time.   If so, just
             make a note of it and defer allocating the frame until saving
             the callee registers.  */
          if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
            merge_sp_adjust_with_store = 1;
          /* Cannot optimize.  Adjust the stack frame by actual_fsize
             bytes.  */
            set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
  /* Normal register save.

     Do not save the frame pointer in the frame_pointer_needed case.  It
     was done earlier.  */
  if (frame_pointer_needed)
      offset = local_fsize;

      /* Saving the EH return data registers in the frame is the simplest
         way to get the frame unwind information emitted.  We put them
         just before the general registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
          unsigned int i, regno;

              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)

              store_reg (regno, offset, FRAME_POINTER_REGNUM);
              offset += UNITS_PER_WORD;

      for (i = 18; i >= 4; i--)
        if (regs_ever_live[i] && ! call_used_regs[i])
            store_reg (i, offset, FRAME_POINTER_REGNUM);
            offset += UNITS_PER_WORD;

      /* Account for %r3 which is saved in a special place.  */

  /* No frame pointer needed.  */

      offset = local_fsize - actual_fsize;

      /* Saving the EH return data registers in the frame is the simplest
         way to get the frame unwind information emitted.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
          unsigned int i, regno;

              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)

              /* If merge_sp_adjust_with_store is nonzero, then we can
                 optimize the first save.  */
              if (merge_sp_adjust_with_store)
                  store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
                  merge_sp_adjust_with_store = 0;
                store_reg (regno, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;

      for (i = 18; i >= 3; i--)
        if (regs_ever_live[i] && ! call_used_regs[i])
            /* If merge_sp_adjust_with_store is nonzero, then we can
               optimize the first GR save.  */
            if (merge_sp_adjust_with_store)
                store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
                merge_sp_adjust_with_store = 0;
              store_reg (i, offset, STACK_POINTER_REGNUM);
            offset += UNITS_PER_WORD;

  /* If we wanted to merge the SP adjustment with a GR save, but we never
     did any GR saves, then just emit the adjustment here.  */
  if (merge_sp_adjust_with_store)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,

  /* The hppa calling conventions say that %r19, the pic offset
     register, is saved at sp - 32 (in this function's frame)
     when generating PIC code.  FIXME:  What is the correct thing
     to do for functions which make no calls and allocate no
     frame?  Do we need to allocate a frame, or can we just omit
     the save?   For now we'll just omit the save.

     We don't want a note on this insn as the frame marker can
     move if there is a dynamic stack allocation.  */
  if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
      rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));

      emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* Floating point register store.  */

      /* First get the frame or stack pointer to the start of the FP register
      if (frame_pointer_needed)
          set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
          base = frame_pointer_rtx;

          set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
          base = stack_pointer_rtx;

      /* Now actually save the FP registers.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
          if (regs_ever_live[i]
              || (! TARGET_64BIT && regs_ever_live[i + 1]))
              rtx addr, insn, reg;
              addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
              reg = gen_rtx_REG (DFmode, i);
              insn = emit_move_insn (addr, reg);
                  RTX_FRAME_RELATED_P (insn) = 1;
                      rtx mem = gen_rtx_MEM (DFmode,
                                             plus_constant (base, offset));
                        = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                             gen_rtx_SET (VOIDmode, mem, reg),
                      rtx meml = gen_rtx_MEM (SFmode,
                                              plus_constant (base, offset));
                      rtx memr = gen_rtx_MEM (SFmode,
                                              plus_constant (base, offset + 4));
                      rtx regl = gen_rtx_REG (SFmode, i);
                      rtx regr = gen_rtx_REG (SFmode, i + 1);
                      rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
                      rtx setr = gen_rtx_SET (VOIDmode, memr, regr);

                      RTX_FRAME_RELATED_P (setl) = 1;
                      RTX_FRAME_RELATED_P (setr) = 1;
                      vec = gen_rtvec (2, setl, setr);
                        = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
                                             gen_rtx_SEQUENCE (VOIDmode, vec),

              offset += GET_MODE_SIZE (DFmode);
/* Emit RTL to load REG from the memory location specified by BASE+DISP.
   Handle case where DISP > 8k by using the add_high_const patterns.  */

load_reg (int reg, HOST_WIDE_INT disp, int base)
  rtx dest = gen_rtx_REG (word_mode, reg);
  rtx basereg = gen_rtx_REG (Pmode, base);

  if (VAL_14_BITS_P (disp))
    src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
  else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
      rtx delta = GEN_INT (disp);
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, delta);
      if (TARGET_DISABLE_INDEXING)
          emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
          src = gen_rtx_MEM (word_mode, tmpreg);
        src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));

      rtx delta = GEN_INT (disp);
      rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
      rtx tmpreg = gen_rtx_REG (Pmode, 1);

      emit_move_insn (tmpreg, high);
      src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));

  emit_move_insn (dest, src);
/* Update the total code bytes output to the text section.  */

update_total_code_bytes (int nbytes)
  if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
      && !IN_NAMED_SECTION_P (cfun->decl))
      if (INSN_ADDRESSES_SET_P ())
          unsigned long old_total = total_code_bytes;

          total_code_bytes += nbytes;

          /* Be prepared to handle overflows.  */
          if (old_total > total_code_bytes)
            total_code_bytes = -1;
        total_code_bytes = -1;
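/* Illustration (standalone host program, not part of this file): the
   overflow discipline above — a running unsigned total that saturates
   to -1 (all ones) on wraparound or when insn addresses are not
   available, so later consumers can see the count is unreliable.  */
#include <stdio.h>

static unsigned long total_code_bytes;

static void
update_total (long nbytes, int have_addresses)
{
  unsigned long old_total = total_code_bytes;

  if (!have_addresses)
    {
      total_code_bytes = -1;    /* unknown size: poison the total */
      return;
    }

  total_code_bytes += nbytes;
  if (old_total > total_code_bytes)
    total_code_bytes = -1;      /* wrapped: saturate */
}

int
main (void)
{
  update_total (4096, 1);
  printf ("%lu\n", total_code_bytes);
  return 0;
}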
/* This function generates the assembly code for function exit.
   Args are as for output_function_prologue ().

   The function epilogue should not depend on the current stack
   pointer!  It should use the frame pointer only.  This is mandatory
   because of alloca; we also take advantage of it to omit stack
   adjustments before returning.  */

pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
  rtx insn = get_last_insn ();

  /* hppa_expand_epilogue does the dirty work now.  We just need
     to output the assembler directives which denote the end

     To make debuggers happy, emit a nop if the epilogue was completely
     eliminated due to a volatile call as the last insn in the
     current function.  That way the return address (in %r2) will
     always point to a valid instruction in the current function.  */

  /* Get the last real insn.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_real_insn (insn);

  /* If it is a sequence, then look inside.  */
  if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
    insn = XVECEXP (PATTERN (insn), 0, 0);

  /* If insn is a CALL_INSN, then it must be a call to a volatile
     function (otherwise there would be epilogue insns).  */
  if (insn && GET_CODE (insn) == CALL_INSN)
      fputs ("\tnop\n", file);

  fputs ("\t.EXIT\n\t.PROCEND\n", file);

  if (TARGET_SOM && TARGET_GAS)
      /* We're done with this subspace except possibly for some additional
         debug information.  Forget that we are in this subspace to ensure
         that the next function is output in its own subspace.  */
      cfun->machine->in_nsubspa = 2;

  if (INSN_ADDRESSES_SET_P ())
      insn = get_last_nonnote_insn ();
      last_address += INSN_ADDRESSES (INSN_UID (insn));
        last_address += insn_default_length (insn);
      last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
                      & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));

  /* Finally, update the total number of code bytes output so far.  */
  update_total_code_bytes (last_address);
hppa_expand_epilogue (void)
  HOST_WIDE_INT offset;
  HOST_WIDE_INT ret_off = 0;

  int merge_sp_adjust_with_load = 0;

  /* We will use this often.  */
  tmpreg = gen_rtx_REG (word_mode, 1);

  /* Try to restore RP early to avoid load/use interlocks when
     RP gets used in the return (bv) instruction.  This appears to still
     be necessary even when we schedule the prologue and epilogue.  */
  if (regs_ever_live [2] || current_function_calls_eh_return)
      ret_off = TARGET_64BIT ? -16 : -20;
      if (frame_pointer_needed)
          load_reg (2, ret_off, FRAME_POINTER_REGNUM);

          /* No frame pointer, and stack is smaller than 8k.  */
          if (VAL_14_BITS_P (ret_off - actual_fsize))
              load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);

  /* General register restores.  */
  if (frame_pointer_needed)
      offset = local_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
          unsigned int i, regno;

              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)

              load_reg (regno, offset, FRAME_POINTER_REGNUM);
              offset += UNITS_PER_WORD;

      for (i = 18; i >= 4; i--)
        if (regs_ever_live[i] && ! call_used_regs[i])
            load_reg (i, offset, FRAME_POINTER_REGNUM);
            offset += UNITS_PER_WORD;

      offset = local_fsize - actual_fsize;

      /* If the current function calls __builtin_eh_return, then we need
         to restore the saved EH data registers.  */
      if (DO_FRAME_NOTES && current_function_calls_eh_return)
          unsigned int i, regno;

              regno = EH_RETURN_DATA_REGNO (i);
              if (regno == INVALID_REGNUM)

              /* Only for the first load.
                 merge_sp_adjust_with_load holds the register load
                 with which we will merge the sp adjustment.  */
              if (merge_sp_adjust_with_load == 0
                  && VAL_14_BITS_P (-actual_fsize))
                merge_sp_adjust_with_load = regno;
                load_reg (regno, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;

      for (i = 18; i >= 3; i--)
          if (regs_ever_live[i] && ! call_used_regs[i])
              /* Only for the first load.
                 merge_sp_adjust_with_load holds the register load
                 with which we will merge the sp adjustment.  */
              if (merge_sp_adjust_with_load == 0
                  && VAL_14_BITS_P (-actual_fsize))
                merge_sp_adjust_with_load = i;
                load_reg (i, offset, STACK_POINTER_REGNUM);
              offset += UNITS_PER_WORD;

  /* Align pointer properly (doubleword boundary).  */
  offset = (offset + 7) & ~7;

  /* FP register restores.  */

      /* Adjust the register to index off of.  */
      if (frame_pointer_needed)
        set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
        set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);

      /* Actually do the restores now.  */
      for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
        if (regs_ever_live[i]
            || (! TARGET_64BIT && regs_ever_live[i + 1]))
            rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
            rtx dest = gen_rtx_REG (DFmode, i);
            emit_move_insn (dest, src);

  /* Emit a blockage insn here to keep these insns from being moved to
     an earlier spot in the epilogue, or into the main instruction stream.

     This is necessary as we must not cut the stack back before all the
     restores are finished.  */
  emit_insn (gen_blockage ());

  /* Reset stack pointer (and possibly frame pointer).  The stack
     pointer is initially set to fp + 64 to avoid a race condition.  */
  if (frame_pointer_needed)
      rtx delta = GEN_INT (-64);

      set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
      emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
  /* If we were deferring a callee register restore, do it now.  */
  else if (merge_sp_adjust_with_load)
      rtx delta = GEN_INT (-actual_fsize);
      rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);

      emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
  else if (actual_fsize != 0)
    set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,

  /* If we haven't restored %r2 yet (no frame pointer, and a stack
     frame greater than 8k), do so now.  */
    load_reg (2, ret_off, STACK_POINTER_REGNUM);

  if (DO_FRAME_NOTES && current_function_calls_eh_return)
      rtx sa = EH_RETURN_STACKADJ_RTX;

      emit_insn (gen_blockage ());
      emit_insn (TARGET_64BIT
                 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
                 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
hppa_pic_save_rtx (void)
  return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);

#ifndef NO_DEFERRED_PROFILE_COUNTERS
#define NO_DEFERRED_PROFILE_COUNTERS 0

/* Define heap vector type for funcdef numbers.  */
DEF_VEC_ALLOC_I(int,heap);

/* Vector of funcdef numbers.  */
static VEC(int,heap) *funcdef_nos;

/* Output deferred profile counters.  */
output_deferred_profile_counters (void)
  if (VEC_empty (int, funcdef_nos))

  switch_to_section (data_section);
  align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
  ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));

  for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
      targetm.asm_out.internal_label (asm_out_file, "LP", n);
      assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);

  VEC_free (int, heap, funcdef_nos);

hppa_profile_hook (int label_no)
  /* We use SImode for the address of the function in both 32 and
     64-bit code to avoid having to provide DImode versions of the
     lcla2 and load_offset_label_address insn patterns.  */
  rtx reg = gen_reg_rtx (SImode);
  rtx label_rtx = gen_label_rtx ();
  rtx begin_label_rtx, call_insn;
  char begin_label_name[16];

  ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
  begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));

  emit_move_insn (arg_pointer_rtx,
                  gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,

  emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4214 /* The address of the function is loaded into %r25 with an instruction-
4215 relative sequence that avoids the use of relocations. The sequence
4216 is split so that the load_offset_label_address instruction can
4217 occupy the delay slot of the call to _mcount. */
4219 emit_insn (gen_lcla2 (reg, label_rtx));
4221 emit_insn (gen_lcla1 (reg, label_rtx));
4223 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4224 reg, begin_label_rtx, label_rtx));
4226 #if !NO_DEFERRED_PROFILE_COUNTERS
4228 rtx count_label_rtx, addr, r24;
4229 char count_label_name[16];
4231 VEC_safe_push (int, heap, funcdef_nos, label_no);
4232 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4233 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4235 addr = force_reg (Pmode, count_label_rtx);
4236 r24 = gen_rtx_REG (Pmode, 24);
4237 emit_move_insn (r24, addr);
4240 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4241 gen_rtx_SYMBOL_REF (Pmode,
4243 GEN_INT (TARGET_64BIT ? 24 : 12)));
4245 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4250 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4251 gen_rtx_SYMBOL_REF (Pmode,
4253 GEN_INT (TARGET_64BIT ? 16 : 8)));
4257 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4258 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4260 /* Indicate the _mcount call cannot throw, nor will it execute a non-local goto.  */
4262 REG_NOTES (call_insn)
4263 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
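/* To summarize the convention established above: _mcount receives the
   return pointer (%r2, copied to %r26), the address of the profiled
   function in %r25, and, when deferred counters are enabled, the
   address of the per-function counter word in %r24.  */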
4266 /* Fetch the return address for the frame COUNT steps up from
4267 the current frame, after the prologue. FRAMEADDR is the
4268 frame pointer of the COUNT frame.
4270 We want to ignore any export stub remnants here. To handle this,
4271 we examine the code at the return address, and if it is an export
4272 stub, we return a memory rtx for the stub return address stored at frameaddr - 24.
4275 The value returned is used in two different ways:
4277 1. To find a function's caller.
4279 2. To change the return address for a function.
4281 This function handles most instances of case 1; however, it will
4282 fail if there are two levels of stubs to execute on the return
4283 path. The only way I believe that can happen is if the return value
4284 needs a parameter relocation, which never happens for C code.
4286 This function handles most instances of case 2; however, it will
4287 fail if we did not originally have stub code on the return path
4288 but will need stub code on the new return path. This can happen if
4289 the caller & callee are both in the main program, but the new
4290 return location is in a shared library. */
4293 return_addr_rtx (int count, rtx frameaddr)
4303 rp = get_hard_reg_initial_val (Pmode, 2);
4305 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4308 saved_rp = gen_reg_rtx (Pmode);
4309 emit_move_insn (saved_rp, rp);
4311 /* Get pointer to the instruction stream. We have to mask out the
4312 privilege level from the two low order bits of the return address
4313 pointer here so that ins will point to the start of the first
4314 instruction that would have been executed if we returned. */
4315 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4316 label = gen_label_rtx ();
4318 /* Check the instruction stream at the normal return address for the export stub:
4321 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4322 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4323 0x00011820 | stub+16: mtsp r1,sr0
4324 0xe0400002 | stub+20: be,n 0(sr0,rp)
4326 If it is an export stub, then our return address is really in -24[frameaddr].  */
4329 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4330 NULL_RTX, SImode, 1);
4331 emit_jump_insn (gen_bne (label));
4333 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4334 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4335 emit_jump_insn (gen_bne (label));
4337 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4338 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4339 emit_jump_insn (gen_bne (label));
4341 /* 0xe0400002 must be specified as -532676606 so that it won't be
4342 rejected as an invalid immediate operand on 64-bit hosts. */
4343 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4344 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
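/* The arithmetic behind the constant above: 0xe0400002 is 3762290690,
   and 3762290690 - 2**32 is -532676606, so the negative value encodes
   the same 32-bit pattern.  */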
4346 /* If there is no export stub then just use the value saved from
4347 the return pointer register. */
4349 emit_jump_insn (gen_bne (label));
4351 /* Here we know that our return address points to an export
4352 stub. We don't want to return the address of the export stub,
4353 but rather the return address of the export stub. That return
4354 address is stored at -24[frameaddr]. */
4356 emit_move_insn (saved_rp,
4358 memory_address (Pmode,
4359 plus_constant (frameaddr,
4366 /* This is only valid once reload has completed because it depends on
4367 knowing exactly how much (if any) frame there is and...
4369 It's only valid if there is no frame marker to de-allocate and...
4371 It's only valid if %r2 hasn't been saved into the caller's frame
4372 (we're not profiling and %r2 isn't live anywhere). */
4374 hppa_can_use_return_insn_p (void)
4376 return (reload_completed
4377 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4378 && ! regs_ever_live[2]
4379 && ! frame_pointer_needed);
4383 emit_bcond_fp (enum rtx_code code, rtx operand0)
4385 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4386 gen_rtx_IF_THEN_ELSE (VOIDmode,
4387 gen_rtx_fmt_ee (code,
4389 gen_rtx_REG (CCFPmode, 0),
4391 gen_rtx_LABEL_REF (VOIDmode, operand0),
4397 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4399 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4400 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4403 /* Adjust the cost of a scheduling dependency. Return the new cost of
4404 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4407 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4409 enum attr_type attr_type;
4411 /* Don't adjust costs for a pa8000 chip, also do not adjust any
4412 true dependencies as they are described with bypasses now. */
4413 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4416 if (! recog_memoized (insn))
4419 attr_type = get_attr_type (insn);
4421 switch (REG_NOTE_KIND (link))
4424 /* Anti dependency; DEP_INSN reads a register that INSN writes some cycles later.  */
4427 if (attr_type == TYPE_FPLOAD)
4429 rtx pat = PATTERN (insn);
4430 rtx dep_pat = PATTERN (dep_insn);
4431 if (GET_CODE (pat) == PARALLEL)
4433 /* This happens for the fldXs,mb patterns. */
4434 pat = XVECEXP (pat, 0, 0);
4436 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4437 /* If this happens, we have to extend this to schedule
4438 optimally. Return 0 for now. */
4441 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4443 if (! recog_memoized (dep_insn))
4445 switch (get_attr_type (dep_insn))
4452 case TYPE_FPSQRTSGL:
4453 case TYPE_FPSQRTDBL:
4454 /* A fpload can't be issued until one cycle before a
4455 preceding arithmetic operation has finished if
4456 the target of the fpload is any of the sources
4457 (or destination) of the arithmetic operation. */
4458 return insn_default_latency (dep_insn) - 1;
4465 else if (attr_type == TYPE_FPALU)
4467 rtx pat = PATTERN (insn);
4468 rtx dep_pat = PATTERN (dep_insn);
4469 if (GET_CODE (pat) == PARALLEL)
4471 /* This happens for the fldXs,mb patterns. */
4472 pat = XVECEXP (pat, 0, 0);
4474 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4475 /* If this happens, we have to extend this to schedule
4476 optimally. Return 0 for now. */
4479 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4481 if (! recog_memoized (dep_insn))
4483 switch (get_attr_type (dep_insn))
4487 case TYPE_FPSQRTSGL:
4488 case TYPE_FPSQRTDBL:
4489 /* An ALU flop can't be issued until two cycles before a
4490 preceding divide or sqrt operation has finished if
4491 the target of the ALU flop is any of the sources
4492 (or destination) of the divide or sqrt operation. */
4493 return insn_default_latency (dep_insn) - 2;
4501 /* For other anti dependencies, the cost is 0. */
4504 case REG_DEP_OUTPUT:
4505 /* Output dependency; DEP_INSN writes a register that INSN writes some cycles later.  */
4507 if (attr_type == TYPE_FPLOAD)
4509 rtx pat = PATTERN (insn);
4510 rtx dep_pat = PATTERN (dep_insn);
4511 if (GET_CODE (pat) == PARALLEL)
4513 /* This happens for the fldXs,mb patterns. */
4514 pat = XVECEXP (pat, 0, 0);
4516 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4517 /* If this happens, we have to extend this to schedule
4518 optimally. Return 0 for now. */
4521 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4523 if (! recog_memoized (dep_insn))
4525 switch (get_attr_type (dep_insn))
4532 case TYPE_FPSQRTSGL:
4533 case TYPE_FPSQRTDBL:
4534 /* A fpload can't be issued until one cycle before a
4535 preceding arithmetic operation has finished if
4536 the target of the fpload is the destination of the
4537 arithmetic operation.
4539 Exception: For PA7100LC, PA7200 and PA7300, the cost
4540 is 3 cycles, unless they bundle together. We also
4541 pay the penalty if the second insn is a fpload. */
4542 return insn_default_latency (dep_insn) - 1;
4549 else if (attr_type == TYPE_FPALU)
4551 rtx pat = PATTERN (insn);
4552 rtx dep_pat = PATTERN (dep_insn);
4553 if (GET_CODE (pat) == PARALLEL)
4555 /* This happens for the fldXs,mb patterns. */
4556 pat = XVECEXP (pat, 0, 0);
4558 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4559 /* If this happens, we have to extend this to schedule
4560 optimally. Return 0 for now. */
4563 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4565 if (! recog_memoized (dep_insn))
4567 switch (get_attr_type (dep_insn))
4571 case TYPE_FPSQRTSGL:
4572 case TYPE_FPSQRTDBL:
4573 /* An ALU flop can't be issued until two cycles before a
4574 preceding divide or sqrt operation has finished if
4575 the target of the ALU flop is also the target of
4576 the divide or sqrt operation. */
4577 return insn_default_latency (dep_insn) - 2;
4585 /* For other output dependencies, the cost is 0. */
4593 /* Adjust scheduling priorities. We use this to try and keep addil
4594 and the next use of %r1 close together. */
4596 pa_adjust_priority (rtx insn, int priority)
4598 rtx set = single_set (insn);
4602 src = SET_SRC (set);
4603 dest = SET_DEST (set);
4604 if (GET_CODE (src) == LO_SUM
4605 && symbolic_operand (XEXP (src, 1), VOIDmode)
4606 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4609 else if (GET_CODE (src) == MEM
4610 && GET_CODE (XEXP (src, 0)) == LO_SUM
4611 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4612 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4615 else if (GET_CODE (dest) == MEM
4616 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4617 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4618 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4624 /* The 700 can only issue a single insn at a time.
4625 The 7XXX processors can issue two insns at a time.
4626 The 8000 can issue 4 insns at a time. */
4628 pa_issue_rate (void)
4632 case PROCESSOR_700: return 1;
4633 case PROCESSOR_7100: return 2;
4634 case PROCESSOR_7100LC: return 2;
4635 case PROCESSOR_7200: return 2;
4636 case PROCESSOR_7300: return 2;
4637 case PROCESSOR_8000: return 4;
4646 /* Return any length adjustment needed by INSN which already has its length
4647 computed as LENGTH. Return zero if no adjustment is necessary.
4649 For the PA: function calls, millicode calls, and backwards short
4650 conditional branches with unfilled delay slots need an adjustment by +1
4651 (to account for the NOP which will be inserted into the instruction stream).
4653 Also compute the length of an inline block move here as it is too
4654 complicated to express as a length attribute in pa.md. */
4656 pa_adjust_insn_length (rtx insn, int length)
4658 rtx pat = PATTERN (insn);
4660 /* Jumps inside switch tables which have unfilled delay slots need adjustment.  */
4662 if (GET_CODE (insn) == JUMP_INSN
4663 && GET_CODE (pat) == PARALLEL
4664 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4666 /* Millicode insn with an unfilled delay slot. */
4667 else if (GET_CODE (insn) == INSN
4668 && GET_CODE (pat) != SEQUENCE
4669 && GET_CODE (pat) != USE
4670 && GET_CODE (pat) != CLOBBER
4671 && get_attr_type (insn) == TYPE_MILLI)
4673 /* Block move pattern. */
4674 else if (GET_CODE (insn) == INSN
4675 && GET_CODE (pat) == PARALLEL
4676 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4677 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4678 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4679 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4680 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4681 return compute_movmem_length (insn) - 4;
4682 /* Block clear pattern. */
4683 else if (GET_CODE (insn) == INSN
4684 && GET_CODE (pat) == PARALLEL
4685 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4686 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4687 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4688 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4689 return compute_clrmem_length (insn) - 4;
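/* For both block patterns above, the compute_*_length helper appears
   to return the total length in bytes of the expanded copy or clear
   sequence; the 4 subtracted here would then be the bytes the length
   attribute in pa.md already counts for the pattern itself.  */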
4690 /* Conditional branch with an unfilled delay slot. */
4691 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4693 /* Adjust a short backwards conditional with an unfilled delay slot. */
4694 if (GET_CODE (pat) == SET
4696 && ! forward_branch_p (insn))
4698 else if (GET_CODE (pat) == PARALLEL
4699 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4702 /* Adjust dbra insn with short backwards conditional branch with
4703 unfilled delay slot -- only for case where counter is in a
4704 general register.  */
4705 else if (GET_CODE (pat) == PARALLEL
4706 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4707 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4708 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4710 && ! forward_branch_p (insn))
4718 /* Print operand X (an rtx) in assembler syntax to file FILE.
4719 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4720 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4723 print_operand (FILE *file, rtx x, int code)
4728 /* Output a 'nop' if there's nothing for the delay slot. */
4729 if (dbr_sequence_length () == 0)
4730 fputs ("\n\tnop", file);
4733 /* Output a nullification completer if there's nothing for the
4734 delay slot or nullification is requested.  */
4735 if (dbr_sequence_length () == 0 ||
4737 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4741 /* Print out the second register name of a register pair.
4742 I.e., R (6) => 7. */
4743 fputs (reg_names[REGNO (x) + 1], file);
4746 /* A register or zero. */
4748 || (x == CONST0_RTX (DFmode))
4749 || (x == CONST0_RTX (SFmode)))
4751 fputs ("%r0", file);
4757 /* A register or zero (floating point). */
4759 || (x == CONST0_RTX (DFmode))
4760 || (x == CONST0_RTX (SFmode)))
4762 fputs ("%fr0", file);
4771 xoperands[0] = XEXP (XEXP (x, 0), 0);
4772 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4773 output_global_address (file, xoperands[1], 0);
4774 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4778 case 'C': /* Plain (C)ondition */
4780 switch (GET_CODE (x))
4783 fputs ("=", file); break;
4785 fputs ("<>", file); break;
4787 fputs (">", file); break;
4789 fputs (">=", file); break;
4791 fputs (">>=", file); break;
4793 fputs (">>", file); break;
4795 fputs ("<", file); break;
4797 fputs ("<=", file); break;
4799 fputs ("<<=", file); break;
4801 fputs ("<<", file); break;
4806 case 'N': /* Condition, (N)egated */
4807 switch (GET_CODE (x))
4810 fputs ("<>", file); break;
4812 fputs ("=", file); break;
4814 fputs ("<=", file); break;
4816 fputs ("<", file); break;
4818 fputs ("<<", file); break;
4820 fputs ("<<=", file); break;
4822 fputs (">=", file); break;
4824 fputs (">", file); break;
4826 fputs (">>", file); break;
4828 fputs (">>=", file); break;
4833 /* For floating point comparisons. Note that the output
4834 predicates are the complement of the desired mode. The
4835 conditions for GT, GE, LT, LE and LTGT cause an invalid
4836 operation exception if the result is unordered and this
4837 exception is enabled in the floating-point status register. */
4839 switch (GET_CODE (x))
4842 fputs ("!=", file); break;
4844 fputs ("=", file); break;
4846 fputs ("!>", file); break;
4848 fputs ("!>=", file); break;
4850 fputs ("!<", file); break;
4852 fputs ("!<=", file); break;
4854 fputs ("!<>", file); break;
4856 fputs ("!?<=", file); break;
4858 fputs ("!?<", file); break;
4860 fputs ("!?>=", file); break;
4862 fputs ("!?>", file); break;
4864 fputs ("!?=", file); break;
4866 fputs ("!?", file); break;
4868 fputs ("?", file); break;
4873 case 'S': /* Condition, operands are (S)wapped. */
4874 switch (GET_CODE (x))
4877 fputs ("=", file); break;
4879 fputs ("<>", file); break;
4881 fputs ("<", file); break;
4883 fputs ("<=", file); break;
4885 fputs ("<<=", file); break;
4887 fputs ("<<", file); break;
4889 fputs (">", file); break;
4891 fputs (">=", file); break;
4893 fputs (">>=", file); break;
4895 fputs (">>", file); break;
4900 case 'B': /* Condition, (B)oth swapped and negate. */
4901 switch (GET_CODE (x))
4904 fputs ("<>", file); break;
4906 fputs ("=", file); break;
4908 fputs (">=", file); break;
4910 fputs (">", file); break;
4912 fputs (">>", file); break;
4914 fputs (">>=", file); break;
4916 fputs ("<=", file); break;
4918 fputs ("<", file); break;
4920 fputs ("<<", file); break;
4922 fputs ("<<=", file); break;
4928 gcc_assert (GET_CODE (x) == CONST_INT);
4929 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4932 gcc_assert (GET_CODE (x) == CONST_INT);
4933 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4936 gcc_assert (GET_CODE (x) == CONST_INT);
4937 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4940 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4941 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4944 gcc_assert (GET_CODE (x) == CONST_INT);
4945 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4948 gcc_assert (GET_CODE (x) == CONST_INT);
4949 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4952 if (GET_CODE (x) == CONST_INT)
4957 switch (GET_CODE (XEXP (x, 0)))
4961 if (ASSEMBLER_DIALECT == 0)
4962 fputs ("s,mb", file);
4964 fputs (",mb", file);
4968 if (ASSEMBLER_DIALECT == 0)
4969 fputs ("s,ma", file);
4971 fputs (",ma", file);
4974 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4975 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4977 if (ASSEMBLER_DIALECT == 0)
4980 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4981 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4983 if (ASSEMBLER_DIALECT == 0)
4984 fputs ("x,s", file);
4988 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4992 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4998 output_global_address (file, x, 0);
5001 output_global_address (file, x, 1);
5003 case 0: /* Don't do anything special */
5008 compute_zdepwi_operands (INTVAL (x), op);
5009 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5015 compute_zdepdi_operands (INTVAL (x), op);
5016 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5020 /* We can get here from a .vtable_inherit due to our
5021 CONSTANT_ADDRESS_P rejecting perfectly good constant addresses.  */
5027 if (GET_CODE (x) == REG)
5029 fputs (reg_names [REGNO (x)], file);
5030 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5036 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5037 && (REGNO (x) & 1) == 0)
5040 else if (GET_CODE (x) == MEM)
5042 int size = GET_MODE_SIZE (GET_MODE (x));
5043 rtx base = NULL_RTX;
5044 switch (GET_CODE (XEXP (x, 0)))
5048 base = XEXP (XEXP (x, 0), 0);
5049 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5053 base = XEXP (XEXP (x, 0), 0);
5054 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5057 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5058 fprintf (file, "%s(%s)",
5059 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5060 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5061 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5062 fprintf (file, "%s(%s)",
5063 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5064 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5065 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5066 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5068 /* Because the REG_POINTER flag can get lost during reload,
5069 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5070 index and base registers in the combined move patterns. */
5071 rtx base = XEXP (XEXP (x, 0), 1);
5072 rtx index = XEXP (XEXP (x, 0), 0);
5074 fprintf (file, "%s(%s)",
5075 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5078 output_address (XEXP (x, 0));
5081 output_address (XEXP (x, 0));
5086 output_addr_const (file, x);
5089 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5092 output_global_address (FILE *file, rtx x, int round_constant)
5095 /* Imagine (high (const (plus ...))). */
5096 if (GET_CODE (x) == HIGH)
5099 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5100 output_addr_const (file, x);
5101 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5103 output_addr_const (file, x);
5104 fputs ("-$global$", file);
5106 else if (GET_CODE (x) == CONST)
5108 const char *sep = "";
5109 int offset = 0; /* assembler wants -$global$ at end */
5110 rtx base = NULL_RTX;
5112 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5115 base = XEXP (XEXP (x, 0), 0);
5116 output_addr_const (file, base);
5119 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5125 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5128 base = XEXP (XEXP (x, 0), 1);
5129 output_addr_const (file, base);
5132 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5138 /* How bogus. The compiler is apparently responsible for
5139 rounding the constant if it uses an LR field selector.
5141 The linker and/or assembler seem a better place since
5142 they have to do this kind of thing already.
5144 If we fail to do this, HP's optimizing linker may eliminate
5145 an addil, but not update the ldw/stw/ldo instruction that
5146 uses the result of the addil. */
5148 offset = ((offset + 0x1000) & ~0x1fff);
5150 switch (GET_CODE (XEXP (x, 0)))
5163 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5171 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5172 fputs ("-$global$", file);
5174 fprintf (file, "%s%d", sep, offset);
5177 output_addr_const (file, x);
5180 /* Output boilerplate text to appear at the beginning of the file.
5181 There are several possible versions. */
5182 #define aputs(x) fputs(x, asm_out_file)
5184 pa_file_start_level (void)
5187 aputs ("\t.LEVEL 2.0w\n");
5188 else if (TARGET_PA_20)
5189 aputs ("\t.LEVEL 2.0\n");
5190 else if (TARGET_PA_11)
5191 aputs ("\t.LEVEL 1.1\n");
5193 aputs ("\t.LEVEL 1.0\n");
5197 pa_file_start_space (int sortspace)
5199 aputs ("\t.SPACE $PRIVATE$");
5202 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5203 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5204 "\n\t.SPACE $TEXT$");
5207 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5208 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5212 pa_file_start_file (int want_version)
5214 if (write_symbols != NO_DEBUG)
5216 output_file_directive (asm_out_file, main_input_filename);
5218 aputs ("\t.version\t\"01.01\"\n");
5223 pa_file_start_mcount (const char *aswhat)
5226 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5230 pa_elf_file_start (void)
5232 pa_file_start_level ();
5233 pa_file_start_mcount ("ENTRY");
5234 pa_file_start_file (0);
5238 pa_som_file_start (void)
5240 pa_file_start_level ();
5241 pa_file_start_space (0);
5242 aputs ("\t.IMPORT $global$,DATA\n"
5243 "\t.IMPORT $$dyncall,MILLICODE\n");
5244 pa_file_start_mcount ("CODE");
5245 pa_file_start_file (0);
5249 pa_linux_file_start (void)
5251 pa_file_start_file (1);
5252 pa_file_start_level ();
5253 pa_file_start_mcount ("CODE");
5257 pa_hpux64_gas_file_start (void)
5259 pa_file_start_level ();
5260 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5262 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5264 pa_file_start_file (1);
5268 pa_hpux64_hpas_file_start (void)
5270 pa_file_start_level ();
5271 pa_file_start_space (1);
5272 pa_file_start_mcount ("CODE");
5273 pa_file_start_file (0);
5277 /* Search the deferred plabel list for SYMBOL and return its internal
5278 label. If an entry for SYMBOL is not found, a new entry is created. */
5281 get_deferred_plabel (rtx symbol)
5283 const char *fname = XSTR (symbol, 0);
5286 /* See if we have already put this function on the list of deferred
5287 plabels.  This list is generally small, so a linear search is not
5288 too ugly. If it proves too slow replace it with something faster. */
5289 for (i = 0; i < n_deferred_plabels; i++)
5290 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5293 /* If the deferred plabel list is empty, or this entry was not found
5294 on the list, create a new entry on the list. */
5295 if (deferred_plabels == NULL || i == n_deferred_plabels)
5299 if (deferred_plabels == 0)
5300 deferred_plabels = (struct deferred_plabel *)
5301 ggc_alloc (sizeof (struct deferred_plabel));
5303 deferred_plabels = (struct deferred_plabel *)
5304 ggc_realloc (deferred_plabels,
5305 ((n_deferred_plabels + 1)
5306 * sizeof (struct deferred_plabel)));
5308 i = n_deferred_plabels++;
5309 deferred_plabels[i].internal_label = gen_label_rtx ();
5310 deferred_plabels[i].symbol = symbol;
5312 /* Gross. We have just implicitly taken the address of this
5313 function. Mark it in the same manner as assemble_name. */
5314 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5316 mark_referenced (id);
5319 return deferred_plabels[i].internal_label;
5323 output_deferred_plabels (void)
5327 /* If we have some deferred plabels, then we need to switch into the
5328 data or readonly data section, and align it to a 4 byte (8 byte for TARGET_64BIT) boundary
5329 before outputting the deferred plabels. */
5330 if (n_deferred_plabels)
5332 switch_to_section (flag_pic ? data_section : readonly_data_section);
5333 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5336 /* Now output the deferred plabels. */
5337 for (i = 0; i < n_deferred_plabels; i++)
5339 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5340 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5341 assemble_integer (deferred_plabels[i].symbol,
5342 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
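/* Each deferred plabel thus expands to an internal label followed by
   a pointer-sized integer naming the function, e.g. (label number and
   directive purely illustrative):

	L$0042:
		.word	some_function

   with an 8-byte integer instead in 64-bit mode.  */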
5346 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5347 /* Initialize optabs to point to HPUX long double emulation routines. */
5349 pa_hpux_init_libfuncs (void)
5351 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5352 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5353 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5354 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5355 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5356 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5357 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5358 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5359 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5361 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5362 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5363 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5364 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5365 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5366 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5367 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5369 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5370 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5371 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5372 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5374 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5375 ? "__U_Qfcnvfxt_quad_to_sgl"
5376 : "_U_Qfcnvfxt_quad_to_sgl");
5377 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5378 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5379 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5381 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5382 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5383 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5384 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5388 /* HP's millicode routines mean something special to the assembler.
5389 Keep track of which ones we have used. */
5391 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5392 static void import_milli (enum millicodes);
5393 static char imported[(int) end1000];
5394 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5395 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5396 #define MILLI_START 10
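/* MILLI_START is the index of the "...." placeholder within
   IMPORT_STRING; import_milli below splices the four-character
   millicode name over those dots.  */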
5399 import_milli (enum millicodes code)
5401 char str[sizeof (import_string)];
5403 if (!imported[(int) code])
5405 imported[(int) code] = 1;
5406 strcpy (str, import_string);
5407 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5408 output_asm_insn (str, 0);
5412 /* The register constraints have put the operands and return value in
5413 the proper registers. */
5416 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5418 import_milli (mulI);
5419 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5422 /* Emit the rtl for doing a division by a constant. */
5424 /* Do magic division millicodes exist for this value? */
5425 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
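/* For example, magic_milli[10] is nonzero, so a division by 10 can be
   emitted as a call to a dedicated $$divI_10/$$divU_10 millicode
   routine instead of the generic $$divI/$$divU (see output_div_insn
   below).  */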
5427 /* We'll use an array to keep track of the magic millicodes and
5428 whether or not we've used them already.  [n][0] is signed, [n][1] is unsigned.  */
5431 static int div_milli[16][2];
5434 emit_hpdiv_const (rtx *operands, int unsignedp)
5436 if (GET_CODE (operands[2]) == CONST_INT
5437 && INTVAL (operands[2]) > 0
5438 && INTVAL (operands[2]) < 16
5439 && magic_milli[INTVAL (operands[2])])
5441 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5443 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5447 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5448 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5450 gen_rtx_REG (SImode, 26),
5452 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5453 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5454 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5455 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5456 gen_rtx_CLOBBER (VOIDmode, ret))));
5457 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5464 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5468 /* If the divisor is a constant, try to use one of the special millicode routines.  */
5470 if (GET_CODE (operands[0]) == CONST_INT)
5472 static char buf[100];
5473 divisor = INTVAL (operands[0]);
5474 if (!div_milli[divisor][unsignedp])
5476 div_milli[divisor][unsignedp] = 1;
5478 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5480 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5484 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5485 INTVAL (operands[0]));
5486 return output_millicode_call (insn,
5487 gen_rtx_SYMBOL_REF (SImode, buf));
5491 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5492 INTVAL (operands[0]));
5493 return output_millicode_call (insn,
5494 gen_rtx_SYMBOL_REF (SImode, buf));
5497 /* Divisor isn't a special constant. */
5502 import_milli (divU);
5503 return output_millicode_call (insn,
5504 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5508 import_milli (divI);
5509 return output_millicode_call (insn,
5510 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5515 /* Output a $$rem millicode to do mod. */
5518 output_mod_insn (int unsignedp, rtx insn)
5522 import_milli (remU);
5523 return output_millicode_call (insn,
5524 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5528 import_milli (remI);
5529 return output_millicode_call (insn,
5530 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5535 output_arg_descriptor (rtx call_insn)
5537 const char *arg_regs[4];
5538 enum machine_mode arg_mode;
5540 int i, output_flag = 0;
5543 /* We neither need nor want argument location descriptors for the
5544 64bit runtime environment or the ELF32 environment. */
5545 if (TARGET_64BIT || TARGET_ELF32)
5548 for (i = 0; i < 4; i++)
5551 /* Specify explicitly that no argument relocations should take place
5552 if using the portable runtime calling conventions. */
5553 if (TARGET_PORTABLE_RUNTIME)
5555 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5560 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5561 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5562 link; link = XEXP (link, 1))
5564 rtx use = XEXP (link, 0);
5566 if (! (GET_CODE (use) == USE
5567 && GET_CODE (XEXP (use, 0)) == REG
5568 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5571 arg_mode = GET_MODE (XEXP (use, 0));
5572 regno = REGNO (XEXP (use, 0));
5573 if (regno >= 23 && regno <= 26)
5575 arg_regs[26 - regno] = "GR";
5576 if (arg_mode == DImode)
5577 arg_regs[25 - regno] = "GR";
5579 else if (regno >= 32 && regno <= 39)
5581 if (arg_mode == SFmode)
5582 arg_regs[(regno - 32) / 2] = "FR";
5585 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5586 arg_regs[(regno - 34) / 2] = "FR";
5587 arg_regs[(regno - 34) / 2 + 1] = "FU";
5589 arg_regs[(regno - 34) / 2] = "FU";
5590 arg_regs[(regno - 34) / 2 + 1] = "FR";
5595 fputs ("\t.CALL ", asm_out_file);
5596 for (i = 0; i < 4; i++)
5601 fputc (',', asm_out_file);
5602 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5605 fputc ('\n', asm_out_file);
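/* As an illustration, a call passing two 32-bit integers (in %r26 and
   %r25) would emit:

	.CALL ARGW0=GR,ARGW1=GR
 */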
5608 static enum reg_class
5609 pa_secondary_reload (bool in_p, rtx x, enum reg_class class,
5610 enum machine_mode mode, secondary_reload_info *sri)
5612 int is_symbolic, regno;
5614 /* Handle the easy stuff first. */
5615 if (class == R1_REGS)
5621 if (class == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5627 /* If we have something like (mem (mem (...))), we can safely assume the
5628 inner MEM will end up in a general register after reloading, so there's
5629 no need for a secondary reload. */
5630 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5633 /* Trying to load a constant into a FP register during PIC code
5634 generation requires %r1 as a scratch register. */
5636 && (mode == SImode || mode == DImode)
5637 && FP_REG_CLASS_P (class)
5638 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5640 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5641 : CODE_FOR_reload_indi_r1);
5645 /* Profiling showed the PA port spends about 1.3% of its compilation
5646 time in true_regnum from calls inside pa_secondary_reload_class. */
5647 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5648 regno = true_regnum (x);
5650 /* Handle out of range displacement for integer mode loads/stores of
5652 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5653 && GET_MODE_CLASS (mode) == MODE_INT
5654 && FP_REG_CLASS_P (class))
5655 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5657 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5661 /* A SAR<->FP register copy requires a secondary register (GPR) as
5662 well as secondary memory. */
5663 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5664 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5665 || (class == SHIFT_REGS
5666 && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5668 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5672 /* Secondary reloads of symbolic operands require %r1 as a scratch
5673 register when we're generating PIC code and the operand isn't readonly.  */
5675 if (GET_CODE (x) == HIGH)
5678 /* Profiling has shown that GCC spends about 2.6% of its compilation
5679 time in symbolic_operand from calls inside pa_secondary_reload_class.
5680 So, we use an inline copy to avoid useless work. */
5681 switch (GET_CODE (x))
5686 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5693 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5694 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5695 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5696 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5703 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5705 gcc_assert (mode == SImode || mode == DImode);
5706 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5707 : CODE_FOR_reload_indi_r1);
5713 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5714 by invisible reference. As a GCC extension, we also pass anything
5715 with a zero or variable size by reference.
5717 The 64-bit runtime does not describe passing any types by invisible
5718 reference. The internals of GCC can't currently handle passing
5719 empty structures, and zero or variable length arrays when they are
5720 not passed entirely on the stack or by reference. Thus, as a GCC
5721 extension, we pass these types by reference. The HP compiler doesn't
5722 support these types, so hopefully there shouldn't be any compatibility
5723 issues. This may have to be revisited when HP releases a C99 compiler
5724 or updates the ABI. */
5727 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5728 enum machine_mode mode, tree type,
5729 bool named ATTRIBUTE_UNUSED)
5734 size = int_size_in_bytes (type);
5736 size = GET_MODE_SIZE (mode);
5741 return size <= 0 || size > 8;
5745 function_arg_padding (enum machine_mode mode, tree type)
5748 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5750 /* Return none if justification is not required. */
5752 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5753 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5756 /* The directions set here are ignored when a BLKmode argument larger
5757 than a word is placed in a register. Different code is used for
5758 the stack and registers. This makes it difficult to have a
5759 consistent data representation for both the stack and registers.
5760 For both runtimes, the justification and padding for arguments on
5761 the stack and in registers should be identical. */
5763 /* The 64-bit runtime specifies left justification for aggregates. */
5766 /* The 32-bit runtime architecture specifies right justification.
5767 When the argument is passed on the stack, the argument is padded
5768 with garbage on the left. The HP compiler pads with zeros. */
5772 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5779 /* Do what is necessary for `va_start'. We look at the current function
5780 to determine if stdargs or varargs is used and fill in an initial
5781 va_list. A pointer to this constructor is returned. */
5784 hppa_builtin_saveregs (void)
5787 tree fntype = TREE_TYPE (current_function_decl);
5788 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5789 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5790 != void_type_node)))
5791 ? UNITS_PER_WORD : 0);
5794 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5796 offset = current_function_arg_offset_rtx;
5802 /* Adjust for varargs/stdarg differences. */
5804 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5806 offset = current_function_arg_offset_rtx;
5808 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5809 from the incoming arg pointer and growing to larger addresses. */
5810 for (i = 26, off = -64; i >= 19; i--, off += 8)
5811 emit_move_insn (gen_rtx_MEM (word_mode,
5812 plus_constant (arg_pointer_rtx, off)),
5813 gen_rtx_REG (word_mode, i));
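/* The loop above lays out an 8 x 8-byte register save area: %r26 lands
   at offset -64 from the incoming arg pointer and %r19 at offset -8,
   mirroring the order in which anonymous arguments would have arrived
   in registers.  */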
5815 /* The incoming args pointer points just beyond the flushback area;
5816 normally this is not a serious concern. However, when we are doing
5817 varargs/stdargs we want to make the arg pointer point to the start
5818 of the incoming argument area. */
5819 emit_move_insn (virtual_incoming_args_rtx,
5820 plus_constant (arg_pointer_rtx, -64));
5822 /* Now return a pointer to the first anonymous argument. */
5823 return copy_to_reg (expand_binop (Pmode, add_optab,
5824 virtual_incoming_args_rtx,
5825 offset, 0, 0, OPTAB_LIB_WIDEN));
5828 /* Store general registers on the stack. */
5829 dest = gen_rtx_MEM (BLKmode,
5830 plus_constant (current_function_internal_arg_pointer,
5832 set_mem_alias_set (dest, get_varargs_alias_set ());
5833 set_mem_align (dest, BITS_PER_WORD);
5834 move_block_from_reg (23, dest, 4);
5836 /* move_block_from_reg will emit code to store the argument registers
5837 individually as scalar stores.
5839 However, other insns may later load from the same addresses for
5840 a structure load (passing a struct to a varargs routine).
5842 The alias code assumes that such aliasing can never happen, so we
5843 have to keep memory referencing insns from moving up beyond the
5844 last argument register store. So we emit a blockage insn here. */
5845 emit_insn (gen_blockage ());
5847 return copy_to_reg (expand_binop (Pmode, add_optab,
5848 current_function_internal_arg_pointer,
5849 offset, 0, 0, OPTAB_LIB_WIDEN));
5853 hppa_va_start (tree valist, rtx nextarg)
5855 nextarg = expand_builtin_saveregs ();
5856 std_expand_builtin_va_start (valist, nextarg);
5860 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5864 /* Args grow upward. We can use the generic routines. */
5865 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5867 else /* !TARGET_64BIT */
5869 tree ptr = build_pointer_type (type);
5872 unsigned int size, ofs;
5875 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5879 ptr = build_pointer_type (type);
5881 size = int_size_in_bytes (type);
5882 valist_type = TREE_TYPE (valist);
5884 /* Args grow down. Not handled by generic routines. */
5886 u = fold_convert (valist_type, size_in_bytes (type));
5887 t = build2 (MINUS_EXPR, valist_type, valist, u);
5889 /* Copied from va-pa.h, but we probably don't need to align to
5890 word size, since we generate and preserve that invariant. */
5891 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5892 t = build2 (BIT_AND_EXPR, valist_type, t, u);
5894 t = build2 (GIMPLE_MODIFY_STMT, valist_type, valist, t);
5896 ofs = (8 - size) % 4;
5899 u = fold_convert (valist_type, size_int (ofs));
5900 t = build2 (PLUS_EXPR, valist_type, t, u);
5903 t = fold_convert (ptr, t);
5904 t = build_va_arg_indirect_ref (t);
5907 t = build_va_arg_indirect_ref (t);
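/* A worked example of the arithmetic above: for a 1-byte argument,
   valist is decremented by 1, rounded down to a 4-byte boundary, and
   then ofs = (8 - 1) % 4 = 3 is added back, so the result points at
   the last byte of the slot -- arguments grow down and are
   right-justified within their word.  */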
5913 /* True if MODE is valid for the target. By "valid", we mean able to
5914 be manipulated in non-trivial ways. In particular, this means all
5915 the arithmetic is supported.
5917 Currently, TImode is not valid as the HP 64-bit runtime documentation
5918 doesn't document the alignment and calling conventions for this type.
5919 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5920 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE.  */
5923 pa_scalar_mode_supported_p (enum machine_mode mode)
5925 int precision = GET_MODE_PRECISION (mode);
5927 switch (GET_MODE_CLASS (mode))
5929 case MODE_PARTIAL_INT:
5931 if (precision == CHAR_TYPE_SIZE)
5933 if (precision == SHORT_TYPE_SIZE)
5935 if (precision == INT_TYPE_SIZE)
5937 if (precision == LONG_TYPE_SIZE)
5939 if (precision == LONG_LONG_TYPE_SIZE)
5944 if (precision == FLOAT_TYPE_SIZE)
5946 if (precision == DOUBLE_TYPE_SIZE)
5948 if (precision == LONG_DOUBLE_TYPE_SIZE)
5952 case MODE_DECIMAL_FLOAT:
5960 /* This routine handles all the normal conditional branch sequences we
5961 might need to generate. It handles compare immediate vs compare
5962 register, nullification of delay slots, varying length branches,
5963 negated branches, and all combinations of the above. It returns the
5964 output appropriate to emit the branch corresponding to all given parameters.  */
5968 output_cbranch (rtx *operands, int negated, rtx insn)
5970 static char buf[100];
5972 int nullify = INSN_ANNULLED_BRANCH_P (insn);
5973 int length = get_attr_length (insn);
5976 /* A conditional branch to the following instruction (e.g. the delay slot)
5977 is asking for a disaster. This can happen when not optimizing and
5978 when jump optimization fails.
5980 While it is usually safe to emit nothing, this can fail if the
5981 preceding instruction is a nullified branch with an empty delay
5982 slot and the same branch target as this branch. We could check
5983 for this but jump optimization should eliminate nop jumps. It
5984 is always safe to emit a nop. */
5985 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5988 /* The doubleword form of the cmpib instruction doesn't have the LEU
5989 and GTU conditions while the cmpb instruction does. Since we accept
5990 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5991 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5992 operands[2] = gen_rtx_REG (DImode, 0);
5993 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
5994 operands[1] = gen_rtx_REG (DImode, 0);
5996 /* If this is a long branch with its delay slot unfilled, set `nullify'
5997 as it can nullify the delay slot and save a nop. */
5998 if (length == 8 && dbr_sequence_length () == 0)
6001 /* If this is a short forward conditional branch which did not get
6002 its delay slot filled, the delay slot can still be nullified. */
6003 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6004 nullify = forward_branch_p (insn);
6006 /* A forward branch over a single nullified insn can be done with a
6007 comclr instruction.  This avoids a single cycle penalty due to a
6008 mis-predicted branch if we fall through (branch not taken).  */
6010 && next_real_insn (insn) != 0
6011 && get_attr_length (next_real_insn (insn)) == 4
6012 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6018 /* All short conditional branches except backwards with an unfilled
6022 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6024 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6025 if (GET_MODE (operands[1]) == DImode)
6028 strcat (buf, "%B3");
6030 strcat (buf, "%S3");
6032 strcat (buf, " %2,%r1,%%r0");
6034 strcat (buf, ",n %2,%r1,%0");
6036 strcat (buf, " %2,%r1,%0");
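/* The assembled result for this case looks something like
   "comb,= %r19,%r20,L$0012" in the old dialect, or "cmpb,..." in the
   pa2.0 dialect (operands and label purely illustrative).  */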
6039 /* All long conditionals. Note a short backward branch with an
6040 unfilled delay slot is treated just like a long backward branch
6041 with an unfilled delay slot. */
6043 /* Handle weird backwards branch with a filled delay slot
6044 which is nullified. */
6045 if (dbr_sequence_length () != 0
6046 && ! forward_branch_p (insn)
6049 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6050 if (GET_MODE (operands[1]) == DImode)
6053 strcat (buf, "%S3");
6055 strcat (buf, "%B3");
6056 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6058 /* Handle short backwards branch with an unfilled delay slot.
6059 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6060 taken and untaken branches. */
6061 else if (dbr_sequence_length () == 0
6062 && ! forward_branch_p (insn)
6063 && INSN_ADDRESSES_SET_P ()
6064 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6065 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6067 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6068 if (GET_MODE (operands[1]) == DImode)
6071 strcat (buf, "%B3 %2,%r1,%0%#");
6073 strcat (buf, "%S3 %2,%r1,%0%#");
6077 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6078 if (GET_MODE (operands[1]) == DImode)
6081 strcat (buf, "%S3");
6083 strcat (buf, "%B3");
6085 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6087 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6092 /* The reversed conditional branch must branch over one additional
6093 instruction if the delay slot is filled and needs to be extracted
6094 by output_lbranch. If the delay slot is empty or this is a
6095 nullified forward branch, the instruction after the reversed
6096 condition branch must be nullified. */
6097 if (dbr_sequence_length () == 0
6098 || (nullify && forward_branch_p (insn)))
6102 operands[4] = GEN_INT (length);
6107 operands[4] = GEN_INT (length + 4);
6110 /* Create a reversed conditional branch which branches around
6111 the following insns. */
6112 if (GET_MODE (operands[1]) != DImode)
6118 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6121 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6127 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6130 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6139 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6142 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6148 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6151 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6155 output_asm_insn (buf, operands);
6156 return output_lbranch (operands[0], insn, xdelay);
6161 /* This routine handles output of long unconditional branches that
6162 exceed the maximum range of a simple branch instruction. Since
6163 we don't have a register available for the branch, we save register
6164 %r1 in the frame marker, load the branch destination DEST into %r1,
6165 execute the branch, and restore %r1 in the delay slot of the branch.
6167 Since long branches may have an insn in the delay slot and the
6168 delay slot is used to restore %r1, we in general need to extract
6169 this insn and execute it before the branch. However, to facilitate
6170 use of this function by conditional branches, we also provide an
6171 option to not extract the delay insn so that it will be emitted
6172 after the long branch. So, if there is an insn in the delay slot,
6173 it is extracted if XDELAY is nonzero.
6175 The lengths of the various long-branch sequences are 20, 16 and 24
6176 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6179 output_lbranch (rtx dest, rtx insn, int xdelay)
6183 xoperands[0] = dest;
6185 /* First, free up the delay slot. */
6186 if (xdelay && dbr_sequence_length () != 0)
6188 /* We can't handle a jump in the delay slot. */
6189 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6191 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6194 /* Now delete the delay insn. */
6195 PUT_CODE (NEXT_INSN (insn), NOTE);
6196 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6197 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6200 /* Output an insn to save %r1. The runtime documentation doesn't
6201 specify whether the "Clean Up" slot in the caller's frame can
6202 be clobbered by the callee. It isn't copied by HP's builtin
6203 alloca, so this suggests that it can be clobbered if necessary.
6204 The "Static Link" location is copied by HP builtin alloca, so
6205 we avoid using it. Using the cleanup slot might be a problem
6206 if we have to interoperate with languages that pass cleanup
6207 information. However, it should be possible to handle these
6208 situations with GCC's asm feature.
6210 The "Current RP" slot is reserved for the called procedure, so
6211 we try to use it when we don't have a frame of our own. It's
6212 rather unlikely that we won't have a frame when we need to emit
6215 Really the way to go long term is a register scavenger; goto
6216 the target of the jump and find a register which we can use
6217 as a scratch to hold the value in %r1. Then, we wouldn't have
6218 to free up the delay slot or clobber a slot that may be needed
6219 for other purposes. */
6222 if (actual_fsize == 0 && !regs_ever_live[2])
6223 /* Use the return pointer slot in the frame marker. */
6224 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6226 /* Use the slot at -40 in the frame marker since HP builtin
6227 alloca doesn't copy it. */
6228 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6232 if (actual_fsize == 0 && !regs_ever_live[2])
6233 /* Use the return pointer slot in the frame marker. */
6234 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6236 /* Use the "Clean Up" slot in the frame marker. In GCC,
6237 the only other use of this location is for copying a
6238 floating point double argument from a floating-point
6239 register to two general registers. The copy is done
6240 as an "atomic" operation when outputting a call, so it
6241 won't interfere with our using the location here. */
6242 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6245 if (TARGET_PORTABLE_RUNTIME)
6247 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6248 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6249 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6253 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6254 if (TARGET_SOM || !TARGET_GAS)
6256 xoperands[1] = gen_label_rtx ();
6257 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6258 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6259 CODE_LABEL_NUMBER (xoperands[1]));
6260 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6264 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6265 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6267 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6270 /* Now output a very long branch to the original target. */
6271 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6273 /* Now restore the value of %r1 in the delay slot. */
6276 if (actual_fsize == 0 && !regs_ever_live[2])
6277 return "ldd -16(%%r30),%%r1";
6279 return "ldd -40(%%r30),%%r1";
6283 if (actual_fsize == 0 && !regs_ever_live[2])
6284 return "ldw -20(%%r30),%%r1";
6286 return "ldw -12(%%r30),%%r1";
6290 /* This routine handles all the branch-on-bit conditional branch sequences we
6291 might need to generate. It handles nullification of delay slots,
6292 varying length branches, negated branches and all combinations of the
6293 above.  It returns the appropriate output template to emit the branch.  */
6296 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6298 static char buf[100];
6300 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6301 int length = get_attr_length (insn);
6304 /* A conditional branch to the following instruction (e.g. the delay slot) is
6305 asking for a disaster. I do not think this can happen as this pattern
6306 is only used when optimizing; jump optimization should eliminate the
6307 jump. But be prepared just in case. */
6309 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6312 /* If this is a long branch with its delay slot unfilled, set `nullify'
6313 as it can nullify the delay slot and save a nop. */
6314 if (length == 8 && dbr_sequence_length () == 0)
6317 /* If this is a short forward conditional branch which did not get
6318 its delay slot filled, the delay slot can still be nullified. */
6319 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6320 nullify = forward_branch_p (insn);
6322 /* A forward branch over a single nullified insn can be done with an
6323 extrs instruction.  This avoids a single cycle penalty due to a
6324 mis-predicted branch if we fall through (branch not taken).  */
6327 && next_real_insn (insn) != 0
6328 && get_attr_length (next_real_insn (insn)) == 4
6329 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6336 /* All short conditional branches except backwards with an unfilled
6340 strcpy (buf, "{extrs,|extrw,s,}");
6342 strcpy (buf, "bb,");
6343 if (useskip && GET_MODE (operands[0]) == DImode)
6344 strcpy (buf, "extrd,s,*");
6345 else if (GET_MODE (operands[0]) == DImode)
6346 strcpy (buf, "bb,*");
6347 if ((which == 0 && negated)
6348 || (which == 1 && ! negated))
6353 strcat (buf, " %0,%1,1,%%r0");
6354 else if (nullify && negated)
6355 strcat (buf, ",n %0,%1,%3");
6356 else if (nullify && ! negated)
6357 strcat (buf, ",n %0,%1,%2");
6358 else if (! nullify && negated)
6359 strcat (buf, "%0,%1,%3");
6360 else if (! nullify && ! negated)
6361 strcat (buf, " %0,%1,%2");
6364 /* All long conditionals. Note a short backward branch with an
6365 unfilled delay slot is treated just like a long backward branch
6366 with an unfilled delay slot. */
6368 /* Handle weird backwards branch with a filled delay slot
6369 which is nullified. */
6370 if (dbr_sequence_length () != 0
6371 && ! forward_branch_p (insn)
6374 strcpy (buf, "bb,");
6375 if (GET_MODE (operands[0]) == DImode)
6377 if ((which == 0 && negated)
6378 || (which == 1 && ! negated))
6383 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6385 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6387 /* Handle short backwards branch with an unfilled delay slot.
6388 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6389 taken and untaken branches. */
6390 else if (dbr_sequence_length () == 0
6391 && ! forward_branch_p (insn)
6392 && INSN_ADDRESSES_SET_P ()
6393 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6394 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6396 strcpy (buf, "bb,");
6397 if (GET_MODE (operands[0]) == DImode)
6399 if ((which == 0 && negated)
6400 || (which == 1 && ! negated))
6405 strcat (buf, " %0,%1,%3%#");
6407 strcat (buf, " %0,%1,%2%#");
6411 if (GET_MODE (operands[0]) == DImode)
6412 strcpy (buf, "extrd,s,*");
6414 strcpy (buf, "{extrs,|extrw,s,}");
6415 if ((which == 0 && negated)
6416 || (which == 1 && ! negated))
6420 if (nullify && negated)
6421 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6422 else if (nullify && ! negated)
6423 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6425 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6427 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
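/* Sketch of the idiom above: the one-bit extract into %r0 exists only
   for its conditional nullification -- its completer (chosen by the
   elided which/negated test) cancels the following unconditional
   branch whenever the branch must not be taken:

       {extrs,|extrw,s,}COND %reg,bit,1,%r0
       b,n L$target                          */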
6432 /* The reversed conditional branch must branch over one additional
6433 instruction if the delay slot is filled and needs to be extracted
6434 by output_lbranch. If the delay slot is empty or this is a
6435 nullified forward branch, the instruction after the reversed
6436 conditional branch must be nullified. */
6437 if (dbr_sequence_length () == 0
6438 || (nullify && forward_branch_p (insn)))
6442 operands[4] = GEN_INT (length);
6447 operands[4] = GEN_INT (length + 4);
6450 if (GET_MODE (operands[0]) == DImode)
6451 strcpy (buf, "bb,*");
6453 strcpy (buf, "bb,");
6454 if ((which == 0 && negated)
6455 || (which == 1 && !negated))
6460 strcat (buf, ",n %0,%1,.+%4");
6462 strcat (buf, " %0,%1,.+%4");
6463 output_asm_insn (buf, operands);
6464 return output_lbranch (negated ? operands[3] : operands[2],
6470 /* This routine handles all the branch-on-variable-bit conditional branch
6471 sequences we might need to generate. It handles nullification of delay
6472 slots, varying length branches, negated branches and all combinations
6473 of the above. It returns the appropriate output template to emit the branch. */
6477 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6479 static char buf[100];
6481 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6482 int length = get_attr_length (insn);
6485 /* A conditional branch to the following instruction (e.g. the delay slot) is
6486 asking for a disaster. I do not think this can happen as this pattern
6487 is only used when optimizing; jump optimization should eliminate the
6488 jump. But be prepared just in case. */
6490 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6493 /* If this is a long branch with its delay slot unfilled, set `nullify'
6494 as it can nullify the delay slot and save a nop. */
6495 if (length == 8 && dbr_sequence_length () == 0)
6498 /* If this is a short forward conditional branch which did not get
6499 its delay slot filled, the delay slot can still be nullified. */
6500 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6501 nullify = forward_branch_p (insn);
6503 /* A forward branch over a single nullified insn can be done with an
6504 extrs instruction. This avoids a single cycle penalty due to a
6505 mispredicted branch if we fall through (branch not taken). */
6508 && next_real_insn (insn) != 0
6509 && get_attr_length (next_real_insn (insn)) == 4
6510 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6517 /* All short conditional branches except backwards with an unfilled delay slot. */
6521 strcpy (buf, "{vextrs,|extrw,s,}");
6523 strcpy (buf, "{bvb,|bb,}");
6524 if (useskip && GET_MODE (operands[0]) == DImode)
6525 strcpy (buf, "extrd,s,*");
6526 else if (GET_MODE (operands[0]) == DImode)
6527 strcpy (buf, "bb,*");
6528 if ((which == 0 && negated)
6529 || (which == 1 && ! negated))
6534 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6535 else if (nullify && negated)
6536 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6537 else if (nullify && ! negated)
6538 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6539 else if (! nullify && negated)
6540 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6541 else if (! nullify && ! negated)
6542 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6545 /* All long conditionals. Note a short backward branch with an
6546 unfilled delay slot is treated just like a long backward branch
6547 with an unfilled delay slot. */
6549 /* Handle weird backwards branch with a filled delay slot
6550 which is nullified. */
6551 if (dbr_sequence_length () != 0
6552 && ! forward_branch_p (insn)
6555 strcpy (buf, "{bvb,|bb,}");
6556 if (GET_MODE (operands[0]) == DImode)
6558 if ((which == 0 && negated)
6559 || (which == 1 && ! negated))
6564 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6566 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6568 /* Handle short backwards branch with an unfilled delay slot.
6569 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6570 taken and untaken branches. */
6571 else if (dbr_sequence_length () == 0
6572 && ! forward_branch_p (insn)
6573 && INSN_ADDRESSES_SET_P ()
6574 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6575 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6577 strcpy (buf, "{bvb,|bb,}");
6578 if (GET_MODE (operands[0]) == DImode)
6580 if ((which == 0 && negated)
6581 || (which == 1 && ! negated))
6586 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6588 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6592 strcpy (buf, "{vextrs,|extrw,s,}");
6593 if (GET_MODE (operands[0]) == DImode)
6594 strcpy (buf, "extrd,s,*");
6595 if ((which == 0 && negated)
6596 || (which == 1 && ! negated))
6600 if (nullify && negated)
6601 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6602 else if (nullify && ! negated)
6603 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6605 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6607 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6612 /* The reversed conditional branch must branch over one additional
6613 instruction if the delay slot is filled and needs to be extracted
6614 by output_lbranch. If the delay slot is empty or this is a
6615 nullified forward branch, the instruction after the reversed
6616 conditional branch must be nullified. */
6617 if (dbr_sequence_length () == 0
6618 || (nullify && forward_branch_p (insn)))
6622 operands[4] = GEN_INT (length);
6627 operands[4] = GEN_INT (length + 4);
6630 if (GET_MODE (operands[0]) == DImode)
6631 strcpy (buf, "bb,*");
6633 strcpy (buf, "{bvb,|bb,}");
6634 if ((which == 0 && negated)
6635 || (which == 1 && !negated))
6640 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6642 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6643 output_asm_insn (buf, operands);
6644 return output_lbranch (negated ? operands[3] : operands[2],
6650 /* Return the output template for emitting a dbra type insn.
6652 Note it may perform some output operations on its own before
6653 returning the final output string. */
6655 output_dbra (rtx *operands, rtx insn, int which_alternative)
6657 int length = get_attr_length (insn);
6659 /* A conditional branch to the following instruction (e.g. the delay slot) is
6660 asking for a disaster. Be prepared! */
6662 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6664 if (which_alternative == 0)
6665 return "ldo %1(%0),%0";
6666 else if (which_alternative == 1)
6668 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6669 output_asm_insn ("ldw -16(%%r30),%4", operands);
6670 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6671 return "{fldws|fldw} -16(%%r30),%0";
6675 output_asm_insn ("ldw %0,%4", operands);
6676 return "ldo %1(%4),%4\n\tstw %4,%0";
6680 if (which_alternative == 0)
6682 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6685 /* If this is a long branch with its delay slot unfilled, set `nullify'
6686 as it can nullify the delay slot and save a nop. */
6687 if (length == 8 && dbr_sequence_length () == 0)
6690 /* If this is a short forward conditional branch which did not get
6691 its delay slot filled, the delay slot can still be nullified. */
6692 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6693 nullify = forward_branch_p (insn);
6699 return "addib,%C2,n %1,%0,%3";
6701 return "addib,%C2 %1,%0,%3";
6704 /* Handle weird backwards branch with a filled delay slot
6705 which is nullified. */
6706 if (dbr_sequence_length () != 0
6707 && ! forward_branch_p (insn)
6709 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6710 /* Handle short backwards branch with an unfilled delay slot.
6711 Using an addb;nop rather than addi;bl saves 1 cycle for both
6712 taken and untaken branches. */
6713 else if (dbr_sequence_length () == 0
6714 && ! forward_branch_p (insn)
6715 && INSN_ADDRESSES_SET_P ()
6716 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6717 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6718 return "addib,%C2 %1,%0,%3%#";
6720 /* Handle normal cases. */
6722 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6724 return "addi,%N2 %1,%0,%0\n\tb %3";
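/* In the two templates above the add and branch are split: the addi
   uses the negated condition (%N2) to nullify the following
   unconditional branch exactly when the original addib would have
   fallen through. */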
6727 /* The reversed conditional branch must branch over one additional
6728 instruction if the delay slot is filled and needs to be extracted
6729 by output_lbranch. If the delay slot is empty or this is a
6730 nullified forward branch, the instruction after the reversed
6731 conditional branch must be nullified. */
6732 if (dbr_sequence_length () == 0
6733 || (nullify && forward_branch_p (insn)))
6737 operands[4] = GEN_INT (length);
6742 operands[4] = GEN_INT (length + 4);
6746 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6748 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6750 return output_lbranch (operands[3], insn, xdelay);
6754 /* Deal with gross reload from FP register case. */
6755 else if (which_alternative == 1)
6757 /* Move loop counter from FP register to MEM then into a GR,
6758 increment the GR, store the GR into MEM, and finally reload
6759 the FP register from MEM from within the branch's delay slot. */
6760 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6762 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6764 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6765 else if (length == 28)
6766 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6769 operands[5] = GEN_INT (length - 16);
6770 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6771 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6772 return output_lbranch (operands[3], insn, 0);
6775 /* Deal with gross reload from memory case. */
6778 /* Reload the loop counter from memory; the store back to memory
6779 happens in the branch's delay slot. */
6780 output_asm_insn ("ldw %0,%4", operands);
6782 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6783 else if (length == 16)
6784 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6787 operands[5] = GEN_INT (length - 4);
6788 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6789 return output_lbranch (operands[3], insn, 0);
6794 /* Return the output template for emitting a movb type insn.
6796 Note it may perform some output operations on its own before
6797 returning the final output string. */
6799 output_movb (rtx *operands, rtx insn, int which_alternative,
6800 int reverse_comparison)
6802 int length = get_attr_length (insn);
6804 /* A conditional branch to the following instruction (e.g. the delay slot) is
6805 asking for a disaster. Be prepared! */
6807 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6809 if (which_alternative == 0)
6810 return "copy %1,%0";
6811 else if (which_alternative == 1)
6813 output_asm_insn ("stw %1,-16(%%r30)", operands);
6814 return "{fldws|fldw} -16(%%r30),%0";
6816 else if (which_alternative == 2)
6822 /* Support the second variant. */
6823 if (reverse_comparison)
6824 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6826 if (which_alternative == 0)
6828 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6831 /* If this is a long branch with its delay slot unfilled, set `nullify'
6832 as it can nullify the delay slot and save a nop. */
6833 if (length == 8 && dbr_sequence_length () == 0)
6836 /* If this is a short forward conditional branch which did not get
6837 its delay slot filled, the delay slot can still be nullified. */
6838 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6839 nullify = forward_branch_p (insn);
6845 return "movb,%C2,n %1,%0,%3";
6847 return "movb,%C2 %1,%0,%3";
6850 /* Handle weird backwards branch with a filled delay slot
6851 which is nullified. */
6852 if (dbr_sequence_length () != 0
6853 && ! forward_branch_p (insn)
6855 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6857 /* Handle short backwards branch with an unfilled delay slot.
6858 Using a movb;nop rather than or;bl saves 1 cycle for both
6859 taken and untaken branches. */
6860 else if (dbr_sequence_length () == 0
6861 && ! forward_branch_p (insn)
6862 && INSN_ADDRESSES_SET_P ()
6863 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6864 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6865 return "movb,%C2 %1,%0,%3%#";
6866 /* Handle normal cases. */
6868 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6870 return "or,%N2 %1,%%r0,%0\n\tb %3";
6873 /* The reversed conditional branch must branch over one additional
6874 instruction if the delay slot is filled and needs to be extracted
6875 by output_lbranch. If the delay slot is empty or this is a
6876 nullified forward branch, the instruction after the reversed
6877 conditional branch must be nullified. */
6878 if (dbr_sequence_length () == 0
6879 || (nullify && forward_branch_p (insn)))
6883 operands[4] = GEN_INT (length);
6888 operands[4] = GEN_INT (length + 4);
6892 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
6894 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
6896 return output_lbranch (operands[3], insn, xdelay);
6899 /* Deal with gross reload for FP destination register case. */
6900 else if (which_alternative == 1)
6902 /* Move source register to MEM, perform the branch test, then
6903 finally load the FP register from MEM from within the branch's
6905 output_asm_insn ("stw %1,-16(%%r30)", operands);
6907 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6908 else if (length == 16)
6909 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6912 operands[4] = GEN_INT (length - 4);
6913 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
6914 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6915 return output_lbranch (operands[3], insn, 0);
6918 /* Deal with gross reload from memory case. */
6919 else if (which_alternative == 2)
6921 /* Reload the loop counter from memory; the store back to memory
6922 happens in the branch's delay slot. */
6924 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6925 else if (length == 12)
6926 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6929 operands[4] = GEN_INT (length);
6930 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
6932 return output_lbranch (operands[3], insn, 0);
6935 /* Handle SAR as a destination. */
6939 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6940 else if (length == 12)
6941 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6944 operands[4] = GEN_INT (length);
6945 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
6947 return output_lbranch (operands[3], insn, 0);
6952 /* Copy any FP arguments in INSN into integer registers. */
6954 copy_fp_args (rtx insn)
6959 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6961 int arg_mode, regno;
6962 rtx use = XEXP (link, 0);
6964 if (! (GET_CODE (use) == USE
6965 && GET_CODE (XEXP (use, 0)) == REG
6966 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6969 arg_mode = GET_MODE (XEXP (use, 0));
6970 regno = REGNO (XEXP (use, 0));
6972 /* Is it a floating point register? */
6973 if (regno >= 32 && regno <= 39)
6975 /* Copy the FP register into an integer register via memory. */
6976 if (arg_mode == SFmode)
6978 xoperands[0] = XEXP (use, 0);
6979 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6980 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6981 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6985 xoperands[0] = XEXP (use, 0);
6986 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6987 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6988 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6989 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
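/* %R1 prints the second register of the DImode pair named by %1, so
   the double is reloaded from the stack slot at -16(%r30) one 32-bit
   word at a time. */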
6995 /* Compute length of the FP argument copy sequence for INSN. */
6997 length_fp_args (rtx insn)
7002 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7004 int arg_mode, regno;
7005 rtx use = XEXP (link, 0);
7007 if (! (GET_CODE (use) == USE
7008 && GET_CODE (XEXP (use, 0)) == REG
7009 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7012 arg_mode = GET_MODE (XEXP (use, 0));
7013 regno = REGNO (XEXP (use, 0));
7015 /* Is it a floating point register? */
7016 if (regno >= 32 && regno <= 39)
7018 if (arg_mode == SFmode)
7028 /* Return the attribute length for the millicode call instruction INSN.
7029 The length must match the code generated by output_millicode_call.
7030 We include the delay slot in the returned length as it is better to
7031 overestimate the length than to underestimate it. */
7034 attr_length_millicode_call (rtx insn)
7036 unsigned long distance = -1;
7037 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7039 if (INSN_ADDRESSES_SET_P ())
7041 distance = (total + insn_current_reference_address (insn));
7042 if (distance < total)
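/* The (distance < total) test detects unsigned wraparound of the sum
   above; when it overflows, the distance is treated as unknown so a
   conservative long-call length is used. */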
7048 if (!TARGET_LONG_CALLS && distance < 7600000)
7053 else if (TARGET_PORTABLE_RUNTIME)
7057 if (!TARGET_LONG_CALLS && distance < 240000)
7060 if (TARGET_LONG_ABS_CALL && !flag_pic)
7067 /* INSN is a function call. It may have an unconditional jump in its delay slot.
7070 CALL_DEST is the routine we are calling. */
7073 output_millicode_call (rtx insn, rtx call_dest)
7075 int attr_length = get_attr_length (insn);
7076 int seq_length = dbr_sequence_length ();
7081 xoperands[0] = call_dest;
7082 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7084 /* Handle the common case where we are sure that the branch will
7085 reach the beginning of the $CODE$ subspace. The within-reach
7086 form of the $$sh_func_adrs call has a length of 28. Because
7087 it has an attribute type of multi, it never has a nonzero
7088 sequence length. The length of the $$sh_func_adrs call is the same
7089 as certain out-of-reach PIC calls to other routines. */
7090 if (!TARGET_LONG_CALLS
7091 && ((seq_length == 0
7092 && (attr_length == 12
7093 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7094 || (seq_length != 0 && attr_length == 8)))
7096 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7102 /* It might seem that one insn could be saved by accessing
7103 the millicode function using the linkage table. However,
7104 this doesn't work in shared libraries and other dynamically
7105 loaded objects. Using a pc-relative sequence also avoids
7106 problems related to the implicit use of the gp register. */
7107 output_asm_insn ("b,l .+8,%%r1", xoperands);
7111 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7112 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7116 xoperands[1] = gen_label_rtx ();
7117 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7118 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7119 CODE_LABEL_NUMBER (xoperands[1]));
7120 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7123 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7125 else if (TARGET_PORTABLE_RUNTIME)
7127 /* Pure portable runtime doesn't allow be/ble; we also don't
7128 have PIC support in the assembler/linker, so this sequence is safe. */
7131 /* Get the address of our target into %r1. */
7132 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7133 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7135 /* Get our return address into %r31. */
7136 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7137 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
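/* The {bl|b,l} .+8 above leaves its own address plus 8 in %r31; the
   addi advances it by another 8 so the millicode routine returns past
   the bv below and its delay slot. */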
7139 /* Jump to our target address in %r1. */
7140 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7144 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7146 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7148 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7152 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7153 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7155 if (TARGET_SOM || !TARGET_GAS)
7157 /* The HP assembler can generate relocations for the
7158 difference of two symbols. GAS can do this for a
7159 millicode symbol but not an arbitrary external
7160 symbol when generating SOM output. */
7161 xoperands[1] = gen_label_rtx ();
7162 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7163 CODE_LABEL_NUMBER (xoperands[1]));
7164 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7165 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7169 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7170 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7174 /* Jump to our target address in %r1. */
7175 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7179 if (seq_length == 0)
7180 output_asm_insn ("nop", xoperands);
7182 /* We are done if there isn't a jump in the delay slot. */
7183 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7186 /* This call has an unconditional jump in its delay slot. */
7187 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7189 /* See if the return address can be adjusted. Use the containing
7190 sequence insn's address. */
7191 if (INSN_ADDRESSES_SET_P ())
7193 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7194 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7195 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7197 if (VAL_14_BITS_P (distance))
7199 xoperands[1] = gen_label_rtx ();
7200 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7201 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7202 CODE_LABEL_NUMBER (xoperands[1]));
7205 /* ??? This branch may not reach its target. */
7206 output_asm_insn ("nop\n\tb,n %0", xoperands);
7209 /* ??? This branch may not reach its target. */
7210 output_asm_insn ("nop\n\tb,n %0", xoperands);
7212 /* Delete the jump. */
7213 PUT_CODE (NEXT_INSN (insn), NOTE);
7214 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7215 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7220 /* Return the attribute length of the call instruction INSN. The SIBCALL
7221 flag indicates whether INSN is a regular call or a sibling call. The
7222 length returned must be longer than the code actually generated by
7223 output_call. Since branch shortening is done before delay branch
7224 sequencing, there is no way to determine whether or not the delay
7225 slot will be filled during branch shortening. Even when the delay
7226 slot is filled, we may have to add a nop if the delay slot contains
7227 a branch that can't reach its target. Thus, we always have to include
7228 the delay slot in the length estimate. This used to be done in
7229 pa_adjust_insn_length but we do it here now as some sequences always
7230 fill the delay slot and we can save four bytes in the estimate for these sequences. */
7234 attr_length_call (rtx insn, int sibcall)
7240 rtx pat = PATTERN (insn);
7241 unsigned long distance = -1;
7243 if (INSN_ADDRESSES_SET_P ())
7245 unsigned long total;
7247 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7248 distance = (total + insn_current_reference_address (insn));
7249 if (distance < total)
7253 /* Determine if this is a local call. */
7254 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7255 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7257 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7259 call_decl = SYMBOL_REF_DECL (call_dest);
7260 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7262 /* pc-relative branch. */
7263 if (!TARGET_LONG_CALLS
7264 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7265 || distance < 240000))
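/* 240000 and 7600000 are conservative limits for the roughly +-256 kB
   reach of the 17-bit bl displacement and the roughly +-8 MB reach of
   the PA 2.0 22-bit b,l displacement. */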
7268 /* 64-bit plabel sequence. */
7269 else if (TARGET_64BIT && !local_call)
7270 length += sibcall ? 28 : 24;
7272 /* non-pic long absolute branch sequence. */
7273 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7276 /* long pc-relative branch sequence. */
7277 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7278 || (TARGET_64BIT && !TARGET_GAS)
7279 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7283 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7287 /* 32-bit plabel sequence. */
7293 length += length_fp_args (insn);
7303 if (!TARGET_NO_SPACE_REGS)
7311 /* INSN is a function call. It may have an unconditional jump in its delay slot.
7314 CALL_DEST is the routine we are calling. */
7317 output_call (rtx insn, rtx call_dest, int sibcall)
7319 int delay_insn_deleted = 0;
7320 int delay_slot_filled = 0;
7321 int seq_length = dbr_sequence_length ();
7322 tree call_decl = SYMBOL_REF_DECL (call_dest);
7323 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7326 xoperands[0] = call_dest;
7328 /* Handle the common case where we're sure that the branch will reach
7329 the beginning of the "$CODE$" subspace. This is the beginning of
7330 the current function if we are in a named section. */
7331 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7333 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7334 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7338 if (TARGET_64BIT && !local_call)
7340 /* ??? As far as I can tell, the HP linker doesn't support the
7341 long pc-relative sequence described in the 64-bit runtime
7342 architecture. So, we use a slightly longer indirect call. */
7343 xoperands[0] = get_deferred_plabel (call_dest);
7344 xoperands[1] = gen_label_rtx ();
7346 /* If this isn't a sibcall, we put the load of %r27 into the
7347 delay slot. We can't do this in a sibcall as we don't
7348 have a second call-clobbered scratch register available. */
7350 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7353 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7356 /* Now delete the delay insn. */
7357 PUT_CODE (NEXT_INSN (insn), NOTE);
7358 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7359 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7360 delay_insn_deleted = 1;
7363 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7364 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7365 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7369 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7370 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7371 output_asm_insn ("bve (%%r1)", xoperands);
7375 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7376 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7377 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7378 delay_slot_filled = 1;
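/* Both paths above assume the standard 64-bit function descriptor
   (plabel) layout: the entry point at offset 16 and the callee's
   global pointer at offset 24, which is why %r27 is reloaded from
   24(%r1) around the bve. */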
7383 int indirect_call = 0;
7385 /* Emit a long call. There are several different sequences
7386 of increasing length and complexity. In most cases,
7387 they don't allow an instruction in the delay slot. */
7388 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7389 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7390 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7395 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7397 && (!TARGET_PA_20 || indirect_call))
7399 /* A non-jump insn in the delay slot. By definition we can
7400 emit this insn before the call (and in fact before argument
7402 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7405 /* Now delete the delay insn. */
7406 PUT_CODE (NEXT_INSN (insn), NOTE);
7407 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7408 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7409 delay_insn_deleted = 1;
7412 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7414 /* This is the best sequence for making long calls in
7415 non-pic code. Unfortunately, GNU ld doesn't provide
7416 the stub needed for external calls, and GAS's support
7417 for this with the SOM linker is buggy. It is safe
7418 to use this for local calls. */
7419 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7421 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7425 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7428 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7430 output_asm_insn ("copy %%r31,%%r2", xoperands);
7431 delay_slot_filled = 1;
7436 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7437 || (TARGET_64BIT && !TARGET_GAS))
7439 /* The HP assembler and linker can handle relocations
7440 for the difference of two symbols. GAS and the HP
7441 linker can't do this when one of the symbols is
7443 xoperands[1] = gen_label_rtx ();
7444 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7445 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7446 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7447 CODE_LABEL_NUMBER (xoperands[1]));
7448 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7450 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7452 /* GAS currently can't generate the relocations that
7453 are needed for the SOM linker under HP-UX using this
7454 sequence. The GNU linker doesn't generate the stubs
7455 that are needed for external calls on TARGET_ELF32
7456 with this sequence. For now, we have to use a
7457 longer plabel sequence when using GAS. */
7458 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7459 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7461 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7466 /* Emit a long plabel-based call sequence. This is
7467 essentially an inline implementation of $$dyncall.
7468 We don't actually try to call $$dyncall as this is
7469 as difficult as calling the function itself. */
7470 xoperands[0] = get_deferred_plabel (call_dest);
7471 xoperands[1] = gen_label_rtx ();
7473 /* Since the call is indirect, FP arguments in registers
7474 need to be copied to the general registers. Then, the
7475 argument relocation stub will copy them back. */
7477 copy_fp_args (insn);
7481 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7482 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7483 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7487 output_asm_insn ("addil LR'%0-$global$,%%r27",
7489 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7493 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7494 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7495 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7496 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
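/* The four instructions above are the $$dyncall plabel test: bit 30
   of the pointer marks a shared-library plabel. When it is set, the
   depi clears the two low-order bits and the real entry point and
   linkage-table pointer (%r19) are fetched from offsets 0 and 4 of
   the plabel; otherwise the bb,>=,n skips the three fixup insns. */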
7498 if (!sibcall && !TARGET_PA_20)
7500 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7501 if (TARGET_NO_SPACE_REGS)
7502 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7504 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7511 output_asm_insn ("bve (%%r1)", xoperands);
7516 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7517 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7518 delay_slot_filled = 1;
7521 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7526 if (!TARGET_NO_SPACE_REGS)
7527 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7532 if (TARGET_NO_SPACE_REGS)
7533 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7535 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7539 if (TARGET_NO_SPACE_REGS)
7540 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7542 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7545 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7547 output_asm_insn ("copy %%r31,%%r2", xoperands);
7548 delay_slot_filled = 1;
7555 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7556 output_asm_insn ("nop", xoperands);
7558 /* We are done if there isn't a jump in the delay slot. */
7560 || delay_insn_deleted
7561 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7564 /* A sibcall should never have a branch in the delay slot. */
7565 gcc_assert (!sibcall);
7567 /* This call has an unconditional jump in its delay slot. */
7568 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7570 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7572 /* See if the return address can be adjusted. Use the containing
7573 sequence insn's address. */
7574 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7575 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7576 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7578 if (VAL_14_BITS_P (distance))
7580 xoperands[1] = gen_label_rtx ();
7581 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7582 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7583 CODE_LABEL_NUMBER (xoperands[1]));
7586 output_asm_insn ("nop\n\tb,n %0", xoperands);
7589 output_asm_insn ("b,n %0", xoperands);
7591 /* Delete the jump. */
7592 PUT_CODE (NEXT_INSN (insn), NOTE);
7593 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7594 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7599 /* Return the attribute length of the indirect call instruction INSN.
7600 The length must match the code generated by output_indirect_call.
7601 The returned length includes the delay slot. Currently, the delay
7602 slot of an indirect call sequence is not exposed and it is used by
7603 the sequence itself. */
7606 attr_length_indirect_call (rtx insn)
7608 unsigned long distance = -1;
7609 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7611 if (INSN_ADDRESSES_SET_P ())
7613 distance = (total + insn_current_reference_address (insn));
7614 if (distance < total)
7621 if (TARGET_FAST_INDIRECT_CALLS
7622 || (!TARGET_PORTABLE_RUNTIME
7623 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7624 || distance < 240000)))
7630 if (TARGET_PORTABLE_RUNTIME)
7633 /* Out of reach, can use ble. */
7638 output_indirect_call (rtx insn, rtx call_dest)
7644 xoperands[0] = call_dest;
7645 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7646 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7650 /* First the special case for kernels, level 0 systems, etc. */
7651 if (TARGET_FAST_INDIRECT_CALLS)
7652 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7654 /* Now the normal case -- we can reach $$dyncall directly or
7655 we're sure that we can get there via a long-branch stub.
7657 No need to check target flags as the length uniquely identifies
7658 the remaining cases. */
7659 if (attr_length_indirect_call (insn) == 8)
7661 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7662 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7663 variant of the B,L instruction can't be used on the SOM target. */
7664 if (TARGET_PA_20 && !TARGET_SOM)
7665 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7667 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7670 /* Long millicode call, but we are not generating PIC or portable runtime code. */
7672 if (attr_length_indirect_call (insn) == 12)
7673 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7675 /* Long millicode call for portable runtime. */
7676 if (attr_length_indirect_call (insn) == 20)
7677 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
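/* In the portable-runtime sequence above, blr %r0,%r2 is used only to
   compute the return address into %r2 (be/ble are not allowed here);
   the bv,n then enters $$dyncall through %r31. */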
7679 /* We need a long PIC call to $$dyncall. */
7680 xoperands[0] = NULL_RTX;
7681 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7682 if (TARGET_SOM || !TARGET_GAS)
7684 xoperands[0] = gen_label_rtx ();
7685 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7686 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7687 CODE_LABEL_NUMBER (xoperands[0]));
7688 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7692 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7693 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7696 output_asm_insn ("blr %%r0,%%r2", xoperands);
7697 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7701 /* Return the total length of the save and restore instructions needed for
7702 the data linkage table pointer (i.e., the PIC register) across the call
7703 instruction INSN. No-return calls do not require a save and restore.
7704 In addition, we may be able to avoid the save and restore for calls
7705 within the same translation unit. */
7708 attr_length_save_restore_dltp (rtx insn)
7710 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7716 /* In HPUX 8.0's shared library scheme, special relocations are needed
7717 for function labels if they might be passed to a function
7718 in a shared library (because shared libraries don't live in code
7719 space), and special magic is needed to construct their address. */
7722 hppa_encode_label (rtx sym)
7724 const char *str = XSTR (sym, 0);
7725 int len = strlen (str) + 1;
7728 p = newstr = alloca (len + 1);
7732 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
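/* The encoding consists of prepending '@' to the symbol name; this is
   the marker that pa_strip_name_encoding below removes again. */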
7736 pa_encode_section_info (tree decl, rtx rtl, int first)
7738 default_encode_section_info (decl, rtl, first);
7740 if (first && TEXT_SPACE_P (decl))
7742 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7743 if (TREE_CODE (decl) == FUNCTION_DECL)
7744 hppa_encode_label (XEXP (rtl, 0));
7748 /* This is sort of the inverse of pa_encode_section_info. */
7751 pa_strip_name_encoding (const char *str)
7753 str += (*str == '@');
7754 str += (*str == '*');
7759 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7761 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7764 /* Returns 1 if OP is a function label involved in a simple addition
7765 with a constant. Used to keep certain patterns from matching
7766 during instruction combination. */
7768 is_function_label_plus_const (rtx op)
7770 /* Strip off any CONST. */
7771 if (GET_CODE (op) == CONST)
7774 return (GET_CODE (op) == PLUS
7775 && function_label_operand (XEXP (op, 0), Pmode)
7776 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7779 /* Output assembly code for a thunk to FUNCTION. */
7782 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7783 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7786 static unsigned int current_thunk_number;
7787 int val_14 = VAL_14_BITS_P (delta);
7792 xoperands[0] = XEXP (DECL_RTL (function), 0);
7793 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7794 xoperands[2] = GEN_INT (delta);
7796 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7797 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7799 /* Output the thunk. We know that the function is in the same
7800 translation unit (i.e., the same space) as the thunk, and that
7801 thunks are output after their method. Thus, we don't need an
7802 external branch to reach the function. With SOM and GAS,
7803 functions and thunks are effectively in different sections.
7804 Thus, we can always use an IA-relative branch and the linker
7805 will add a long branch stub if necessary.
7807 However, we have to be careful when generating PIC code on the
7808 SOM port to ensure that the sequence does not transfer to an
7809 import stub for the target function as this could clobber the
7810 return value saved at SP-24. This would also apply to the
7811 32-bit linux port if the multi-space model is implemented. */
7812 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7813 && !(flag_pic && TREE_PUBLIC (function))
7814 && (TARGET_GAS || last_address < 262132))
7815 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7816 && ((targetm.have_named_sections
7817 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7818 /* The GNU 64-bit linker has rather poor stub management.
7819 So, we use a long branch from thunks that aren't in
7820 the same section as the target function. */
7822 && (DECL_SECTION_NAME (thunk_fndecl)
7823 != DECL_SECTION_NAME (function)))
7824 || ((DECL_SECTION_NAME (thunk_fndecl)
7825 == DECL_SECTION_NAME (function))
7826 && last_address < 262132)))
7827 || (!targetm.have_named_sections && last_address < 262132))))
7830 output_asm_insn ("addil L'%2,%%r26", xoperands);
7832 output_asm_insn ("b %0", xoperands);
7836 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7841 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7845 else if (TARGET_64BIT)
7847 /* We only have one call-clobbered scratch register, so we can't
7848 make use of the delay slot if delta doesn't fit in 14 bits. */
7851 output_asm_insn ("addil L'%2,%%r26", xoperands);
7852 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7855 output_asm_insn ("b,l .+8,%%r1", xoperands);
7859 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7860 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7864 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7865 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7870 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7871 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7876 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7880 else if (TARGET_PORTABLE_RUNTIME)
7882 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7883 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7886 output_asm_insn ("addil L'%2,%%r26", xoperands);
7888 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7892 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7897 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7901 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7903 /* The function is accessible from outside this module. The only
7904 way to avoid an import stub between the thunk and function is to
7905 call the function directly with an indirect sequence similar to
7906 that used by $$dyncall. This is possible because $$dyncall acts
7907 as the import stub in an indirect call. */
7908 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7909 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7910 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7911 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7912 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7913 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7914 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7915 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7916 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7920 output_asm_insn ("addil L'%2,%%r26", xoperands);
7926 output_asm_insn ("bve (%%r22)", xoperands);
7929 else if (TARGET_NO_SPACE_REGS)
7931 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7936 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7937 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7938 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7943 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7945 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7949 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7951 if (TARGET_SOM || !TARGET_GAS)
7953 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7954 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7958 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7959 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7963 output_asm_insn ("addil L'%2,%%r26", xoperands);
7965 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7969 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7974 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7981 output_asm_insn ("addil L'%2,%%r26", xoperands);
7983 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7984 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7988 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7993 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7998 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8000 if (TARGET_SOM && TARGET_GAS)
8002 /* We are done with this subspace except possibly for some additional
8003 debug information. Forget that we are in this subspace to ensure
8004 that the next function is output in its own subspace. */
8006 cfun->machine->in_nsubspa = 2;
8009 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8011 switch_to_section (data_section);
8012 output_asm_insn (".align 4", xoperands);
8013 ASM_OUTPUT_LABEL (file, label);
8014 output_asm_insn (".word P'%0", xoperands);
8017 current_thunk_number++;
8018 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8019 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8020 last_address += nbytes;
8021 update_total_code_bytes (nbytes);
8024 /* Only direct calls to static functions are allowed to be sibling (tail) call optimized.
8027 This restriction is necessary because some linker-generated stubs will
8028 store return pointers into rp' in some cases, which might clobber a
8029 live value already in rp'.
8031 In a sibcall the current function and the target function share stack
8032 space. Thus if the path to the current function and the path to the
8033 target function save a value in rp', they save the value into the
8034 same stack slot, which has undesirable consequences.
8036 Because of the deferred binding nature of shared libraries any function
8037 with external scope could be in a different load module and thus require
8038 rp' to be saved when calling that function. So sibcall optimizations
8039 can only be safe for static functions.
8041 Note that GCC never needs return value relocations, so we don't have to
8042 worry about static calls with return value relocations (which require
8045 It is safe to perform a sibcall optimization when the target function
8046 will never return. */
8048 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8050 if (TARGET_PORTABLE_RUNTIME)
8053 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8054 single subspace mode and the call is not indirect. As far as I know,
8055 there is no operating system support for the multiple subspace mode.
8056 It might be possible to support indirect calls if we didn't use
8057 $$dyncall (see the indirect sequence generated in output_call). */
8059 return (decl != NULL_TREE);
8061 /* Sibcalls are not ok because the arg pointer register is not a fixed
8062 register. This prevents the sibcall optimization from occurring. In
8063 addition, there are problems with stub placement using GNU ld. This
8064 is because a normal sibcall branch uses a 17-bit relocation while
8065 a regular call branch uses a 22-bit relocation. As a result, more
8066 care needs to be taken in the placement of long-branch stubs. */
8070 /* Sibcalls are only ok within a translation unit. */
8071 return (decl && !TREE_PUBLIC (decl));
8074 /* ??? Addition is not commutative on the PA due to the weird implicit
8075 space register selection rules for memory addresses. Therefore, we
8076 don't consider a + b == b + a, as this might be inside a MEM. */
8078 pa_commutative_p (rtx x, int outer_code)
8080 return (COMMUTATIVE_P (x)
8081 && (TARGET_NO_SPACE_REGS
8082 || (outer_code != UNKNOWN && outer_code != MEM)
8083 || GET_CODE (x) != PLUS));
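/* For example, inside a MEM the base register of (plus base index)
   determines the implicit space register, so swapping the operands of
   a PLUS could change which space is addressed; hence PLUS is only
   treated as commutative when space registers are disabled or the
   expression is known not to be part of a memory address. */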
8086 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8087 use in fmpyadd instructions. */
8089 fmpyaddoperands (rtx *operands)
8091 enum machine_mode mode = GET_MODE (operands[0]);
8093 /* Must be a floating point mode. */
8094 if (mode != SFmode && mode != DFmode)
8097 /* All modes must be the same. */
8098 if (! (mode == GET_MODE (operands[1])
8099 && mode == GET_MODE (operands[2])
8100 && mode == GET_MODE (operands[3])
8101 && mode == GET_MODE (operands[4])
8102 && mode == GET_MODE (operands[5])))
8105 /* All operands must be registers. */
8106 if (! (GET_CODE (operands[1]) == REG
8107 && GET_CODE (operands[2]) == REG
8108 && GET_CODE (operands[3]) == REG
8109 && GET_CODE (operands[4]) == REG
8110 && GET_CODE (operands[5]) == REG))
8113 /* Only 2 real operands to the addition. One of the input operands must
8114 be the same as the output operand. */
8115 if (! rtx_equal_p (operands[3], operands[4])
8116 && ! rtx_equal_p (operands[3], operands[5]))
8119 /* Inout operand of add cannot conflict with any operands from multiply. */
8120 if (rtx_equal_p (operands[3], operands[0])
8121 || rtx_equal_p (operands[3], operands[1])
8122 || rtx_equal_p (operands[3], operands[2]))
8125 /* Multiply cannot feed into the addition operands. */
8126 if (rtx_equal_p (operands[4], operands[0])
8127 || rtx_equal_p (operands[5], operands[0]))
8130 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8132 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8133 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8134 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8135 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8136 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8137 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8140 /* Passed. Operands are suitable for fmpyadd. */
8144 #if !defined(USE_COLLECT2)
8146 pa_asm_out_constructor (rtx symbol, int priority)
8148 if (!function_label_operand (symbol, VOIDmode))
8149 hppa_encode_label (symbol);
8151 #ifdef CTORS_SECTION_ASM_OP
8152 default_ctor_section_asm_out_constructor (symbol, priority);
8154 # ifdef TARGET_ASM_NAMED_SECTION
8155 default_named_section_asm_out_constructor (symbol, priority);
8157 default_stabs_asm_out_constructor (symbol, priority);
8163 pa_asm_out_destructor (rtx symbol, int priority)
8165 if (!function_label_operand (symbol, VOIDmode))
8166 hppa_encode_label (symbol);
8168 #ifdef DTORS_SECTION_ASM_OP
8169 default_dtor_section_asm_out_destructor (symbol, priority);
8171 # ifdef TARGET_ASM_NAMED_SECTION
8172 default_named_section_asm_out_destructor (symbol, priority);
8174 default_stabs_asm_out_destructor (symbol, priority);
8180 /* This function places uninitialized global data in the bss section.
8181 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8182 function on the SOM port to prevent uninitialized global data from
8183 being placed in the data section. */
8186 pa_asm_output_aligned_bss (FILE *stream,
8188 unsigned HOST_WIDE_INT size,
8191 switch_to_section (bss_section);
8192 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8194 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8195 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8198 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8199 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8202 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8203 ASM_OUTPUT_LABEL (stream, name);
8204 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8207 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8208 that doesn't allow the alignment of global common storage to be directly
8209 specified. The SOM linker aligns common storage based on the rounded
8210 value of the NUM_BYTES parameter in the .comm directive. It's not
8211 possible to use the .align directive as it doesn't affect the alignment
8212 of the label associated with a .comm directive. */
8215 pa_asm_output_aligned_common (FILE *stream,
8217 unsigned HOST_WIDE_INT size,
8220 unsigned int max_common_align;
8222 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8223 if (align > max_common_align)
8225 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8226 "for global common data. Using %u",
8227 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8228 align = max_common_align;
8231 switch_to_section (bss_section);
8233 assemble_name (stream, name);
8234 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8235 MAX (size, align / BITS_PER_UNIT));
8238 /* We can't use .comm for local common storage as the SOM linker effectively
8239 treats the symbol as universal and uses the same storage for local symbols
8240 with the same name in different object files. The .block directive
8241 reserves an uninitialized block of storage. However, it's not common
8242 storage. Fortunately, GCC never requests common storage with the same
8243 name in any given translation unit. */
8246 pa_asm_output_aligned_local (FILE *stream,
8248 unsigned HOST_WIDE_INT size,
8251 switch_to_section (bss_section);
8252 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8255 fprintf (stream, "%s", LOCAL_ASM_OP);
8256 assemble_name (stream, name);
8257 fprintf (stream, "\n");
8260 ASM_OUTPUT_LABEL (stream, name);
8261 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8264 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8265 use in fmpysub instructions. */
8267 fmpysuboperands (rtx *operands)
8269 enum machine_mode mode = GET_MODE (operands[0]);
8271 /* Must be a floating point mode. */
8272 if (mode != SFmode && mode != DFmode)
8275 /* All modes must be the same. */
8276 if (! (mode == GET_MODE (operands[1])
8277 && mode == GET_MODE (operands[2])
8278 && mode == GET_MODE (operands[3])
8279 && mode == GET_MODE (operands[4])
8280 && mode == GET_MODE (operands[5])))
8283 /* All operands must be registers. */
8284 if (! (GET_CODE (operands[1]) == REG
8285 && GET_CODE (operands[2]) == REG
8286 && GET_CODE (operands[3]) == REG
8287 && GET_CODE (operands[4]) == REG
8288 && GET_CODE (operands[5]) == REG))
8291 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8292 operation, so operands[4] must be the same as operands[3]. */
8293 if (! rtx_equal_p (operands[3], operands[4]))
8296 /* Multiply cannot feed into the subtraction. */
8297 if (rtx_equal_p (operands[5], operands[0]))
8300 /* Inout operand of sub cannot conflict with any operands from multiply. */
8301 if (rtx_equal_p (operands[3], operands[0])
8302 || rtx_equal_p (operands[3], operands[1])
8303 || rtx_equal_p (operands[3], operands[2]))
8306 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8308 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8309 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8310 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8311 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8312 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8313 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8316 /* Passed. Operands are suitable for fmpysub. */
8320 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8321 constants for shadd instructions. */
8323 shadd_constant_p (int val)
8325 if (val == 2 || val == 4 || val == 8)
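/* These values correspond to the sh1add, sh2add and sh3add
   instructions, which scale their first operand by 2, 4 or 8 before
   adding. */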
8331 /* Return 1 if OP is valid as a base or index register in a REG+REG address. */
8335 borx_reg_operand (rtx op, enum machine_mode mode)
8337 if (GET_CODE (op) != REG)
8340 /* We must reject virtual registers as the only expressions that
8341 can be instantiated are REG and REG+CONST. */
8342 if (op == virtual_incoming_args_rtx
8343 || op == virtual_stack_vars_rtx
8344 || op == virtual_stack_dynamic_rtx
8345 || op == virtual_outgoing_args_rtx
8346 || op == virtual_cfa_rtx)
8349 /* While it's always safe to index off the frame pointer, it's not
8350 profitable to do so when the frame pointer is being eliminated. */
8351 if (!reload_completed
8352 && flag_omit_frame_pointer
8353 && !current_function_calls_alloca
8354 && op == frame_pointer_rtx)
8357 return register_operand (op, mode);
8360 /* Return 1 if this operand is anything other than a hard register. */
8363 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8365 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8368 /* Return 1 if INSN branches forward. Should be using insn_addresses
8369 to avoid walking through all the insns... */
8371 forward_branch_p (rtx insn)
8373 rtx label = JUMP_LABEL (insn);
8380 insn = NEXT_INSN (insn);
8383 return (insn == label);
8386 /* Return 1 if OP is an equality comparison, else return 0. */
8388 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8390 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8393 /* Return 1 if INSN is in the delay slot of a call instruction. */
8395 jump_in_call_delay (rtx insn)
8398 if (GET_CODE (insn) != JUMP_INSN)
8401 if (PREV_INSN (insn)
8402 && PREV_INSN (PREV_INSN (insn))
8403 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8405 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8407 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8408 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8415 /* Output an unconditional move and branch insn. */
8418 output_parallel_movb (rtx *operands, rtx insn)
8420 int length = get_attr_length (insn);
8422 /* These are the cases in which we win. */
8424 return "mov%I1b,tr %1,%0,%2";
8426 /* None of the following cases win, but they don't lose either. */
8429 if (dbr_sequence_length () == 0)
8431 /* Nothing in the delay slot; fake it by putting the combined
8432 insn (the copy or add) in the delay slot of a bl. */
8433 if (GET_CODE (operands[1]) == CONST_INT)
8434 return "b %2\n\tldi %1,%0";
8436 return "b %2\n\tcopy %1,%0";
8440 /* Something in the delay slot, but we've got a long branch. */
8441 if (GET_CODE (operands[1]) == CONST_INT)
8442 return "ldi %1,%0\n\tb %2";
8444 return "copy %1,%0\n\tb %2";
8448 if (GET_CODE (operands[1]) == CONST_INT)
8449 output_asm_insn ("ldi %1,%0", operands);
8451 output_asm_insn ("copy %1,%0", operands);
8452 return output_lbranch (operands[2], insn, 1);
/* Output an unconditional add and branch insn.  */

const char *
output_parallel_addb (rtx *operands, rtx insn)
{
  int length = get_attr_length (insn);

  /* To make life easy we want operand0 to be the shared input/output
     operand and operand1 to be the readonly operand.  */
  if (operands[0] == operands[1])
    operands[1] = operands[2];

  /* These are the cases in which we win.  */
  if (length == 4)
    return "add%I1b,tr %1,%0,%3";

  /* None of the following cases win, but they don't lose either.  */
  if (length == 8)
    {
      if (dbr_sequence_length () == 0)
	/* Nothing in the delay slot, fake it by putting the combined
	   insn (the copy or add) in the delay slot of a bl.  */
	return "b %3\n\tadd%I1 %1,%0,%0";
      else
	/* Something in the delay slot, but we've got a long branch.  */
	return "add%I1 %1,%0,%0\n\tb %3";
    }

  output_asm_insn ("add%I1 %1,%0,%0", operands);
  return output_lbranch (operands[3], insn, 1);
}
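/* For illustration (operands and label are hypothetical), the length 4
   cases above produce a single combined insn such as

	movb,tr %r4,%r3,L$0012	; copy %r4 into %r3, branch always
	addb,tr %r4,%r3,L$0012	; add %r4 into %r3, branch always

   where the ",tr" completer makes the normally conditional branch
   unconditional.  */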
/* Return nonzero if INSN (a jump insn) immediately follows a call
   to a named function.  This is used to avoid filling the delay slot
   of the jump since it can usually be eliminated by modifying RP in
   the delay slot of the call.  */

int
following_call (rtx insn)
{
  if (! TARGET_JUMP_IN_DELAY)
    return 0;

  /* Find the previous real insn, skipping NOTEs.  */
  insn = PREV_INSN (insn);
  while (insn && GET_CODE (insn) == NOTE)
    insn = PREV_INSN (insn);

  /* Check for CALL_INSNs and millicode calls.  */
  if (insn
      && ((GET_CODE (insn) == CALL_INSN
	   && get_attr_type (insn) != TYPE_DYNCALL)
	  || (GET_CODE (insn) == INSN
	      && GET_CODE (PATTERN (insn)) != SEQUENCE
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && get_attr_type (insn) == TYPE_MILLI)))
    return 1;

  return 0;
}
/* We use this hook to perform a PA specific optimization which is difficult
   to do in earlier passes.

   We want the delay slots of branches within jump tables to be filled.
   None of the compiler passes at the moment even has the notion that a
   PA jump table doesn't contain addresses, but instead contains actual
   instructions!

   Because we actually jump into the table, the addresses of each entry
   must stay constant in relation to the beginning of the table (which
   itself must stay constant relative to the instruction to jump into
   it).  I don't believe we can guarantee earlier passes of the compiler
   will adhere to those rules.

   So, late in the compilation process we find all the jump tables, and
   expand them into real code -- e.g. each entry in the jump table vector
   will get an appropriate label followed by a jump to the final target.

   Reorg and the final jump pass can then optimize these branches and
   fill their delay slots.  We end up with smaller, more efficient code.

   The jump instructions within the table are special; we must be able
   to identify them during assembly output (if the jumps don't get filled
   we need to emit a nop rather than nullifying the delay slot).  We
   identify jumps in switch tables by using insns with the attribute
   type TYPE_BTABLE_BRANCH.

   We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
   insns.  This serves two purposes, first it prevents jump.c from
   noticing that the last N entries in the table jump to the instruction
   immediately after the table and deleting the jumps.  Second, those
   insns mark where we should emit .begin_brtab and .end_brtab directives
   when using GAS (allows for better link time optimizations).  */
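/* To illustrate (labels are hypothetical), a dispatch table such as

	L$0040:	.word L$0041
		.word L$0042

   is rewritten by the loop below into executable entries, one labeled
   short jump per table slot:

	L$0040:	b L$0041
		nop
		b L$0042
		nop

   where reorg may later fill each delay slot, or assembly output emits
   the nop if the slot stays empty.  */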
static void
pa_reorg (void)
{
  rtx insn;

  remove_useless_addtr_insns (1);

  if (pa_cpu < PROCESSOR_8000)
    pa_combine_instructions ();

  /* This is fairly cheap, so always run it if optimizing.  */
  if (optimize > 0 && !TARGET_BIG_SWITCH)
    {
      /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  rtx pattern, tmp, location, label;
	  unsigned int length, i;

	  /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode.  */
	  if (GET_CODE (insn) != JUMP_INSN
	      || (GET_CODE (PATTERN (insn)) != ADDR_VEC
		  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
	    continue;

	  /* Emit marker for the beginning of the branch table.  */
	  emit_insn_before (gen_begin_brtab (), insn);

	  pattern = PATTERN (insn);
	  location = PREV_INSN (insn);
	  length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);

	  for (i = 0; i < length; i++)
	    {
	      /* Emit a label before each jump to keep jump.c from
		 removing this code.  */
	      tmp = gen_label_rtx ();
	      LABEL_NUSES (tmp) = 1;
	      emit_label_after (tmp, location);
	      location = NEXT_INSN (location);

	      if (GET_CODE (pattern) == ADDR_VEC)
		label = XEXP (XVECEXP (pattern, 0, i), 0);
	      else
		label = XEXP (XVECEXP (pattern, 1, i), 0);

	      tmp = gen_short_jump (label);

	      /* Emit the jump itself.  */
	      tmp = emit_jump_insn_after (tmp, location);
	      JUMP_LABEL (tmp) = label;
	      LABEL_NUSES (label)++;
	      location = NEXT_INSN (location);

	      /* Emit a BARRIER after the jump.  */
	      emit_barrier_after (location);
	      location = NEXT_INSN (location);
	    }

	  /* Emit marker for the end of the branch table.  */
	  emit_insn_before (gen_end_brtab (), location);
	  location = NEXT_INSN (location);
	  emit_barrier_after (location);

	  /* Delete the ADDR_VEC or ADDR_DIFF_VEC.  */
	  delete_insn (insn);
	}
    }
  else
    {
      /* Still need brtab marker insns.  FIXME: the presence of these
	 markers disables output of the branch table to readonly memory,
	 and any alignment directives that might be needed.  Possibly,
	 the begin_brtab insn should be output before the label for the
	 table.  This doesn't matter at the moment since the tables are
	 always output in the text section.  */
      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
	{
	  /* Find an ADDR_VEC insn.  */
	  if (GET_CODE (insn) != JUMP_INSN
	      || (GET_CODE (PATTERN (insn)) != ADDR_VEC
		  && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
	    continue;

	  /* Now generate markers for the beginning and end of the
	     branch table.  */
	  emit_insn_before (gen_begin_brtab (), insn);
	  emit_insn_after (gen_end_brtab (), insn);
	}
    }
}
/* The PA has a number of odd instructions which can perform multiple
   tasks at once.  On first generation PA machines (PA1.0 and PA1.1)
   it may be profitable to combine two instructions into one instruction
   with two outputs.  It's not profitable on PA2.0 machines because the
   two outputs would take two slots in the reorder buffers.

   This routine finds instructions which can be combined and combines
   them.  We only support some of the potential combinations, and we
   only try common ways to find suitable instructions.

      * addb can add two registers or a register and a small integer
      and jump to a nearby (+-8k) location.  Normally the jump to the
      nearby location is conditional on the result of the add, but by
      using the "true" condition we can make the jump unconditional.
      Thus addb can perform two independent operations in one insn.

      * movb is similar to addb in that it can perform a reg->reg
      or small immediate->reg copy and jump to a nearby (+-8k) location.

      * fmpyadd and fmpysub can perform a FP multiply and either an
      FP add or FP sub if the operands of the multiply and add/sub are
      independent (there are other minor restrictions).  Note both
      the fmpy and fadd/fsub can in theory move to better spots according
      to data dependencies, but for now we require the fmpy stay at a
      fixed location.

      * Many of the memory operations can perform pre & post updates
      of index registers.  GCC's pre/post increment/decrement addressing
      is far too simple to take advantage of all the possibilities.  This
      pass may not be suitable since those insns may not be independent.

      * comclr can compare two ints or an int and a register, nullify
      the following instruction and zero some other register.  This
      is more difficult to use as it's harder to find an insn which
      will generate a comclr than finding something like an unconditional
      branch.  (conditional moves & long branches create comclr insns).

      * Most arithmetic operations can conditionally skip the next
      instruction.  They can be viewed as "perform this operation
      and conditionally jump to this nearby location" (where nearby
      is an insn away).  These are difficult to use due to the
      branch length restrictions.  */
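/* To illustrate the fmpy/fadd case (register choices are hypothetical),
   the pass below can replace the independent pair

	fmpy,dbl %fr4,%fr5,%fr6
	fadd,dbl %fr7,%fr8,%fr9

   with a single fmpyadd instruction performing both operations in one
   issue slot; the exact operand order is dictated by the fmpyadd
   patterns in pa.md.  */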
static void
pa_combine_instructions (void)
{
  rtx anchor, new, floater;

  /* This can get expensive since the basic algorithm is on the
     order of O(n^2) (or worse).  Only do it for -O2 or higher
     levels of optimization.  */
  if (optimize < 2)
    return;

  /* Walk down the list of insns looking for "anchor" insns which
     may be combined with "floating" insns.  As the name implies,
     "anchor" instructions don't move, while "floating" insns may
     move around.  */
  new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
  new = make_insn_raw (new);

  for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
    {
      enum attr_pa_combine_type anchor_attr;
      enum attr_pa_combine_type floater_attr;

      /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
	 Also ignore any special USE insns.  */
      if ((GET_CODE (anchor) != INSN
	   && GET_CODE (anchor) != JUMP_INSN
	   && GET_CODE (anchor) != CALL_INSN)
	  || GET_CODE (PATTERN (anchor)) == USE
	  || GET_CODE (PATTERN (anchor)) == CLOBBER
	  || GET_CODE (PATTERN (anchor)) == ADDR_VEC
	  || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
	continue;

      anchor_attr = get_attr_pa_combine_type (anchor);
      /* See if anchor is an insn suitable for combination.  */
      if (anchor_attr == PA_COMBINE_TYPE_FMPY
	  || anchor_attr == PA_COMBINE_TYPE_FADDSUB
	  || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
	      && ! forward_branch_p (anchor)))
	{
	  /* Scan backwards from the anchor for a suitable floater.  */
	  for (floater = PREV_INSN (anchor);
	       floater;
	       floater = PREV_INSN (floater))
	    {
	      if (GET_CODE (floater) == NOTE
		  || (GET_CODE (floater) == INSN
		      && (GET_CODE (PATTERN (floater)) == USE
			  || GET_CODE (PATTERN (floater)) == CLOBBER)))
		continue;

	      /* Anything except a regular INSN will stop our search.  */
	      if (GET_CODE (floater) != INSN
		  || GET_CODE (PATTERN (floater)) == ADDR_VEC
		  || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
		{
		  floater = NULL_RTX;
		  break;
		}

	      /* See if FLOATER is suitable for combination with the
		 anchor.  */
	      floater_attr = get_attr_pa_combine_type (floater);
	      if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		   && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		  || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		      && floater_attr == PA_COMBINE_TYPE_FMPY))
		{
		  /* If ANCHOR and FLOATER can be combined, then we're
		     done with this pass.  */
		  if (pa_can_combine_p (new, anchor, floater, 0,
					SET_DEST (PATTERN (floater)),
					XEXP (SET_SRC (PATTERN (floater)), 0),
					XEXP (SET_SRC (PATTERN (floater)), 1)))
		    break;
		}
	      else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
		       && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
		{
		  if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
		    {
		      if (pa_can_combine_p (new, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)), 0),
					    XEXP (SET_SRC (PATTERN (floater)), 1)))
			break;
		    }
		  else
		    {
		      if (pa_can_combine_p (new, anchor, floater, 0,
					    SET_DEST (PATTERN (floater)),
					    SET_SRC (PATTERN (floater)),
					    SET_SRC (PATTERN (floater))))
			break;
		    }
		}
	    }

	  /* If we didn't find anything on the backwards scan try forwards.  */
	  if (!floater
	      && (anchor_attr == PA_COMBINE_TYPE_FMPY
		  || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
	    {
	      for (floater = anchor; floater; floater = NEXT_INSN (floater))
		{
		  if (GET_CODE (floater) == NOTE
		      || (GET_CODE (floater) == INSN
			  && (GET_CODE (PATTERN (floater)) == USE
			      || GET_CODE (PATTERN (floater)) == CLOBBER)))
		    continue;

		  /* Anything except a regular INSN will stop our search.  */
		  if (GET_CODE (floater) != INSN
		      || GET_CODE (PATTERN (floater)) == ADDR_VEC
		      || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
		    {
		      floater = NULL_RTX;
		      break;
		    }

		  /* See if FLOATER is suitable for combination with the
		     anchor.  */
		  floater_attr = get_attr_pa_combine_type (floater);
		  if ((anchor_attr == PA_COMBINE_TYPE_FMPY
		       && floater_attr == PA_COMBINE_TYPE_FADDSUB)
		      || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
			  && floater_attr == PA_COMBINE_TYPE_FMPY))
		    {
		      /* If ANCHOR and FLOATER can be combined, then we're
			 done with this pass.  */
		      if (pa_can_combine_p (new, anchor, floater, 1,
					    SET_DEST (PATTERN (floater)),
					    XEXP (SET_SRC (PATTERN (floater)),
						  0),
					    XEXP (SET_SRC (PATTERN (floater)),
						  1)))
			break;
		    }
		}
	    }

	  /* FLOATER will be nonzero if we found a suitable floating
	     insn for combination with ANCHOR.  */
	  if (floater
	      && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
		  || anchor_attr == PA_COMBINE_TYPE_FMPY))
	    {
	      /* Emit the new instruction and delete the old anchor.  */
	      emit_insn_before (gen_rtx_PARALLEL
				(VOIDmode,
				 gen_rtvec (2, PATTERN (anchor),
					    PATTERN (floater))),
				anchor);

	      PUT_CODE (anchor, NOTE);
	      NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
	      NOTE_SOURCE_FILE (anchor) = 0;

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);

	      continue;
	    }
	  else if (floater
		   && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
	    {
	      rtx temp;

	      /* Emit the new_jump instruction and delete the old anchor.  */
	      temp
		= emit_jump_insn_before (gen_rtx_PARALLEL
					 (VOIDmode,
					  gen_rtvec (2, PATTERN (anchor),
						     PATTERN (floater))),
					 anchor);

	      JUMP_LABEL (temp) = JUMP_LABEL (anchor);
	      PUT_CODE (anchor, NOTE);
	      NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
	      NOTE_SOURCE_FILE (anchor) = 0;

	      /* Emit a special USE insn for FLOATER, then delete
		 the floating insn.  */
	      emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
	      delete_insn (floater);
	      continue;
	    }
	}
    }
}
static int
pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
		  rtx src1, rtx src2)
{
  int insn_code_number;
  rtx start, end;

  /* Create a PARALLEL with the patterns of ANCHOR and
     FLOATER, try to recognize it, then test constraints
     for the resulting pattern.

     If the pattern doesn't match or the constraints
     aren't met keep searching for a suitable floater
     insn.  */
  XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
  XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
  INSN_CODE (new) = -1;
  insn_code_number = recog_memoized (new);
  if (insn_code_number < 0
      || (extract_insn (new), ! constrain_operands (1)))
    return 0;

  if (reversed)
    {
      start = anchor;
      end = floater;
    }
  else
    {
      start = floater;
      end = anchor;
    }

  /* There are up to three operands to consider.  One
     output and two inputs.

     The output must not be used between FLOATER & ANCHOR
     exclusive.  The inputs must not be set between
     FLOATER and ANCHOR exclusive.  */

  if (reg_used_between_p (dest, start, end))
    return 0;

  if (reg_set_between_p (src1, start, end))
    return 0;

  if (reg_set_between_p (src2, start, end))
    return 0;

  /* If we get here, then everything is good.  */
  return 1;
}
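/* Note on the scratch insn: NEW is the reusable two-element PARALLEL
   allocated once in pa_combine_instructions, roughly

	(parallel [(set ... anchor pattern ...)
		   (set ... floater pattern ...)])

   Recycling one scratch insn avoids allocating a fresh PARALLEL for
   every candidate pair that is tested and rejected.  */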
/* Return nonzero if references for INSN are delayed.

   Millicode insns are actually function calls with some special
   constraints on arguments and register usage.

   Millicode calls always expect their arguments in the integer argument
   registers, and always return their result in %r29 (ret1).  They
   are expected to clobber their arguments, %r1, %r29, and the return
   pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.

   This function tells reorg that the references to arguments and
   millicode calls do not appear to happen until after the millicode call.
   This allows reorg to put insns which set the argument registers into the
   delay slot of the millicode call -- thus they act more like traditional
   CALL_INSNs.

   Note we cannot consider side effects of the insn to be delayed because
   the branch and link insn will clobber the return pointer.  If we happened
   to use the return pointer in the delay slot of the call, then we lose.

   get_attr_type will try to recognize the given insn, so make sure to
   filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
   in particular.  */

int
insn_refs_are_delayed (rtx insn)
{
  return ((GET_CODE (insn) == INSN
	   && GET_CODE (PATTERN (insn)) != SEQUENCE
	   && GET_CODE (PATTERN (insn)) != USE
	   && GET_CODE (PATTERN (insn)) != CLOBBER
	   && get_attr_type (insn) == TYPE_MILLI));
}
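/* For example (a 32-bit sketch with illustrative values), reorg may
   fill a millicode call's delay slot with an argument setup insn:

	bl $$mulI,%r31		; millicode multiply, result in %r29
	ldi 7,%r25		; second operand set up in the delay slot

   This is only valid because the setup insn does not reference the
   return pointer %r31 clobbered by the branch and link.  */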
/* On the HP-PA the value is found in register(s) 28(-29), unless
   the mode is SF or DF.  Then the value is returned in fr4 (32).

   This must perform the same promotions as PROMOTE_MODE, else
   TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.

   Small structures must be returned in a PARALLEL on PA64 in order
   to match the HP Compiler ABI.  */

rtx
function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode valmode;

  if (AGGREGATE_TYPE_P (valtype)
      || TREE_CODE (valtype) == COMPLEX_TYPE
      || TREE_CODE (valtype) == VECTOR_TYPE)
    {
      if (TARGET_64BIT)
	{
	  /* Aggregates with a size less than or equal to 128 bits are
	     returned in GR 28(-29).  They are left justified.  The pad
	     bits are undefined.  Larger aggregates are returned in
	     memory.  */
	  rtx loc[2];
	  int i, offset = 0;
	  int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;

	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, 28 + i),
					  GEN_INT (offset));
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
	}
      else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
	{
	  /* Aggregates 5 to 8 bytes in size are returned in general
	     registers r28-r29 in the same manner as other non
	     floating-point objects.  The data is right-justified and
	     zero-extended to 64 bits.  This is opposite to the normal
	     justification used on big endian targets and requires
	     special treatment.  */
	  rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
				       gen_rtx_REG (DImode, 28), const0_rtx);
	  return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	}
    }

  if ((INTEGRAL_TYPE_P (valtype)
       && TYPE_PRECISION (valtype) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    valmode = word_mode;
  else
    valmode = TYPE_MODE (valtype);

  if (TREE_CODE (valtype) == REAL_TYPE
      && !AGGREGATE_TYPE_P (valtype)
      && TYPE_MODE (valtype) != TFmode
      && !TARGET_SOFT_FLOAT)
    return gen_rtx_REG (valmode, 32);

  return gen_rtx_REG (valmode, 28);
}
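/* Examples of the rules above: a short or int result comes back in
   %r28 promoted to word_mode; an SFmode or DFmode result comes back in
   register 32 (%fr4) unless soft float is in effect; and on TARGET_64BIT
   a 16-byte aggregate is returned as a PARALLEL spanning %r28-%r29.  */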
/* Return the location of a parameter that is passed in a register or NULL
   if the parameter has any component that is passed in memory.

   This is new code and will be pushed into the net sources after
   further testing.

   ??? We might want to restructure this so that it looks more like other
   ports.  */
rtx
function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
	      int named ATTRIBUTE_UNUSED)
{
  int max_arg_words = (TARGET_64BIT ? 8 : 4);
  int alignment = 0;
  int arg_size;
  int fpr_reg_base;
  int gpr_reg_base;
  rtx retval;

  if (mode == VOIDmode)
    return NULL_RTX;

  arg_size = FUNCTION_ARG_SIZE (mode, type);

  /* If this arg would be passed partially or totally on the stack, then
     this routine should return zero.  pa_arg_partial_bytes will
     handle arguments which are split between regs and stack slots if
     the ABI mandates split arguments.  */
  if (!TARGET_64BIT)
    {
      /* The 32-bit ABI does not split arguments.  */
      if (cum->words + arg_size > max_arg_words)
	return NULL_RTX;
    }
  else
    {
      if (arg_size > 1)
	alignment = cum->words & 1;
      if (cum->words + alignment >= max_arg_words)
	return NULL_RTX;
    }

  /* The 32bit ABIs and the 64bit ABIs are rather different,
     particularly in their handling of FP registers.  We might
     be able to cleverly share code between them, but I'm not
     going to bother in the hope that splitting them up results
     in code that is more easily understood.  */

  if (TARGET_64BIT)
    {
      /* Advance the base registers to their current locations.

	 Remember, gprs grow towards smaller register numbers while
	 fprs grow to higher register numbers.  Also remember that
	 although FP regs are 32-bit addressable, we pretend that
	 the registers are 64-bits wide.  */
      gpr_reg_base = 26 - cum->words;
      fpr_reg_base = 32 + cum->words;

      /* Arguments wider than one word and small aggregates need special
	 treatment.  */
      if (arg_size > 1
	  || mode == BLKmode
	  || (type && (AGGREGATE_TYPE_P (type)
		       || TREE_CODE (type) == COMPLEX_TYPE
		       || TREE_CODE (type) == VECTOR_TYPE)))
	{
	  /* Double-extended precision (80-bit), quad-precision (128-bit)
	     and aggregates including complex numbers are aligned on
	     128-bit boundaries.  The first eight 64-bit argument slots
	     are associated one-to-one, with general registers r26
	     through r19, and also with floating-point registers fr4
	     through fr11.  Arguments larger than one word are always
	     passed in general registers.

	     Using a PARALLEL with a word mode register results in left
	     justified data on a big-endian target.  */

	  rtx loc[8];
	  int i, offset = 0, ub = arg_size;

	  /* Align the base register.  */
	  gpr_reg_base -= alignment;

	  ub = MIN (ub, max_arg_words - cum->words - alignment);
	  for (i = 0; i < ub; i++)
	    {
	      loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
					  gen_rtx_REG (DImode, gpr_reg_base),
					  GEN_INT (offset));
	      gpr_reg_base -= 1;
	      offset += 8;
	    }

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
	}
    }
  else
    {
      /* If the argument is larger than a word, then we know precisely
	 which registers we must use.  */
      if (arg_size > 1)
	{
	  if (cum->words)
	    {
	      gpr_reg_base = 23;
	      fpr_reg_base = 38;
	    }
	  else
	    {
	      gpr_reg_base = 25;
	      fpr_reg_base = 34;
	    }

	  /* Structures 5 to 8 bytes in size are passed in the general
	     registers in the same manner as other non floating-point
	     objects.  The data is right-justified and zero-extended
	     to 64 bits.  This is opposite to the normal justification
	     used on big endian targets and requires special treatment.
	     We now define BLOCK_REG_PADDING to pad these objects.
	     Aggregates, complex and vector types are passed in the same
	     manner as structures.  */
	  if (mode == BLKmode
	      || (type && (AGGREGATE_TYPE_P (type)
			   || TREE_CODE (type) == COMPLEX_TYPE
			   || TREE_CODE (type) == VECTOR_TYPE)))
	    {
	      rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (DImode, gpr_reg_base),
					   const0_rtx);
	      return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
	    }
	}
      else
	{
	  /* We have a single word (32 bits).  A simple computation
	     will get us the register #s we need.  */
	  gpr_reg_base = 26 - cum->words;
	  fpr_reg_base = 32 + 2 * cum->words;
	}
    }

  /* Determine if the argument needs to be passed in both general and
     floating point registers.  */
  if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
       /* If we are doing soft-float with portable runtime, then there
	  is no need to worry about FP regs.  */
       && !TARGET_SOFT_FLOAT
       /* The parameter must be some kind of scalar float, else we just
	  pass it in integer registers.  */
       && GET_MODE_CLASS (mode) == MODE_FLOAT
       /* The target function must not have a prototype.  */
       && cum->nargs_prototype <= 0
       /* libcalls do not need to pass items in both FP and general
	  registers.  */
       && type != NULL_TREE
       /* All this hair applies to "outgoing" args only.  This includes
	  sibcall arguments setup with FUNCTION_INCOMING_ARG.  */
       && !cum->incoming)
      /* Also pass outgoing floating arguments in both registers in indirect
	 calls with the 32 bit ABI and the HP assembler since there is no
	 way to specify argument locations in static functions.  */
      || (!TARGET_64BIT
	  && !TARGET_GAS
	  && !cum->incoming
	  && cum->indirect
	  && GET_MODE_CLASS (mode) == MODE_FLOAT))
    {
      retval
	= gen_rtx_PARALLEL
	    (mode,
	     gen_rtvec (2,
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, fpr_reg_base),
					   const0_rtx),
			gen_rtx_EXPR_LIST (VOIDmode,
					   gen_rtx_REG (mode, gpr_reg_base),
					   const0_rtx)));
    }
  else
    {
      /* See if we should pass this parameter in a general register.  */
      if (TARGET_SOFT_FLOAT
	  /* Indirect calls in the normal 32bit ABI require all arguments
	     to be passed in general registers.  */
	  || (!TARGET_PORTABLE_RUNTIME
	      && !TARGET_64BIT
	      && !TARGET_ELF32
	      && cum->indirect)
	  /* If the parameter is not a scalar floating-point parameter,
	     then it belongs in GPRs.  */
	  || GET_MODE_CLASS (mode) != MODE_FLOAT
	  /* Structure with single SFmode field belongs in GPR.  */
	  || (type && AGGREGATE_TYPE_P (type)))
	retval = gen_rtx_REG (mode, gpr_reg_base);
      else
	retval = gen_rtx_REG (mode, fpr_reg_base);
    }
  return retval;
}
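/* Worked 32-bit example (a sketch, not from the sources): for
   f (int x, double y), x occupies argument word 0 and is passed in
   %r26; y is wider than a word and cum->words is nonzero, so the code
   above selects gpr_reg_base 23 (the %r23/%r24 pair) or fpr_reg_base
   38 (%fr7), depending on which of the paths below is taken.  */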
/* If this arg would be passed totally in registers or totally on the stack,
   then this routine should return zero.  */

static int
pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		      tree type, bool named ATTRIBUTE_UNUSED)
{
  unsigned int max_arg_words = 8;
  unsigned int offset = 0;

  if (!TARGET_64BIT)
    return 0;

  if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
    offset = 1;

  if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
    /* Arg fits fully into registers.  */
    return 0;
  else if (cum->words + offset >= max_arg_words)
    /* Arg fully on the stack.  */
    return 0;
  else
    /* Arg is split.  */
    return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
}
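/* Worked example (PA64, hypothetical values): for a four-word (32-byte)
   aggregate with cum->words == 6, offset stays 0, and since
   6 + 4 > 8 but 6 < 8 the argument is split -- (8 - 6) * UNITS_PER_WORD
   = 16 bytes go in the last two argument registers and the remainder
   is passed on the stack.  */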
/* A get_unnamed_section callback for switching to the text section.

   This function is only used with SOM.  Because we don't support
   named subspaces, we can only create a new subspace or switch back
   to the default text subspace.  */

static void
som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  gcc_assert (TARGET_SOM);
  if (TARGET_GAS)
    {
      if (cfun && !cfun->machine->in_nsubspa)
	{
	  /* We only want to emit a .nsubspa directive once at the
	     start of the function.  */
	  cfun->machine->in_nsubspa = 1;

	  /* Create a new subspace for the text.  This provides
	     better stub placement and one-only functions.  */
	  if (cfun->decl
	      && DECL_ONE_ONLY (cfun->decl)
	      && !DECL_WEAK (cfun->decl))
	    {
	      output_section_asm_op ("\t.SPACE $TEXT$\n"
				     "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
				     "ACCESS=44,SORT=24,COMDAT");
	      return;
	    }
	}
      else
	{
	  /* There isn't a current function or the body of the current
	     function has been completed.  So, we are changing to the
	     text section to output debugging information.  Thus, we
	     need to forget that we are in the text section so that
	     varasm.c will call us when text_section is selected again.  */
	  gcc_assert (!cfun || cfun->machine->in_nsubspa == 2);
	  in_section = NULL;
	}
      output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
      return;
    }
  output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
}
/* A get_unnamed_section callback for switching to comdat data
   sections.  This function is only used with SOM.  */

static void
som_output_comdat_data_section_asm_op (const void *data)
{
  in_section = NULL;
  output_section_asm_op (data);
}
/* Implement TARGET_ASM_INITIALIZE_SECTIONS.  */

static void
pa_som_asm_init_sections (void)
{
  text_section
    = get_unnamed_section (0, som_output_text_section_asm_op, NULL);

  /* SOM puts readonly data in the default $LIT$ subspace when PIC code
     is not being generated.  */
  som_readonly_data_section
    = get_unnamed_section (0, output_section_asm_op,
			   "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");

  /* When secondary definitions are not supported, SOM makes readonly
     data one-only by creating a new $LIT$ subspace in $TEXT$ with
     the comdat flag.  */
  som_one_only_readonly_data_section
    = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
			   "\t.SPACE $TEXT$\n"
			   "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
			   "ACCESS=0x2c,SORT=16,COMDAT");

  /* When secondary definitions are not supported, SOM makes data one-only
     by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag.  */
  som_one_only_data_section
    = get_unnamed_section (SECTION_WRITE,
			   som_output_comdat_data_section_asm_op,
			   "\t.SPACE $PRIVATE$\n"
			   "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
			   "ACCESS=31,SORT=24,COMDAT");

  /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
     which reference data within the $TEXT$ space (for example constant
     strings in the $LIT$ subspace).

     The assemblers (GAS and HP as) both have problems with handling
     the difference of two symbols which is the other correct way to
     reference constant data during PIC code generation.

     So, there's no way to reference constant data which is in the
     $TEXT$ space during PIC generation.  Instead place all constant
     data into the $PRIVATE$ subspace (this reduces sharing, but it
     works correctly).  */
  readonly_data_section = flag_pic ? data_section : som_readonly_data_section;

  /* We must not have a reference to an external symbol defined in a
     shared library in a readonly section, else the SOM linker will
     hang.

     So, we force exception information into the data section.  */
  exception_section = data_section;
}
/* On hpux10, the linker will give an error if we have a reference
   in the read-only data section to a symbol defined in a shared
   library.  Therefore, expressions that might require a reloc can
   not be placed in the read-only data section.  */

static section *
pa_select_section (tree exp, int reloc,
		   unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (TREE_CODE (exp) == VAR_DECL
      && TREE_READONLY (exp)
      && !TREE_THIS_VOLATILE (exp)
      && DECL_INITIAL (exp)
      && (DECL_INITIAL (exp) == error_mark_node
	  || TREE_CONSTANT (DECL_INITIAL (exp)))
      && !reloc)
    {
      if (TARGET_SOM
	  && DECL_ONE_ONLY (exp)
	  && !DECL_WEAK (exp))
	return som_one_only_readonly_data_section;
      else
	return readonly_data_section;
    }
  else if (CONSTANT_CLASS_P (exp) && !reloc)
    return readonly_data_section;
  else if (TARGET_SOM
	   && TREE_CODE (exp) == VAR_DECL
	   && DECL_ONE_ONLY (exp)
	   && !DECL_WEAK (exp))
    return som_one_only_data_section;
  else
    return data_section;
}
static void
pa_globalize_label (FILE *stream, const char *name)
{
  /* We only handle DATA objects here, functions are globalized in
     ASM_DECLARE_FUNCTION_NAME.  */
  if (! FUNCTION_NAME_P (name))
    {
      fputs ("\t.EXPORT ", stream);
      assemble_name (stream, name);
      fputs (",DATA\n", stream);
    }
}
/* Worker function for TARGET_STRUCT_VALUE_RTX.  */

static rtx
pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
		     int incoming ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  /* SOM ABI says that objects larger than 64 bits are returned in memory.
     PA64 ABI says that objects larger than 128 bits are returned in memory.
     Note, int_size_in_bytes can return -1 if the size of the object is
     variable or larger than the maximum value that can be expressed as
     a HOST_WIDE_INT.  It can also return zero for an empty type.  The
     simplest way to handle variable and empty types is to pass them in
     memory.  This avoids problems in defining the boundaries of argument
     slots, allocating registers, etc.  */
  return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
	  || int_size_in_bytes (type) <= 0);
}
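/* Examples: on 32-bit SOM an 8-byte struct is returned in registers
   (%r28/%r29) while a 12-byte struct goes to memory; on PA64 the
   cutoff is 16 bytes.  Zero-sized and variable-sized types always use
   memory.  */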
/* Structure to hold declaration and name of external symbols that are
   emitted by GCC.  We generate a vector of these symbols and output them
   at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
   This avoids putting out names that are never really used.  */

typedef struct extern_symbol GTY(())
{
  tree decl;
  const char *name;
} extern_symbol;

/* Define gc'd vector type for extern_symbol.  */
DEF_VEC_O(extern_symbol);
DEF_VEC_ALLOC_O(extern_symbol,gc);

/* Vector of extern_symbol pointers.  */
static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
#ifdef ASM_OUTPUT_EXTERNAL_REAL
/* Mark DECL (name NAME) as an external reference (assembler output
   file FILE).  This saves the names to output at the end of the file
   if actually referenced.  */

void
pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
{
  extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);

  gcc_assert (file == asm_out_file);
  p->decl = decl;
  p->name = name;
}
/* Output text required at the end of an assembler file.
   This includes deferred plabels and .import directives for
   all external symbols that were actually referenced.  */

static void
pa_hpux_file_end (void)
{
  unsigned int i;
  extern_symbol *p;

  if (!NO_DEFERRED_PROFILE_COUNTERS)
    output_deferred_profile_counters ();

  output_deferred_plabels ();

  for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
    {
      tree decl = p->decl;

      if (!TREE_ASM_WRITTEN (decl)
	  && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
	ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
    }

  VEC_free (extern_symbol, gc, extern_symbols);
}
#endif