/* Subroutines for insn-output.c for HPPA.
   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005 Free Software Foundation, Inc.
   Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING.  If not, write to
the Free Software Foundation, 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "integrate.h"
#include "target-def.h"
/* Return nonzero if there is a bypass for the output of
   OUT_INSN and the fp store IN_INSN.  */

hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
  enum machine_mode store_mode;
  enum machine_mode other_mode;

  if (recog_memoized (in_insn) < 0
      || get_attr_type (in_insn) != TYPE_FPSTORE
      || recog_memoized (out_insn) < 0)

  store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));

  set = single_set (out_insn);

  other_mode = GET_MODE (SET_SRC (set));

  return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
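/* Illustrative note (not part of the original source): the bypass only
   applies when the producing insn and the fp store move values of the
   same width.  For example, an SFmode result feeding an SFmode fstw
   would qualify, while a DFmode result feeding an SFmode store would
   not.  */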
#ifndef DO_FRAME_NOTES
#ifdef INCOMING_RETURN_ADDR_RTX
#define DO_FRAME_NOTES 1
#define DO_FRAME_NOTES 0
static void copy_reg_pointer (rtx, rtx);
static void fix_range (const char *);
static bool pa_handle_option (size_t, const char *, int);
static int hppa_address_cost (rtx);
static bool hppa_rtx_costs (rtx, int, int, int *);
static inline rtx force_mode (enum machine_mode, rtx);
static void pa_reorg (void);
static void pa_combine_instructions (void);
static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
static int forward_branch_p (rtx);
static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
static int compute_movmem_length (rtx);
static int compute_clrmem_length (rtx);
static bool pa_assemble_integer (rtx, unsigned int, int);
static void remove_useless_addtr_insns (int);
static void store_reg (int, HOST_WIDE_INT, int);
static void store_reg_modify (int, int, HOST_WIDE_INT);
static void load_reg (int, HOST_WIDE_INT, int);
static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
static void update_total_code_bytes (int);
static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
static int pa_adjust_cost (rtx, rtx, rtx, int);
static int pa_adjust_priority (rtx, int);
static int pa_issue_rate (void);
static void pa_select_section (tree, int, unsigned HOST_WIDE_INT)
static void pa_encode_section_info (tree, rtx, int);
static const char *pa_strip_name_encoding (const char *);
static bool pa_function_ok_for_sibcall (tree, tree);
static void pa_globalize_label (FILE *, const char *)
static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
				    HOST_WIDE_INT, tree);
#if !defined(USE_COLLECT2)
static void pa_asm_out_constructor (rtx, int);
static void pa_asm_out_destructor (rtx, int);
static void pa_init_builtins (void);
static rtx hppa_builtin_saveregs (void);
static tree hppa_gimplify_va_arg_expr (tree, tree, tree *, tree *);
static bool pa_scalar_mode_supported_p (enum machine_mode);
static bool pa_commutative_p (rtx x, int outer_code);
static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
static struct deferred_plabel *get_plabel (rtx) ATTRIBUTE_UNUSED;
static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
static void output_deferred_plabels (void);
#ifdef ASM_OUTPUT_EXTERNAL_REAL
static void pa_hpux_file_end (void);
#ifdef HPUX_LONG_DOUBLE_LIBRARY
static void pa_hpux_init_libfuncs (void);
static rtx pa_struct_value_rtx (tree, int);
static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
static struct machine_function * pa_init_machine_status (void);
/* Save the operands last given to a compare for use when we
   generate a scc or bcc insn.  */
rtx hppa_compare_op0, hppa_compare_op1;
enum cmp_type hppa_branch_type;

/* Which cpu we are scheduling for.  */
enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;

/* The UNIX standard to use for predefines and linking.  */
int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;

/* Counts for the number of callee-saved general and floating point
   registers which were saved by the current function's prologue.  */
static int gr_saved, fr_saved;

static rtx find_addr_reg (rtx);

/* Keep track of the number of bytes we have output in the CODE subspace
   during this compilation so we'll know when to emit inline long-calls.  */
unsigned long total_code_bytes;

/* The last address of the previous function plus the number of bytes in
   associated thunks that have been output.  This is used to determine if
   a thunk can use an IA-relative branch to reach its target function.  */
static int last_address;

/* Variables to handle plabels that we discover are necessary at assembly
   output time.  They are output after the current function.  */
struct deferred_plabel GTY(())

static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
static size_t n_deferred_plabels = 0;
/* Initialize the GCC target structure.  */

#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER pa_assemble_integer

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue

#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST pa_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE pa_issue_rate

#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
#undef TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall

#undef TARGET_COMMUTATIVE_P
#define TARGET_COMMUTATIVE_P pa_commutative_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

#undef TARGET_ASM_FILE_END
#ifdef ASM_OUTPUT_EXTERNAL_REAL
#define TARGET_ASM_FILE_END pa_hpux_file_end
#define TARGET_ASM_FILE_END output_deferred_plabels

#if !defined(USE_COLLECT2)
#undef TARGET_ASM_CONSTRUCTOR
#define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
#undef TARGET_ASM_DESTRUCTOR
#define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor

#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION pa_handle_option

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS pa_init_builtins

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS hppa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hppa_address_cost

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG pa_reorg

#ifdef HPUX_LONG_DOUBLE_LIBRARY
#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs

#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_STRUCT_VALUE_RTX
#define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY pa_return_in_memory
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p

struct gcc_target targetm = TARGET_INITIALIZER;
/* Parse the -mfixed-range= option string.  */

fix_range (const char *const_str)
  char *str, *dash, *comma;

  /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
     REG2 are either register names or register numbers.  The effect
     of this option is to mark the registers in the range from REG1 to
     REG2 as ``fixed'' so they won't be used by the compiler.  This is
     used, e.g., to ensure that kernel mode code doesn't use fr4-fr31.  */

  i = strlen (const_str);
  str = (char *) alloca (i + 1);
  memcpy (str, const_str, i + 1);

  dash = strchr (str, '-');
  warning (0, "value of -mfixed-range must have form REG1-REG2");
  comma = strchr (dash + 1, ',');

  first = decode_reg_name (str);
  warning (0, "unknown register name: %s", str);
  last = decode_reg_name (dash + 1);
  warning (0, "unknown register name: %s", dash + 1);
  warning (0, "%s-%s is an empty range", str, dash + 1);

  for (i = first; i <= last; ++i)
    fixed_regs[i] = call_used_regs[i] = 1;

  /* Check if all floating point registers have been fixed.  */
  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)

  target_flags |= MASK_DISABLE_FPREGS;
/* Implement TARGET_HANDLE_OPTION.  */

pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
    case OPT_mpa_risc_1_0:
      target_flags &= ~(MASK_PA_11 | MASK_PA_20);

    case OPT_mpa_risc_1_1:
      target_flags &= ~MASK_PA_20;
      target_flags |= MASK_PA_11;

    case OPT_mpa_risc_2_0:
      target_flags |= MASK_PA_11 | MASK_PA_20;

      if (strcmp (arg, "8000") == 0)
        pa_cpu = PROCESSOR_8000;
      else if (strcmp (arg, "7100") == 0)
        pa_cpu = PROCESSOR_7100;
      else if (strcmp (arg, "700") == 0)
        pa_cpu = PROCESSOR_700;
      else if (strcmp (arg, "7100LC") == 0)
        pa_cpu = PROCESSOR_7100LC;
      else if (strcmp (arg, "7200") == 0)
        pa_cpu = PROCESSOR_7200;
      else if (strcmp (arg, "7300") == 0)
        pa_cpu = PROCESSOR_7300;

    case OPT_mfixed_range_:

#if TARGET_HPUX_10_10
#if TARGET_HPUX_11_11
override_options (void)
  /* Unconditional branches in the delay slot are not compatible with dwarf2
     call frame information.  There is no benefit in using this optimization
     on PA8000 and later processors.  */
  if (pa_cpu >= PROCESSOR_8000
      || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
      || flag_unwind_tables)
    target_flags &= ~MASK_JUMP_IN_DELAY;

  if (flag_pic && TARGET_PORTABLE_RUNTIME)
    warning (0, "PIC code generation is not supported in the portable runtime model");

  if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
    warning (0, "PIC code generation is not compatible with fast indirect calls");

  if (! TARGET_GAS && write_symbols != NO_DEBUG)
      warning (0, "-g is only supported when using GAS on this processor,");
      warning (0, "-g option disabled");
      write_symbols = NO_DEBUG;

  /* We only support the "big PIC" model now.  And we always generate PIC
     code when in 64bit mode.  */
  if (flag_pic == 1 || TARGET_64BIT)

  /* We can't guarantee that .dword is available for 32-bit targets.  */
  if (UNITS_PER_WORD == 4)
    targetm.asm_out.aligned_op.di = NULL;

  /* The unaligned ops are only available when using GAS.  */
      targetm.asm_out.unaligned_op.hi = NULL;
      targetm.asm_out.unaligned_op.si = NULL;
      targetm.asm_out.unaligned_op.di = NULL;

  init_machine_status = pa_init_machine_status;
pa_init_builtins (void)
#ifdef DONT_HAVE_FPUTC_UNLOCKED
  built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;
  implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] = NULL_TREE;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
pa_init_machine_status (void)
  return ggc_alloc_cleared (sizeof (machine_function));
/* If FROM is a probable pointer register, mark TO as a probable
   pointer register with the same pointer alignment as FROM.  */

copy_reg_pointer (rtx to, rtx from)
  if (REG_POINTER (from))
    mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

symbolic_expression_p (rtx x)
  /* Strip off any HIGH.  */
  if (GET_CODE (x) == HIGH)

  return (symbolic_operand (x, VOIDmode));

/* Accept any constant that can be moved in one instruction into a

cint_ok_for_move (HOST_WIDE_INT intval)
  /* OK if ldo, ldil, or zdepi, can be used.  */
  return (CONST_OK_FOR_LETTER_P (intval, 'J')
          || CONST_OK_FOR_LETTER_P (intval, 'N')
          || CONST_OK_FOR_LETTER_P (intval, 'K'));
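/* Illustrative note (example values chosen for this note, assuming the
   usual PA constraint letters): 'J' covers 14-bit signed constants such
   as 42, which ldo can materialize; 'N' covers constants whose low 11
   bits are zero, such as 0x12800000, which a single ldil can load; and
   'K' covers constants that one zdepi/depwi,z can deposit, such as a
   contiguous run of ones like 0x3c00.  */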
/* Return truth value of whether OP can be used as an operand in a

adddi3_operand (rtx op, enum machine_mode mode)
  return (register_operand (op, mode)
          || (GET_CODE (op) == CONST_INT
              && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));

/* True iff zdepi can be used to generate this CONST_INT.
   zdepi first sign extends a 5 bit signed number to a given field
   length, then places this field anywhere in a zero.  */

zdepi_cint_p (unsigned HOST_WIDE_INT x)
  unsigned HOST_WIDE_INT lsb_mask, t;

  /* This might not be obvious, but it's at least fast.
     This function is critical; we don't have the time loops would take.  */
  t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
  /* Return true iff t is a power of two.  */
  return ((t & (t - 1)) == 0);
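/* Worked example (illustrative only, assuming LSB_MASK holds the lowest
   set bit of X): for x = 0x1f0, five contiguous ones, lsb_mask = 0x10 and
   t = ((0x1f0 >> 4) + 0x10) & ~0xf = 0x20, a power of two, so the test
   succeeds; zdepi can deposit the 5-bit field 0b11111 with length 5 at
   bit position 4.  For x = 0x21 (0b100001), no sign-extended 5-bit field
   matches, t = 3, and the test fails.  */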
/* True iff depi or extru can be used to compute (reg & mask).
   Accept bit pattern like these:

and_mask_p (unsigned HOST_WIDE_INT mask)
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;

/* True iff depi can be used to compute (reg | MASK).  */

ior_mask_p (unsigned HOST_WIDE_INT mask)
  mask += mask & -mask;
  return (mask & (mask - 1)) == 0;
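/* Illustrative example (values chosen for this note): ior_mask_p accepts
   masks that form one contiguous run of ones, e.g. 0x0ff0, because adding
   the lowest set bit (0x10) gives 0x1000, a power of two.  It rejects
   0x0f0f, where the same step gives 0x0f10, which still has several bits
   set.  */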
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go to REG.  If we need more
   than one register, we lose.  */

legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)

  gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));

  /* Labels need special handling.  */
  if (pic_label_operand (orig, mode))
      /* We do not want to go through the movXX expanders here since that
	 would create recursion.

	 Nor do we really want to call a generator for a named pattern
	 since that requires multiple patterns if we want to support

	 So instead we just emit the raw set, which avoids the movXX
	 expanders completely.  */
      mark_reg_pointer (reg, BITS_PER_UNIT);
      emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
      current_function_uses_pic_offset_table = 1;

  if (GET_CODE (orig) == SYMBOL_REF)

      /* Before reload, allocate a temporary register for the intermediate
	 result.  This allows the sequence to be deleted when the final
	 result is unused and the insns are trivially dead.  */
      tmp_reg = ((reload_in_progress || reload_completed)
		 ? reg : gen_reg_rtx (Pmode));

      emit_move_insn (tmp_reg,
		      gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
				    gen_rtx_HIGH (word_mode, orig)));
	= gen_const_mem (Pmode,
			 gen_rtx_LO_SUM (Pmode, tmp_reg,
					 gen_rtx_UNSPEC (Pmode,

      current_function_uses_pic_offset_table = 1;
      mark_reg_pointer (reg, BITS_PER_UNIT);
      insn = emit_move_insn (reg, pic_ref);

      /* Put a REG_EQUAL note on this insn, so that it can be optimized.  */
      REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_EQUAL, orig, REG_NOTES (insn));

  else if (GET_CODE (orig) == CONST)

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
      orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
				     base == reg ? 0 : reg);

      if (GET_CODE (orig) == CONST_INT)
	  if (INT_14_BITS (orig))
	    return plus_constant (base, INTVAL (orig));
	  orig = force_reg (Pmode, orig);
      pic_ref = gen_rtx_PLUS (Pmode, base, orig);
      /* Likewise, should we set special REG_NOTEs here?  */
static GTY(()) rtx gen_tls_tga;

gen_tls_get_addr (void)
    gen_tls_tga = init_one_libfunc ("__tls_get_addr");

hppa_tls_call (rtx arg)

  ret = gen_reg_rtx (Pmode);
  emit_library_call_value (gen_tls_get_addr (), ret,
			   LCT_CONST, Pmode, 1, arg, Pmode);

legitimize_tls_address (rtx addr)
  rtx ret, insn, tmp, t1, t2, tp;
  enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);

    case TLS_MODEL_GLOBAL_DYNAMIC:
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_tgd_load (tmp, addr));
      ret = hppa_tls_call (tmp);

    case TLS_MODEL_LOCAL_DYNAMIC:
      ret = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      emit_insn (gen_tld_load (tmp, addr));
      t1 = hppa_tls_call (tmp);
      t2 = gen_reg_rtx (Pmode);
      emit_libcall_block (insn, t2, t1,
			  gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
      emit_insn (gen_tld_offset_load (ret, addr, t2));

    case TLS_MODEL_INITIAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      tmp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tie_load (tmp, addr));
      emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));

    case TLS_MODEL_LOCAL_EXEC:
      tp = gen_reg_rtx (Pmode);
      ret = gen_reg_rtx (Pmode);
      emit_insn (gen_tp_load (tp));
      emit_insn (gen_tle_load (ret, addr, tp));
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This macro is used in only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was called.
   In some cases it is useful to look at this to decide what needs to be done.

   MODE and WIN are passed so that this macro can use
   GO_IF_LEGITIMATE_ADDRESS.

   It is always safe for this macro to do nothing.  It exists to recognize
   opportunities to optimize the output.

   For the PA, transform:

	memory(X + <large int>)

	if (<large int> & mask) >= 16
	  Y = (<large int> & ~mask) + mask + 1	Round up.
	  Y = (<large int> & ~mask)		Round down.

	memory (Z + (<large int> - Y));

   This is for CSE to find several similar references, and only use one Z.

   X can either be a SYMBOL_REF or REG, but because combine cannot
   perform a 4->2 combination we do nothing for SYMBOL_REF + D where
   D will not fit in 14 bits.

   MODE_FLOAT references allow displacements which fit in 5 bits, so use

   MODE_INT references allow displacements which fit in 14 bits, so use

   This relies on the fact that most mode MODE_FLOAT references will use FP
   registers and most mode MODE_INT references will use integer registers.
   (In the rare case of an FP register used in an integer MODE, we depend
   on secondary reloads to clean things up.)

   It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
   manner if Y is 2, 4, or 8.  (allows more shadd insns and shifted indexed
   addressing modes to be used).

   Put X and Z into registers.  Then put the entire expression into

hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			 enum machine_mode mode)
  /* We need to canonicalize the order of operands in unscaled indexed
     addresses since the code that checks if an address is valid doesn't
     always try both orders.  */
  if (!TARGET_NO_SPACE_REGS
      && GET_CODE (x) == PLUS
      && GET_MODE (x) == Pmode
      && REG_P (XEXP (x, 0))
      && REG_P (XEXP (x, 1))
      && REG_POINTER (XEXP (x, 0))
      && !REG_POINTER (XEXP (x, 1)))
    return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));

  if (PA_SYMBOL_REF_TLS_P (x))
    return legitimize_tls_address (x);
    return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));

  /* Strip off CONST.  */
  if (GET_CODE (x) == CONST)

  /* Special case.  Get the SYMBOL_REF into a register and use indexing.
     That should always be safe.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
      rtx reg = force_reg (Pmode, XEXP (x, 1));
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));

  /* Note we must reject symbols which represent function addresses
     since the assembler/linker can't handle arithmetic on plabels.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	   && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
	  || GET_CODE (XEXP (x, 0)) == REG))
      rtx int_part, ptr_reg;
      int offset = INTVAL (XEXP (x, 1));

      mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
	      ? (TARGET_PA_20 ? 0x3fff : 0x1f) : 0x3fff);

      /* Choose which way to round the offset.  Round up if we
	 are >= halfway to the next boundary.  */
      if ((offset & mask) >= ((mask + 1) / 2))
	newoffset = (offset & ~ mask) + mask + 1;
	newoffset = (offset & ~ mask);

      /* If the newoffset will not fit in 14 bits (ldo), then
	 handling this would take 4 or 5 instructions (2 to load
	 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
	 add the new offset and the SYMBOL_REF.)  Combine can
	 not handle 4->2 or 5->2 combinations, so do not create
      if (! VAL_14_BITS_P (newoffset)
	  && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
	  rtx const_part = plus_constant (XEXP (x, 0), newoffset);
				  gen_rtx_HIGH (Pmode, const_part));
				gen_rtx_LO_SUM (Pmode,
						tmp_reg, const_part));

      if (! VAL_14_BITS_P (newoffset))
	int_part = force_reg (Pmode, GEN_INT (newoffset));
	int_part = GEN_INT (newoffset);

      ptr_reg = force_reg (Pmode,
			   force_reg (Pmode, XEXP (x, 0)),

      return plus_constant (ptr_reg, offset - newoffset);
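      /* Worked example (values chosen for this note, not from the original
	 source): for an SFmode reference to X + 0x123 on pre-2.0 hardware
	 the mask is 0x1f; the low bits 0x3 are below the halfway point
	 0x10, so NEWOFFSET rounds down to 0x120.  Z = X + 0x120 goes into
	 a register and the final address Z + 3 fits in the 5-bit
	 displacement of fldw/fstw, while several nearby references can
	 share the same Z.  */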
  /* Handle (plus (mult (a) (shadd_constant)) (b)).  */

  if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
      && (OBJECT_P (XEXP (x, 1))
	  || GET_CODE (XEXP (x, 1)) == SUBREG)
      && GET_CODE (XEXP (x, 1)) != CONST)
      int val = INTVAL (XEXP (XEXP (x, 0), 1));

      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      reg2 = XEXP (XEXP (x, 0), 0);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      return force_reg (Pmode, gen_rtx_PLUS (Pmode,

  /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).

     Only do so for floating point modes since this is more speculative
     and we lose if it's an integer store.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
      && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
      && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
      && (mode == SFmode || mode == DFmode))

      /* First, try and figure out what to use as a base register.  */
      rtx reg1, reg2, base, idx, orig_base;

      reg1 = XEXP (XEXP (x, 0), 1);

      /* Make sure they're both regs.  If one was a SYMBOL_REF [+ const],
	 then emit_move_sequence will turn on REG_POINTER so we'll know
	 it's a base register below.  */
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      /* Figure out what the base and index are.  */

      if (GET_CODE (reg1) == REG
	  && REG_POINTER (reg1))
	  orig_base = XEXP (XEXP (x, 0), 1);
	  idx = gen_rtx_PLUS (Pmode,
				    XEXP (XEXP (XEXP (x, 0), 0), 0),
				    XEXP (XEXP (XEXP (x, 0), 0), 1)),
      else if (GET_CODE (reg2) == REG
	       && REG_POINTER (reg2))
	  orig_base = XEXP (x, 1);

      /* If the index adds a large constant, try to scale the
	 constant so that it can be loaded with only one insn.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
			    / INTVAL (XEXP (XEXP (idx, 0), 1)))
	  && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
	  /* Divide the CONST_INT by the scale factor, then add it to A.  */
	  int val = INTVAL (XEXP (idx, 1));

	  val /= INTVAL (XEXP (XEXP (idx, 0), 1));
	  reg1 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg1) != REG)
	    reg1 = force_reg (Pmode, force_operand (reg1, 0));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));

	  /* We can now generate a simple scaled indexed address.  */
	    (Pmode, gen_rtx_PLUS (Pmode,
				  gen_rtx_MULT (Pmode, reg1,
						XEXP (XEXP (idx, 0), 1)),

      /* If B + C is still a valid base register, then add them.  */
      if (GET_CODE (XEXP (idx, 1)) == CONST_INT
	  && INTVAL (XEXP (idx, 1)) <= 4096
	  && INTVAL (XEXP (idx, 1)) >= -4096)
	  int val = INTVAL (XEXP (XEXP (idx, 0), 1));

	  reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));

	  reg2 = XEXP (XEXP (idx, 0), 0);
	  if (GET_CODE (reg2) != CONST_INT)
	    reg2 = force_reg (Pmode, force_operand (reg2, 0));

	  return force_reg (Pmode, gen_rtx_PLUS (Pmode,
						 gen_rtx_MULT (Pmode,

      /* Get the index into a register, then add the base + index and
	 return a register holding the result.  */

      /* First get A into a register.  */
      reg1 = XEXP (XEXP (idx, 0), 0);
      if (GET_CODE (reg1) != REG)
	reg1 = force_reg (Pmode, force_operand (reg1, 0));

      /* And get B into a register.  */
      reg2 = XEXP (idx, 1);
      if (GET_CODE (reg2) != REG)
	reg2 = force_reg (Pmode, force_operand (reg2, 0));

      reg1 = force_reg (Pmode,
			gen_rtx_PLUS (Pmode,
				      gen_rtx_MULT (Pmode, reg1,
						    XEXP (XEXP (idx, 0), 1)),

      /* Add the result to our base register and return.  */
      return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
  /* Uh-oh.  We might have an address for x[n-100000].  This needs
     special handling to avoid creating an indexed memory address
     with x-100000 as the base.

     If the constant part is small enough, then it's still safe because
     there is a guard page at the beginning and end of the data segment.

     Scaled references are common enough that we want to try and rearrange
     the terms so that we can use indexing for these addresses too.  Only
     do the optimization for floating point modes.  */

  if (GET_CODE (x) == PLUS
      && symbolic_expression_p (XEXP (x, 1)))
      /* Ugly.  We modify things here so that the address offset specified
	 by the index expression is computed first, then added to x to form
	 the entire address.  */

      rtx regx1, regx2, regy1, regy2, y;

      /* Strip off any CONST.  */
      if (GET_CODE (y) == CONST)

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
	  /* See if this looks like
	     (plus (mult (reg) (shadd_const))
		   (const (plus (symbol_ref) (const_int))))

	     Where const_int is small.  In that case the const
	     expression is a valid pointer for indexing.

	     If const_int is big, but can be divided evenly by shadd_const
	     and added to (reg).  This allows more scaled indexed addresses.  */
	  if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
	      && GET_CODE (XEXP (x, 0)) == MULT
	      && GET_CODE (XEXP (y, 1)) == CONST_INT
	      && INTVAL (XEXP (y, 1)) >= -4096
	      && INTVAL (XEXP (y, 1)) <= 4095
	      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
	      && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
	      int val = INTVAL (XEXP (XEXP (x, 0), 1));

	      if (GET_CODE (reg1) != REG)
		reg1 = force_reg (Pmode, force_operand (reg1, 0));

	      reg2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (reg2) != REG)
		reg2 = force_reg (Pmode, force_operand (reg2, 0));

	      return force_reg (Pmode,
				gen_rtx_PLUS (Pmode,
					      gen_rtx_MULT (Pmode,
	  else if ((mode == DFmode || mode == SFmode)
		   && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
		   && GET_CODE (XEXP (x, 0)) == MULT
		   && GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
		   && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
		   && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
		= force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
					     / INTVAL (XEXP (XEXP (x, 0), 1))));
	      regx2 = XEXP (XEXP (x, 0), 0);
	      if (GET_CODE (regx2) != REG)
		regx2 = force_reg (Pmode, force_operand (regx2, 0));
	      regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_MULT (Pmode, regx2,
							      XEXP (XEXP (x, 0), 1)),
						force_reg (Pmode, XEXP (y, 0))));
	  else if (GET_CODE (XEXP (y, 1)) == CONST_INT
		   && INTVAL (XEXP (y, 1)) >= -4096
		   && INTVAL (XEXP (y, 1)) <= 4095)
	      /* This is safe because of the guard page at the
		 beginning and end of the data space.  Just
		 return the original address.  */

	      /* Doesn't look like one we can optimize.  */
	      regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
	      regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
	      regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
	      regx1 = force_reg (Pmode,
				 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
	      return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
/* For the HPPA, REG and REG+CONST is cost 0
   and addresses involving symbolic constants are cost 2.

   PIC addresses are very expensive.

   It is no coincidence that this has the same structure
   as GO_IF_LEGITIMATE_ADDRESS.  */

hppa_address_cost (rtx X)
  switch (GET_CODE (X))

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

hppa_rtx_costs (rtx x, int code, int outer_code, int *total)
      if (INTVAL (x) == 0)
      else if (INT_14_BITS (x))

      if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
	  && outer_code != SET)

      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
      else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
	*total = COSTS_N_INSNS (8);
	*total = COSTS_N_INSNS (20);

      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	  *total = COSTS_N_INSNS (14);

      *total = COSTS_N_INSNS (60);

    case PLUS: /* this includes shNadd insns */
      if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
	*total = COSTS_N_INSNS (3);
	*total = COSTS_N_INSNS (1);

      *total = COSTS_N_INSNS (1);
/* Ensure mode of ORIG, a REG rtx, is MODE.  Returns either ORIG or a
   new rtx with the correct mode.  */

force_mode (enum machine_mode mode, rtx orig)
  if (mode == GET_MODE (orig))

  gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);

  return gen_rtx_REG (mode, REGNO (orig));

/* Return 1 if *X is a thread-local symbol.  */

pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
  return PA_SYMBOL_REF_TLS_P (*x);

/* Return 1 if X contains a thread-local symbol.  */

pa_tls_referenced_p (rtx x)
  if (!TARGET_HAVE_TLS)

  return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move

   Note SCRATCH_REG may not be in the proper mode depending on how it
   will be used.  This routine is responsible for creating a new copy
   of SCRATCH_REG in the proper mode.  */

emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
  register rtx operand0 = operands[0];
  register rtx operand1 = operands[1];

  /* We can only handle indexed addresses in the destination operand
     of floating point stores.  Thus, we need to break out indexed
     addresses from the destination operand.  */
  if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
      /* This is only safe up to the beginning of life analysis.  */
      gcc_assert (!no_new_pseudos);

      tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
      operand0 = replace_equiv_address (operand0, tem);

  /* On targets with non-equivalent space registers, break out unscaled
     indexed addresses from the source operand before the final CSE.
     We have to do this because the REG_POINTER flag is not correctly
     carried through various optimization passes and CSE may substitute
     a pseudo without the pointer set for one with the pointer set.  As
     a result, we lose various opportunities to create insns with
     unscaled indexed addresses.  */
  if (!TARGET_NO_SPACE_REGS
      && !cse_not_expected
      && GET_CODE (operand1) == MEM
      && GET_CODE (XEXP (operand1, 0)) == PLUS
      && REG_P (XEXP (XEXP (operand1, 0), 0))
      && REG_P (XEXP (XEXP (operand1, 0), 1)))
    = replace_equiv_address (operand1,
			     copy_to_mode_reg (Pmode, XEXP (operand1, 0)));

      && reload_in_progress && GET_CODE (operand0) == REG
      && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
    operand0 = reg_equiv_mem[REGNO (operand0)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand0) == SUBREG
	   && GET_CODE (SUBREG_REG (operand0)) == REG
	   && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
				 SUBREG_BYTE (operand0));
      operand0 = alter_subreg (&temp);

      && reload_in_progress && GET_CODE (operand1) == REG
      && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
    operand1 = reg_equiv_mem[REGNO (operand1)];
  else if (scratch_reg
	   && reload_in_progress && GET_CODE (operand1) == SUBREG
	   && GET_CODE (SUBREG_REG (operand1)) == REG
	   && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
      /* We must not alter SUBREG_BYTE (operand0) since that would confuse
	 the code which tracks sets/uses for delete_output_reload.  */
      rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
				 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
				 SUBREG_BYTE (operand1));
      operand1 = alter_subreg (&temp);
  if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
      && ((tem = find_replacement (&XEXP (operand0, 0)))
	  != XEXP (operand0, 0)))
    operand0 = gen_rtx_MEM (GET_MODE (operand0), tem);

  if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
      && ((tem = find_replacement (&XEXP (operand1, 0)))
	  != XEXP (operand1, 0)))
    operand1 = gen_rtx_MEM (GET_MODE (operand1), tem);

  /* Handle secondary reloads for loads/stores of FP registers from
     REG+D addresses where D does not fit in 5 or 14 bits, including
     (subreg (mem (addr))) cases.  */
      && fp_reg_operand (operand0, mode)
      && ((GET_CODE (operand1) == MEM
	   && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
				 XEXP (operand1, 0)))
	  || ((GET_CODE (operand1) == SUBREG
	       && GET_CODE (XEXP (operand1, 0)) == MEM
	       && !memory_address_p ((GET_MODE_SIZE (mode) == 4
				     XEXP (XEXP (operand1, 0), 0))))))
      if (GET_CODE (operand1) == SUBREG)
	operand1 = XEXP (operand1, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
      if (!memory_address_p (Pmode, XEXP (operand1, 0)))
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg,
			  gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
					  XEXP (XEXP (operand1, 0), 0),
	emit_move_insn (scratch_reg, XEXP (operand1, 0));
      emit_insn (gen_rtx_SET (VOIDmode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));

  else if (scratch_reg
	   && fp_reg_operand (operand1, mode)
	   && ((GET_CODE (operand0) == MEM
		&& !memory_address_p ((GET_MODE_SIZE (mode) == 4
				      XEXP (operand0, 0)))
	       || ((GET_CODE (operand0) == SUBREG)
		   && GET_CODE (XEXP (operand0, 0)) == MEM
		   && !memory_address_p ((GET_MODE_SIZE (mode) == 4
					 XEXP (XEXP (operand0, 0), 0)))))
      if (GET_CODE (operand0) == SUBREG)
	operand0 = XEXP (operand0, 0);

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* D might not fit in 14 bits either; for such cases load D into
      if (!memory_address_p (Pmode, XEXP (operand0, 0)))
	  emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
						       XEXP (XEXP (operand0, 0),
	emit_move_insn (scratch_reg, XEXP (operand0, 0));
      emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (mode, scratch_reg),

  /* Handle secondary reloads for loads of FP registers from constant
     expressions by forcing the constant into memory.

     Use scratch_reg to hold the address of the memory location.

     The proper fix is to change PREFERRED_RELOAD_CLASS to return
     NO_REGS when presented with a const_int and a register class
     containing only FP registers.  Doing so unfortunately creates
     more problems than it solves.  Fix this for 2.5.  */
  else if (scratch_reg
	   && CONSTANT_P (operand1)
	   && fp_reg_operand (operand0, mode))

      /* SCRATCH_REG will hold an address and maybe the actual data.  We want
	 it in WORD_MODE regardless of what mode it was originally given
      scratch_reg = force_mode (word_mode, scratch_reg);

      /* Force the constant into memory and put the address of the
	 memory location into scratch_reg.  */
      xoperands[0] = scratch_reg;
      xoperands[1] = XEXP (force_const_mem (mode, operand1), 0);
      emit_move_sequence (xoperands, Pmode, 0);

      /* Now load the destination register.  */
      emit_insn (gen_rtx_SET (mode, operand0,
			      gen_rtx_MEM (mode, scratch_reg)));
  /* Handle secondary reloads for SAR.  These occur when trying to load
     the SAR from memory, FP register, or with a constant.  */
  else if (scratch_reg
	   && GET_CODE (operand0) == REG
	   && REGNO (operand0) < FIRST_PSEUDO_REGISTER
	   && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
	   && (GET_CODE (operand1) == MEM
	       || GET_CODE (operand1) == CONST_INT
	       || (GET_CODE (operand1) == REG
		   && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
      /* D might not fit in 14 bits either; for such cases load D into
      if (GET_CODE (operand1) == MEM
	  && !memory_address_p (Pmode, XEXP (operand1, 0)))
	  /* We are reloading the address into the scratch register, so we
	     want to make sure the scratch register is a full register.  */
	  scratch_reg = force_mode (word_mode, scratch_reg);

	  emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
	  emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
						       XEXP (XEXP (operand1, 0),

	  /* Now we are going to load the scratch register from memory,
	     we want to load it in the same width as the original MEM,
	     which must be the same as the width of the ultimate destination,
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, gen_rtx_MEM (GET_MODE (operand0),

	  /* We want to load the scratch register using the same mode as
	     the ultimate destination.  */
	  scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);

	  emit_move_insn (scratch_reg, operand1);

      /* And emit the insn to set the ultimate destination.  We know that
	 the scratch register has the same mode as the destination at this
      emit_move_insn (operand0, scratch_reg);
  /* Handle the most common case: storing into a register.  */
  else if (register_operand (operand0, mode))
      if (register_operand (operand1, mode)
	  || (GET_CODE (operand1) == CONST_INT
	      && cint_ok_for_move (INTVAL (operand1)))
	  || (operand1 == CONST0_RTX (mode))
	  || (GET_CODE (operand1) == HIGH
	      && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
	  /* Only `general_operands' can come here, so MEM is ok.  */
	  || GET_CODE (operand1) == MEM)
	  /* Various sets are created during RTL generation which don't
	     have the REG_POINTER flag correctly set.  After the CSE pass,
	     instruction recognition can fail if we don't consistently
	     set this flag when performing register copies.  This should
	     also improve the opportunities for creating insns that use
	     unscaled indexing.  */
	  if (REG_P (operand0) && REG_P (operand1))
	      if (REG_POINTER (operand1)
		  && !REG_POINTER (operand0)
		  && !HARD_REGISTER_P (operand0))
		copy_reg_pointer (operand0, operand1);
	      else if (REG_POINTER (operand0)
		       && !REG_POINTER (operand1)
		       && !HARD_REGISTER_P (operand1))
		copy_reg_pointer (operand1, operand0);

	  /* When MEMs are broken out, the REG_POINTER flag doesn't
	     get set.  In some cases, we can set the REG_POINTER flag
	     from the declaration for the MEM.  */
	  if (REG_P (operand0)
	      && GET_CODE (operand1) == MEM
	      && !REG_POINTER (operand0))
	      tree decl = MEM_EXPR (operand1);

	      /* Set the register pointer flag and register alignment
		 if the declaration for this memory reference is a
		 pointer type.  Fortran indirect argument references
		  && !(flag_argument_noalias > 1
		       && TREE_CODE (decl) == INDIRECT_REF
		       && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))

		  /* If this is a COMPONENT_REF, use the FIELD_DECL from
		  if (TREE_CODE (decl) == COMPONENT_REF)
		    decl = TREE_OPERAND (decl, 1);

		  type = TREE_TYPE (decl);
		  if (TREE_CODE (type) == ARRAY_TYPE)
		    type = get_inner_array_type (type);

		  if (POINTER_TYPE_P (type))

		      type = TREE_TYPE (type);
		      /* Using TYPE_ALIGN_OK is rather conservative as
			 only the ada frontend actually sets it.  */
		      align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
		      mark_reg_pointer (operand0, align);

	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));

  else if (GET_CODE (operand0) == MEM)
      if (mode == DFmode && operand1 == CONST0_RTX (mode)
	  && !(reload_in_progress || reload_completed))
	  rtx temp = gen_reg_rtx (DFmode);

	  emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
      if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
	  /* Run this case quickly.  */
	  emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
      if (! (reload_in_progress || reload_completed))
	  operands[0] = validize_mem (operand0);
	  operands[1] = operand1 = force_reg (mode, operand1);
  /* Simplify the source if we need to.
     Note we do have to handle function labels here, even though we do
     not consider them legitimate constants.  Loop optimizations can
     call the emit_move_xxx with one as a source.  */
  if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
      || function_label_operand (operand1, mode)
      || (GET_CODE (operand1) == HIGH
	  && symbolic_operand (XEXP (operand1, 0), mode)))

      if (GET_CODE (operand1) == HIGH)
	  operand1 = XEXP (operand1, 0);
      if (symbolic_operand (operand1, mode))
	  /* Argh.  The assembler and linker can't handle arithmetic

	     So we force the plabel into memory, load operand0 from
	     the memory location, then add in the constant part.  */
	  if ((GET_CODE (operand1) == CONST
	       && GET_CODE (XEXP (operand1, 0)) == PLUS
	       && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
	      || function_label_operand (operand1, mode))
	      rtx temp, const_part;

	      /* Figure out what (if any) scratch register to use.  */
	      if (reload_in_progress || reload_completed)
		  scratch_reg = scratch_reg ? scratch_reg : operand0;
		  /* SCRATCH_REG will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  scratch_reg = force_mode (word_mode, scratch_reg);
		scratch_reg = gen_reg_rtx (Pmode);

	      if (GET_CODE (operand1) == CONST)
		  /* Save away the constant part of the expression.  */
		  const_part = XEXP (XEXP (operand1, 0), 1);
		  gcc_assert (GET_CODE (const_part) == CONST_INT);

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
		  /* No constant part.  */
		  const_part = NULL_RTX;

		  /* Force the function label into memory.  */
		  temp = force_const_mem (mode, operand1);

	      /* Get the address of the memory location.  PIC-ify it if
	      temp = XEXP (temp, 0);
		temp = legitimize_pic_address (temp, mode, scratch_reg);

	      /* Put the address of the memory location into our destination
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* Now load from the memory location into our destination
	      operands[1] = gen_rtx_MEM (Pmode, operands[0]);
	      emit_move_sequence (operands, mode, scratch_reg);

	      /* And add back in the constant part.  */
	      if (const_part != NULL_RTX)
		expand_inc (operand0, const_part);

	      if (reload_in_progress || reload_completed)
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		temp = gen_reg_rtx (Pmode);

	      /* (const (plus (symbol) (const_int))) must be forced to
		 memory during/after reload if the const_int will not fit
	      if (GET_CODE (operand1) == CONST
		  && GET_CODE (XEXP (operand1, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
		  && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
		  && (reload_completed || reload_in_progress)
		  operands[1] = force_const_mem (mode, operand1);
		  operands[1] = legitimize_pic_address (XEXP (operands[1], 0),
		  operands[1] = gen_rtx_MEM (mode, operands[1]);
		  emit_move_sequence (operands, mode, temp);
		  operands[1] = legitimize_pic_address (operand1, mode, temp);
		  if (REG_P (operand0) && REG_P (operands[1]))
		    copy_reg_pointer (operand0, operands[1]);
		  emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
	  /* On the HPPA, references to data space are supposed to use dp,
	     register 27, but showing it in the RTL inhibits various cse
	     and loop optimizations.  */

	      if (reload_in_progress || reload_completed)
		  temp = scratch_reg ? scratch_reg : operand0;
		  /* TEMP will hold an address and maybe the actual
		     data.  We want it in WORD_MODE regardless of what mode it
		     was originally given to us.  */
		  temp = force_mode (word_mode, temp);
		temp = gen_reg_rtx (mode);

	      /* Loading a SYMBOL_REF into a register makes that register
		 safe to be used as the base in an indexed address.

		 Don't mark hard registers though.  That loses.  */
	      if (GET_CODE (operand0) == REG
		  && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (operand0, BITS_PER_UNIT);
	      if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
		mark_reg_pointer (temp, BITS_PER_UNIT);
		set = gen_rtx_SET (mode, operand0, temp);
		set = gen_rtx_SET (VOIDmode,
				   gen_rtx_LO_SUM (mode, temp, operand1));

	      emit_insn (gen_rtx_SET (VOIDmode,
				      gen_rtx_HIGH (mode, operand1)));
      else if (pa_tls_referenced_p (operand1))

	  if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	      addend = XEXP (XEXP (tmp, 0), 1);
	      tmp = XEXP (XEXP (tmp, 0), 0);

	  gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
	  tmp = legitimize_tls_address (tmp);
	    tmp = gen_rtx_PLUS (mode, tmp, addend);
	  tmp = force_operand (tmp, operands[0]);
      else if (GET_CODE (operand1) != CONST_INT
	       || !cint_ok_for_move (INTVAL (operand1)))

	  HOST_WIDE_INT value = 0;
	  HOST_WIDE_INT insv = 0;

	  if (GET_CODE (operand1) == CONST_INT)
	    value = INTVAL (operand1);

	      && GET_CODE (operand1) == CONST_INT
	      && HOST_BITS_PER_WIDE_INT > 32
	      && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)

	      /* Extract the low order 32 bits of the value and sign extend.
		 If the new value is the same as the original value, we can
		 use the original value as-is.  If the new value is
		 different, we use it and insert the most-significant 32-bits
		 of the original value into the final result.  */
	      nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
		      ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
#if HOST_BITS_PER_WIDE_INT > 32
		  insv = value >= 0 ? value >> 32 : ~(~value >> 32);
		  operand1 = GEN_INT (nval);

	  if (reload_in_progress || reload_completed)
	    temp = scratch_reg ? scratch_reg : operand0;
	    temp = gen_reg_rtx (mode);

	  /* We don't directly split DImode constants on 32-bit targets
	     because PLUS uses an 11-bit immediate and the insn sequence
	     generated is not as efficient as the one using HIGH/LO_SUM.  */
	  if (GET_CODE (operand1) == CONST_INT
	      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
	      /* Directly break constant into high and low parts.  This
		 provides better optimization opportunities because various
		 passes recognize constants split with PLUS but not LO_SUM.
		 We use a 14-bit signed low part except when the addition
		 of 0x4000 to the high part might change the sign of the
	      HOST_WIDE_INT low = value & 0x3fff;
	      HOST_WIDE_INT high = value & ~ 0x3fff;

	      if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))

	      emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
	      operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
	      emit_insn (gen_rtx_SET (VOIDmode, temp,
				      gen_rtx_HIGH (mode, operand1)));
	      operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);

	  insn = emit_move_insn (operands[0], operands[1]);

	  /* Now insert the most significant 32 bits of the value
	     into the register.  When we don't have a second register
	     available, it could take up to nine instructions to load
	     a 64-bit integer constant.  Prior to reload, we force
	     constants that would take more than three instructions
	     to load to the constant pool.  During and after reload,
	     we have to handle all possible values.  */

	      /* Use a HIGH/LO_SUM/INSV sequence if we have a second
		 register and the value to be inserted is outside the
		 range that can be loaded with three depdi instructions.  */
	      if (temp != operand0 && (insv >= 16384 || insv < -16384))
		  operand1 = GEN_INT (insv);

		  emit_insn (gen_rtx_SET (VOIDmode, temp,
					  gen_rtx_HIGH (mode, operand1)));
		  emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
		  emit_insn (gen_insv (operand0, GEN_INT (32),
		  int len = 5, pos = 27;

		  /* Insert the bits using the depdi instruction.  */
		      HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
		      HOST_WIDE_INT sign = v5 < 0;

		      /* Left extend the insertion.  */
		      insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
		      while (pos > 0 && (insv & 1) == sign)
			  insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));

		      emit_insn (gen_insv (operand0, GEN_INT (len),
					   GEN_INT (pos), GEN_INT (v5)));

		      len = pos > 0 && pos < 5 ? pos : 5;

	    = gen_rtx_EXPR_LIST (REG_EQUAL, op1, REG_NOTES (insn));
  /* Now have insn-emit do whatever it normally does.  */

/* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
   it will need a link/runtime reloc).  */

reloc_needed (tree exp)

  switch (TREE_CODE (exp))
      reloc = reloc_needed (TREE_OPERAND (exp, 0));
      reloc |= reloc_needed (TREE_OPERAND (exp, 1));

    case NON_LVALUE_EXPR:
      reloc = reloc_needed (TREE_OPERAND (exp, 0));

      for (link = CONSTRUCTOR_ELTS (exp); link; link = TREE_CHAIN (link))
	if (TREE_VALUE (link) != 0)
	  reloc |= reloc_needed (TREE_VALUE (link));

/* Does operand (which is a symbolic_operand) live in text space?
   If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info,

read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
  if (GET_CODE (operand) == CONST)
    operand = XEXP (XEXP (operand, 0), 0);
  if (GET_CODE (operand) == SYMBOL_REF)
    return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
  if (GET_CODE (operand) == SYMBOL_REF)
    return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
/* Return the best assembler insn template
   for moving operands[1] into operands[0] as a fullword.  */

singlemove_string (rtx *operands)
  HOST_WIDE_INT intval;

  if (GET_CODE (operands[0]) == MEM)
    return "stw %r1,%0";
  if (GET_CODE (operands[1]) == MEM)
  if (GET_CODE (operands[1]) == CONST_DOUBLE)

      gcc_assert (GET_MODE (operands[1]) == SFmode);

      /* Translate the CONST_DOUBLE to a CONST_INT with the same target
      REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
      REAL_VALUE_TO_TARGET_SINGLE (d, i);
      operands[1] = GEN_INT (i);
      /* Fall through to CONST_INT case.  */
  if (GET_CODE (operands[1]) == CONST_INT)
      intval = INTVAL (operands[1]);

      if (VAL_14_BITS_P (intval))
      else if ((intval & 0x7ff) == 0)
	return "ldil L'%1,%0";
      else if (zdepi_cint_p (intval))
	return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
	return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
  return "copy %1,%0";
/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the zdepi
   instructions.  Store the immediate value to insert in OP[0].  */

compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < 32; lsb++)

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = (lsb <= 28) ? 4 : 32 - lsb;
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < 32; len++)
	  if ((imm & (1 << len)) == 0)

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;

/* Compute position (in OP[1]) and width (in OP[2])
   useful for copying IMM to a register using the depdi,z
   instructions.  Store the immediate value to insert in OP[0].  */

compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
  HOST_WIDE_INT lsb, len;

  /* Find the least significant set bit in IMM.  */
  for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)

  /* Choose variants based on *sign* of the 5-bit field.  */
  if ((imm & 0x10) == 0)
    len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
	   ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
      /* Find the width of the bitstring in IMM.  */
      for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
	  if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)

      /* Sign extend IMM as a 5-bit value.  */
      imm = (imm & 0xf) - 0x10;
2158 /* Output assembler code to perform a doubleword move insn
2159 with operands OPERANDS. */
2162 output_move_double (rtx *operands)
2164 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2166 rtx addreg0 = 0, addreg1 = 0;
2168 /* First classify both operands. */
2170 if (REG_P (operands[0]))
2172 else if (offsettable_memref_p (operands[0]))
2174 else if (GET_CODE (operands[0]) == MEM)
2179 if (REG_P (operands[1]))
2181 else if (CONSTANT_P (operands[1]))
2183 else if (offsettable_memref_p (operands[1]))
2185 else if (GET_CODE (operands[1]) == MEM)
2190 /* Check for the cases that the operand constraints are not
2191 supposed to allow to happen. */
2192 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2194 /* Handle auto decrementing and incrementing loads and stores
2195 specifically, since the structure of the function doesn't work
2196 for them without major modification. Do this better once we teach
2197 this port about the general inc/dec addressing of PA.
2198 (This was written by tege. Chide him if it doesn't work.) */
2200 if (optype0 == MEMOP)
2202 /* We have to output the address syntax ourselves, since print_operand
2203 doesn't deal with the addresses we want to use. Fix this later. */
2205 rtx addr = XEXP (operands[0], 0);
2206 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2208 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2210 operands[0] = XEXP (addr, 0);
2211 gcc_assert (GET_CODE (operands[1]) == REG
2212 && GET_CODE (operands[0]) == REG);
2214 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2216 /* No overlap between high target register and address
2217 register. (We do this in a non-obvious way to
2218 save a register file writeback) */
2219 if (GET_CODE (addr) == POST_INC)
2220 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2221 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2223 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2225 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2227 operands[0] = XEXP (addr, 0);
2228 gcc_assert (GET_CODE (operands[1]) == REG
2229 && GET_CODE (operands[0]) == REG);
2231 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2232 /* No overlap between high target register and address
2233 register. (We do this in a non-obvious way to save a
2234 register file writeback) */
2235 if (GET_CODE (addr) == PRE_INC)
2236 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2237 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2240 if (optype1 == MEMOP)
2242 /* We have to output the address syntax ourselves, since print_operand
2243 doesn't deal with the addresses we want to use. Fix this later. */
2245 rtx addr = XEXP (operands[1], 0);
2246 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2248 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2250 operands[1] = XEXP (addr, 0);
2251 gcc_assert (GET_CODE (operands[0]) == REG
2252 && GET_CODE (operands[1]) == REG);
2254 if (!reg_overlap_mentioned_p (high_reg, addr))
2256 /* No overlap between high target register and address
2257 register. (We do this in a non-obvious way to
2258 save a register file writeback) */
2259 if (GET_CODE (addr) == POST_INC)
2260 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2261 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2265 /* This is an undefined situation. We should load into the
2266 address register *and* update that register. Probably
2267 we don't need to handle this at all. */
2268 if (GET_CODE (addr) == POST_INC)
2269 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2270 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2273 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2275 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2277 operands[1] = XEXP (addr, 0);
2278 gcc_assert (GET_CODE (operands[0]) == REG
2279 && GET_CODE (operands[1]) == REG);
2281 if (!reg_overlap_mentioned_p (high_reg, addr))
2283 /* No overlap between high target register and address
2284 register. (We do this in a non-obvious way to
2285 save a register file writeback) */
2286 if (GET_CODE (addr) == PRE_INC)
2287 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2288 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2292 /* This is an undefined situation. We should load into the
2293 address register *and* update that register. Probably
2294 we don't need to handle this at all. */
2295 if (GET_CODE (addr) == PRE_INC)
2296 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2297 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2300 else if (GET_CODE (addr) == PLUS
2301 && GET_CODE (XEXP (addr, 0)) == MULT)
2303 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2305 if (!reg_overlap_mentioned_p (high_reg, addr))
2309 xoperands[0] = high_reg;
2310 xoperands[1] = XEXP (addr, 1);
2311 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2312 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2313 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2315 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2321 xoperands[0] = high_reg;
2322 xoperands[1] = XEXP (addr, 1);
2323 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2324 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2325 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2327 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2332 /* If an operand is an unoffsettable memory ref, find a register
2333 we can increment temporarily to make it refer to the second word. */
2335 if (optype0 == MEMOP)
2336 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2338 if (optype1 == MEMOP)
2339 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2341 /* Ok, we can do one word at a time.
2342 Normally we do the low-numbered word first.
2344 In either case, set up in LATEHALF the operands to use
2345 for the high-numbered word and in some cases alter the
2346 operands in OPERANDS to be suitable for the low-numbered word. */
2348 if (optype0 == REGOP)
2349 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2350 else if (optype0 == OFFSOP)
2351 latehalf[0] = adjust_address (operands[0], SImode, 4);
2353 latehalf[0] = operands[0];
2355 if (optype1 == REGOP)
2356 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2357 else if (optype1 == OFFSOP)
2358 latehalf[1] = adjust_address (operands[1], SImode, 4);
2359 else if (optype1 == CNSTOP)
2360 split_double (operands[1], &operands[1], &latehalf[1]);
2362 latehalf[1] = operands[1];
2364 /* If the first move would clobber the source of the second one,
2365 do them in the other order.
2367 This can happen in two cases:
2369 mem -> register where the first half of the destination register
2370 is the same register used in the memory's address. Reload
2371 can create such insns.
2373 mem in this case will be either register indirect or register
2374 indirect plus a valid offset.
2376 register -> register move where REGNO(dst) == REGNO(src + 1)
2377 someone (Tim/Tege?) claimed this can happen for parameter loads.
2379 Handle mem -> register case first. */
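/* An illustrative example of the first case: for a move such as
(set (reg:DI 4) (mem:DI (reg:SI 4))), emitting the low-numbered word
first ("ldw 0(%r4),%r4") would clobber the address register before the
second word is loaded, so the high-numbered word ("ldw 4(%r4),%r5") is
emitted first.  */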
2380 if (optype0 == REGOP
2381 && (optype1 == MEMOP || optype1 == OFFSOP)
2382 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2385 /* Do the late half first. */
2387 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2388 output_asm_insn (singlemove_string (latehalf), latehalf);
2392 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2393 return singlemove_string (operands);
2396 /* Now handle register -> register case. */
2397 if (optype0 == REGOP && optype1 == REGOP
2398 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2400 output_asm_insn (singlemove_string (latehalf), latehalf);
2401 return singlemove_string (operands);
2404 /* Normal case: do the two words, low-numbered first. */
2406 output_asm_insn (singlemove_string (operands), operands);
2408 /* Make any unoffsettable addresses point at high-numbered word. */
2410 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2412 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2415 output_asm_insn (singlemove_string (latehalf), latehalf);
2417 /* Undo the adds we just did. */
2419 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2421 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2427 output_fp_move_double (rtx *operands)
2429 if (FP_REG_P (operands[0]))
2431 if (FP_REG_P (operands[1])
2432 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2433 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2435 output_asm_insn ("fldd%F1 %1,%0", operands);
2437 else if (FP_REG_P (operands[1]))
2439 output_asm_insn ("fstd%F0 %1,%0", operands);
2445 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2447 /* This is a pain. You have to be prepared to deal with an
2448 arbitrary address here, including pre/post increment/decrement,
2450 so avoid this in the MD.
2451 gcc_assert (GET_CODE (operands[0]) == REG);
2453 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2454 xoperands[0] = operands[0];
2455 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2460 /* Return a REG that occurs in ADDR with coefficient 1.
2461 ADDR can be effectively incremented by incrementing REG. */
2464 find_addr_reg (rtx addr)
2466 while (GET_CODE (addr) == PLUS)
2468 if (GET_CODE (XEXP (addr, 0)) == REG)
2469 addr = XEXP (addr, 0);
2470 else if (GET_CODE (XEXP (addr, 1)) == REG)
2471 addr = XEXP (addr, 1);
2472 else if (CONSTANT_P (XEXP (addr, 0)))
2473 addr = XEXP (addr, 1);
2474 else if (CONSTANT_P (XEXP (addr, 1)))
2475 addr = XEXP (addr, 0);
2479 gcc_assert (GET_CODE (addr) == REG);
2483 /* Emit code to perform a block move.
2485 OPERANDS[0] is the destination pointer as a REG, clobbered.
2486 OPERANDS[1] is the source pointer as a REG, clobbered.
2487 OPERANDS[2] is a register for temporary storage.
2488 OPERANDS[3] is a register for temporary storage.
2489 OPERANDS[4] is the size as a CONST_INT
2490 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2491 OPERANDS[6] is another temporary register. */
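/* A worked example, for illustration: with align == 4 and n_bytes == 23,
   the copy loop below moves 8 bytes per iteration (the trailing store sits
   in the addib delay slot), so two iterations copy 16 bytes.  The residual
   code then copies one more word and finally the remaining 3 bytes with the
   store-bytes instruction.  */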
2494 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2496 int align = INTVAL (operands[5]);
2497 unsigned long n_bytes = INTVAL (operands[4]);
2499 /* We can't move more than a word at a time because the PA
2500 has no integer move insns wider than a word. (Could use fp mem ops?) */
2501 if (align > (TARGET_64BIT ? 8 : 4))
2502 align = (TARGET_64BIT ? 8 : 4);
2504 /* Note that we know each loop below will execute at least twice
2505 (else we would have open-coded the copy). */
2509 /* Pre-adjust the loop counter. */
2510 operands[4] = GEN_INT (n_bytes - 16);
2511 output_asm_insn ("ldi %4,%2", operands);
2514 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2515 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2516 output_asm_insn ("std,ma %3,8(%0)", operands);
2517 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2518 output_asm_insn ("std,ma %6,8(%0)", operands);
2520 /* Handle the residual. There could be up to 15 bytes of
2521 residual to copy! */
2522 if (n_bytes % 16 != 0)
2524 operands[4] = GEN_INT (n_bytes % 8);
2525 if (n_bytes % 16 >= 8)
2526 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2527 if (n_bytes % 8 != 0)
2528 output_asm_insn ("ldd 0(%1),%6", operands);
2529 if (n_bytes % 16 >= 8)
2530 output_asm_insn ("std,ma %3,8(%0)", operands);
2531 if (n_bytes % 8 != 0)
2532 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2537 /* Pre-adjust the loop counter. */
2538 operands[4] = GEN_INT (n_bytes - 8);
2539 output_asm_insn ("ldi %4,%2", operands);
2542 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2543 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2544 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2545 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2546 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2548 /* Handle the residual. There could be up to 7 bytes of
2549 residual to copy! */
2550 if (n_bytes % 8 != 0)
2552 operands[4] = GEN_INT (n_bytes % 4);
2553 if (n_bytes % 8 >= 4)
2554 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2555 if (n_bytes % 4 != 0)
2556 output_asm_insn ("ldw 0(%1),%6", operands);
2557 if (n_bytes % 8 >= 4)
2558 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2559 if (n_bytes % 4 != 0)
2560 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2565 /* Pre-adjust the loop counter. */
2566 operands[4] = GEN_INT (n_bytes - 4);
2567 output_asm_insn ("ldi %4,%2", operands);
2570 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2571 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2572 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2573 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2574 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2576 /* Handle the residual. */
2577 if (n_bytes % 4 != 0)
2579 if (n_bytes % 4 >= 2)
2580 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2581 if (n_bytes % 2 != 0)
2582 output_asm_insn ("ldb 0(%1),%6", operands);
2583 if (n_bytes % 4 >= 2)
2584 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2585 if (n_bytes % 2 != 0)
2586 output_asm_insn ("stb %6,0(%0)", operands);
2591 /* Pre-adjust the loop counter. */
2592 operands[4] = GEN_INT (n_bytes - 2);
2593 output_asm_insn ("ldi %4,%2", operands);
2596 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2597 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2598 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2599 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2600 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2602 /* Handle the residual. */
2603 if (n_bytes % 2 != 0)
2605 output_asm_insn ("ldb 0(%1),%3", operands);
2606 output_asm_insn ("stb %3,0(%0)", operands);
2615 /* Count the number of insns necessary to handle this block move.
2617 Basic structure is the same as output_block_move, except that we
2618 count insns rather than emit them. */
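/* A worked example, for illustration: for align == 4 and n_bytes == 23,
   the residual tests below add two insns for the leftover word
   (23 % 8 >= 4) and two more for the trailing bytes (23 % 4 != 0);
   the total, including the fixed count for the copying loop, is then
   multiplied by 4 to give a length in bytes.  */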
2621 compute_movmem_length (rtx insn)
2623 rtx pat = PATTERN (insn);
2624 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2625 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2626 unsigned int n_insns = 0;
2628 /* We can't move more than a word at a time because the PA
2629 has no integer move insns wider than a word. (Could use fp mem ops?) */
2630 if (align > (TARGET_64BIT ? 8 : 4))
2631 align = (TARGET_64BIT ? 8 : 4);
2633 /* The basic copying loop. */
2637 if (n_bytes % (2 * align) != 0)
2639 if ((n_bytes % (2 * align)) >= align)
2642 if ((n_bytes % align) != 0)
2646 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2650 /* Emit code to perform a block clear.
2652 OPERANDS[0] is the destination pointer as a REG, clobbered.
2653 OPERANDS[1] is a register for temporary storage.
2654 OPERANDS[2] is the size as a CONST_INT
2655 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
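/* A worked example, for illustration: with align == 4 and n_bytes == 22,
   the loop below clears 8 bytes per iteration (the trailing store sits in
   the addib delay slot), so two iterations clear 16 bytes; the residual
   code then clears one more word and the final 2 bytes with the
   store-bytes instruction.  */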
2658 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2660 int align = INTVAL (operands[3]);
2661 unsigned long n_bytes = INTVAL (operands[2]);
2663 /* We can't clear more than a word at a time because the PA
2664 has no integer move insns wider than a word. */
2665 if (align > (TARGET_64BIT ? 8 : 4))
2666 align = (TARGET_64BIT ? 8 : 4);
2668 /* Note that we know each loop below will execute at least twice
2669 (else we would have open-coded the copy). */
2673 /* Pre-adjust the loop counter. */
2674 operands[2] = GEN_INT (n_bytes - 16);
2675 output_asm_insn ("ldi %2,%1", operands);
2678 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2679 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2680 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2682 /* Handle the residual. There could be up to 15 bytes of
2683 residual to clear! */
2684 if (n_bytes % 16 != 0)
2686 operands[2] = GEN_INT (n_bytes % 8);
2687 if (n_bytes % 16 >= 8)
2688 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2689 if (n_bytes % 8 != 0)
2690 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2695 /* Pre-adjust the loop counter. */
2696 operands[2] = GEN_INT (n_bytes - 8);
2697 output_asm_insn ("ldi %2,%1", operands);
2700 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2701 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2702 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2704 /* Handle the residual. There could be up to 7 bytes of
2705 residual to clear! */
2706 if (n_bytes % 8 != 0)
2708 operands[2] = GEN_INT (n_bytes % 4);
2709 if (n_bytes % 8 >= 4)
2710 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2711 if (n_bytes % 4 != 0)
2712 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2717 /* Pre-adjust the loop counter. */
2718 operands[2] = GEN_INT (n_bytes - 4);
2719 output_asm_insn ("ldi %2,%1", operands);
2722 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2723 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2724 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2726 /* Handle the residual. */
2727 if (n_bytes % 4 != 0)
2729 if (n_bytes % 4 >= 2)
2730 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2731 if (n_bytes % 2 != 0)
2732 output_asm_insn ("stb %%r0,0(%0)", operands);
2737 /* Pre-adjust the loop counter. */
2738 operands[2] = GEN_INT (n_bytes - 2);
2739 output_asm_insn ("ldi %2,%1", operands);
2742 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2743 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2744 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2746 /* Handle the residual. */
2747 if (n_bytes % 2 != 0)
2748 output_asm_insn ("stb %%r0,0(%0)", operands);
2757 /* Count the number of insns necessary to handle this block clear.
2759 Basic structure is the same as output_block_clear, except that we
2760 count insns rather than emit them. */
2763 compute_clrmem_length (rtx insn)
2765 rtx pat = PATTERN (insn);
2766 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2767 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2768 unsigned int n_insns = 0;
2770 /* We can't clear more than a word at a time because the PA
2771 has no integer move insns wider than a word. */
2772 if (align > (TARGET_64BIT ? 8 : 4))
2773 align = (TARGET_64BIT ? 8 : 4);
2775 /* The basic loop. */
2779 if (n_bytes % (2 * align) != 0)
2781 if ((n_bytes % (2 * align)) >= align)
2784 if ((n_bytes % align) != 0)
2788 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2794 output_and (rtx *operands)
2796 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2798 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2799 int ls0, ls1, ms0, p, len;
2801 for (ls0 = 0; ls0 < 32; ls0++)
2802 if ((mask & (1 << ls0)) == 0)
2805 for (ls1 = ls0; ls1 < 32; ls1++)
2806 if ((mask & (1 << ls1)) != 0)
2809 for (ms0 = ls1; ms0 < 32; ms0++)
2810 if ((mask & (1 << ms0)) == 0)
2813 gcc_assert (ms0 == 32);
2821 operands[2] = GEN_INT (len);
2822 return "{extru|extrw,u} %1,31,%2,%0";
2826 /* We could use this `depi' for the case above as well, but `depi'
2827 requires one more register file access than an `extru'. */
2832 operands[2] = GEN_INT (p);
2833 operands[3] = GEN_INT (len);
2834 return "{depi|depwi} 0,%2,%3,%0";
2838 return "and %1,%2,%0";
2841 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2842 storing the result in operands[0]. */
2844 output_64bit_and (rtx *operands)
2846 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2848 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2849 int ls0, ls1, ms0, p, len;
2851 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2852 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2855 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2856 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2859 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2860 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2863 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
2865 if (ls1 == HOST_BITS_PER_WIDE_INT)
2871 operands[2] = GEN_INT (len);
2872 return "extrd,u %1,63,%2,%0";
2876 /* We could use this `depi' for the case above as well, but `depi'
2877 requires one more register file access than an `extru'. */
2882 operands[2] = GEN_INT (p);
2883 operands[3] = GEN_INT (len);
2884 return "depdi 0,%2,%3,%0";
2888 return "and %1,%2,%0";
2892 output_ior (rtx *operands)
2894 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2895 int bs0, bs1, p, len;
2897 if (INTVAL (operands[2]) == 0)
2898 return "copy %1,%0";
2900 for (bs0 = 0; bs0 < 32; bs0++)
2901 if ((mask & (1 << bs0)) != 0)
2904 for (bs1 = bs0; bs1 < 32; bs1++)
2905 if ((mask & (1 << bs1)) == 0)
2908 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2913 operands[2] = GEN_INT (p);
2914 operands[3] = GEN_INT (len);
2915 return "{depi|depwi} -1,%2,%3,%0";
2918 /* Return a string to perform a bitwise inclusive-or of operands[1] with operands[2]
2919 storing the result in operands[0]. */
2921 output_64bit_ior (rtx *operands)
2923 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2924 int bs0, bs1, p, len;
2926 if (INTVAL (operands[2]) == 0)
2927 return "copy %1,%0";
2929 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
2930 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
2933 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
2934 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
2937 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
2938 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
2943 operands[2] = GEN_INT (p);
2944 operands[3] = GEN_INT (len);
2945 return "depdi -1,%2,%3,%0";
2948 /* Target hook for assembling integer objects. This code handles
2949 aligned SI and DI integers specially since function references
2950 must be preceded by P%. */
2953 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
2955 if (size == UNITS_PER_WORD
2957 && function_label_operand (x, VOIDmode))
2959 fputs (size == 8? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
2960 output_addr_const (asm_out_file, x);
2961 fputc ('\n', asm_out_file);
2964 return default_assemble_integer (x, size, aligned_p);
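/* For illustration (using a hypothetical function symbol `foo'): in
   32-bit mode a word-sized, aligned reference to `foo' is emitted as
   "\t.word\tP%foo", while any other integer falls through to
   default_assemble_integer.  */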
2967 /* Output an ascii string. */
2969 output_ascii (FILE *file, const char *p, int size)
2973 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
2975 /* The HP assembler can only take strings of 256 characters at one
2976 time. This is a limitation on input line length, *not* the
2977 length of the string. Sigh. Even worse, it seems that the
2978 restriction is in number of input characters (see \xnn &
2979 \whatever). So we have to do this very carefully. */
2981 fputs ("\t.STRING \"", file);
2984 for (i = 0; i < size; i += 4)
2988 for (io = 0, co = 0; io < MIN (4, size - i); io++)
2990 register unsigned int c = (unsigned char) p[i + io];
2992 if (c == '\"' || c == '\\')
2993 partial_output[co++] = '\\';
2994 if (c >= ' ' && c < 0177)
2995 partial_output[co++] = c;
2999 partial_output[co++] = '\\';
3000 partial_output[co++] = 'x';
3001 hexd = c / 16 - 0 + '0';
3003 hexd -= '9' - 'a' + 1;
3004 partial_output[co++] = hexd;
3005 hexd = c % 16 - 0 + '0';
3007 hexd -= '9' - 'a' + 1;
3008 partial_output[co++] = hexd;
3011 if (chars_output + co > 243)
3013 fputs ("\"\n\t.STRING \"", file);
3016 fwrite (partial_output, 1, (size_t) co, file);
3020 fputs ("\"\n", file);
3023 /* Try to rewrite floating point comparisons & branches to avoid
3024 useless add,tr insns.
3026 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3027 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3028 first attempt to remove useless add,tr insns. It is zero
3029 for the second pass as reorg sometimes leaves bogus REG_DEAD
3032 When CHECK_NOTES is zero we can only eliminate add,tr insns
3033 when there's a 1:1 correspondence between fcmp and ftest/fbranch
3036 remove_useless_addtr_insns (int check_notes)
3039 static int pass = 0;
3041 /* This is fairly cheap, so always run it when optimizing. */
3045 int fbranch_count = 0;
3047 /* Walk all the insns in this function looking for fcmp & fbranch
3048 instructions. Keep track of how many of each we find. */
3049 for (insn = get_insns (); insn; insn = next_insn (insn))
3053 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3054 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3057 tmp = PATTERN (insn);
3059 /* It must be a set. */
3060 if (GET_CODE (tmp) != SET)
3063 /* If the destination is CCFP, then we've found an fcmp insn. */
3064 tmp = SET_DEST (tmp);
3065 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3071 tmp = PATTERN (insn);
3072 /* If this is an fbranch instruction, bump the fbranch counter. */
3073 if (GET_CODE (tmp) == SET
3074 && SET_DEST (tmp) == pc_rtx
3075 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3076 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3077 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3078 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3086 /* Find all floating point compare + branch insns. If possible,
3087 reverse the comparison & the branch to avoid add,tr insns. */
3088 for (insn = get_insns (); insn; insn = next_insn (insn))
3092 /* Ignore anything that isn't an INSN. */
3093 if (GET_CODE (insn) != INSN)
3096 tmp = PATTERN (insn);
3098 /* It must be a set. */
3099 if (GET_CODE (tmp) != SET)
3102 /* The destination must be CCFP, which is register zero. */
3103 tmp = SET_DEST (tmp);
3104 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3107 /* INSN should be a set of CCFP.
3109 See if the result of this insn is used in a reversed FP
3110 conditional branch. If so, reverse our condition and
3111 the branch. Doing so avoids useless add,tr insns. */
3112 next = next_insn (insn);
3115 /* Jumps, calls and labels stop our search. */
3116 if (GET_CODE (next) == JUMP_INSN
3117 || GET_CODE (next) == CALL_INSN
3118 || GET_CODE (next) == CODE_LABEL)
3121 /* As does another fcmp insn. */
3122 if (GET_CODE (next) == INSN
3123 && GET_CODE (PATTERN (next)) == SET
3124 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3125 && REGNO (SET_DEST (PATTERN (next))) == 0)
3128 next = next_insn (next);
3131 /* Is NEXT_INSN a branch? */
3133 && GET_CODE (next) == JUMP_INSN)
3135 rtx pattern = PATTERN (next);
3137 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3138 and CCFP dies, then reverse our conditional and the branch
3139 to avoid the add,tr. */
3140 if (GET_CODE (pattern) == SET
3141 && SET_DEST (pattern) == pc_rtx
3142 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3143 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3144 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3145 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3146 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3147 && (fcmp_count == fbranch_count
3149 && find_regno_note (next, REG_DEAD, 0))))
3151 /* Reverse the branch. */
3152 tmp = XEXP (SET_SRC (pattern), 1);
3153 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3154 XEXP (SET_SRC (pattern), 2) = tmp;
3155 INSN_CODE (next) = -1;
3157 /* Reverse our condition. */
3158 tmp = PATTERN (insn);
3159 PUT_CODE (XEXP (tmp, 1),
3160 (reverse_condition_maybe_unordered
3161 (GET_CODE (XEXP (tmp, 1)))));
3171 /* You may have trouble believing this, but this is the 32 bit HP-PA
3176 Variable arguments (optional; any number may be allocated)
3178 SP-(4*(N+9)) arg word N
3183 Fixed arguments (must be allocated; may remain unused)
3192 SP-32 External Data Pointer (DP)
3194 SP-24 External/stub RP (RP')
3198 SP-8 Calling Stub RP (RP'')
3203 SP-0 Stack Pointer (points to next available address)
3207 /* This function saves registers as follows. Registers marked with ' are
3208 this function's registers (as opposed to the previous function's).
3209 If a frame_pointer isn't needed, r4 is saved as a general register;
3210 the space for the frame pointer is still allocated, though, to keep
3216 SP (FP') Previous FP
3217 SP + 4 Alignment filler (sigh)
3218 SP + 8 Space for locals reserved here.
3222 SP + n All call saved register used.
3226 SP + o All call saved fp registers used.
3230 SP + p (SP') points to next available address.
3234 /* Global variables set by output_function_prologue(). */
3235 /* Size of frame. Need to know this to emit return insns from
3237 static HOST_WIDE_INT actual_fsize, local_fsize;
3238 static int save_fregs;
3240 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3241 Handle case where DISP > 8k by using the add_high_const patterns.
3243 Note in DISP > 8k case, we will leave the high part of the address
3244 in %r1. There is code in hppa_expand_{prologue,epilogue} that knows this. */
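/* An illustrative sketch of the DISP > 8k case: for a 32-bit store with
   disp == 0x3000 (too large for a 14-bit displacement), the code below
   first forms the high part of the address in %r1 (the add_high_const
   pattern, i.e. an addil against BASE) and then stores through a lo_sum
   of %r1 and the displacement, leaving the high part in %r1 as noted
   above.  */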
3247 store_reg (int reg, HOST_WIDE_INT disp, int base)
3249 rtx insn, dest, src, basereg;
3251 src = gen_rtx_REG (word_mode, reg);
3252 basereg = gen_rtx_REG (Pmode, base);
3253 if (VAL_14_BITS_P (disp))
3255 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3256 insn = emit_move_insn (dest, src);
3258 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3260 rtx delta = GEN_INT (disp);
3261 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3263 emit_move_insn (tmpreg, delta);
3264 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3265 dest = gen_rtx_MEM (word_mode, tmpreg);
3266 insn = emit_move_insn (dest, src);
3270 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3271 gen_rtx_SET (VOIDmode,
3272 gen_rtx_MEM (word_mode,
3273 gen_rtx_PLUS (word_mode, basereg,
3281 rtx delta = GEN_INT (disp);
3282 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3283 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3285 emit_move_insn (tmpreg, high);
3286 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3287 insn = emit_move_insn (dest, src);
3291 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3292 gen_rtx_SET (VOIDmode,
3293 gen_rtx_MEM (word_mode,
3294 gen_rtx_PLUS (word_mode, basereg,
3302 RTX_FRAME_RELATED_P (insn) = 1;
3305 /* Emit RTL to store REG at the memory location specified by BASE and then
3306 add MOD to BASE. MOD must be <= 8k. */
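/* For illustration: the post_store pattern emitted below corresponds to a
   single store with base modification, i.e. a {stws|stw},ma (or std,ma in
   64-bit mode) that stores REG at *BASE and then adds MOD to BASE.  */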
3309 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3311 rtx insn, basereg, srcreg, delta;
3313 gcc_assert (VAL_14_BITS_P (mod));
3315 basereg = gen_rtx_REG (Pmode, base);
3316 srcreg = gen_rtx_REG (word_mode, reg);
3317 delta = GEN_INT (mod);
3319 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3322 RTX_FRAME_RELATED_P (insn) = 1;
3324 /* RTX_FRAME_RELATED_P must be set on each frame related set
3325 in a parallel with more than one element. Don't set
3326 RTX_FRAME_RELATED_P in the first set if reg is temporary
3327 register 1. The effect of this operation is recorded in
3328 the initial copy. */
3331 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3332 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3336 /* The first element of a PARALLEL is always processed if it is
3337 a SET. Thus, we need an expression list for this case. */
3339 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3340 gen_rtx_SET (VOIDmode, basereg,
3341 gen_rtx_PLUS (word_mode, basereg, delta)),
3347 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3348 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3349 whether to add a frame note or not.
3351 In the DISP > 8k case, we leave the high part of the address in %r1.
3352 There is code in hppa_expand_{prologue,epilogue} that knows about this. */
3355 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3359 if (VAL_14_BITS_P (disp))
3361 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3362 plus_constant (gen_rtx_REG (Pmode, base), disp));
3364 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3366 rtx basereg = gen_rtx_REG (Pmode, base);
3367 rtx delta = GEN_INT (disp);
3368 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3370 emit_move_insn (tmpreg, delta);
3371 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3372 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3376 rtx basereg = gen_rtx_REG (Pmode, base);
3377 rtx delta = GEN_INT (disp);
3378 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3380 emit_move_insn (tmpreg,
3381 gen_rtx_PLUS (Pmode, basereg,
3382 gen_rtx_HIGH (Pmode, delta)));
3383 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3384 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3387 if (DO_FRAME_NOTES && note)
3388 RTX_FRAME_RELATED_P (insn) = 1;
3392 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3397 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3398 be consistent with the rounding and size calculation done here.
3399 Change them at the same time. */
3401 /* We do our own stack alignment. First, round the size of the
3402 stack locals up to a word boundary. */
3403 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3405 /* Space for previous frame pointer + filler. If any frame is
3406 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3407 waste some space here for the sake of HP compatibility. The
3408 first slot is only used when the frame pointer is needed. */
3409 if (size || frame_pointer_needed)
3410 size += STARTING_FRAME_OFFSET;
3412 /* If the current function calls __builtin_eh_return, then we need
3413 to allocate stack space for registers that will hold data for
3414 the exception handler. */
3415 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3419 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3421 size += i * UNITS_PER_WORD;
3424 /* Account for space used by the callee general register saves. */
3425 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3426 if (regs_ever_live[i])
3427 size += UNITS_PER_WORD;
3429 /* Account for space used by the callee floating point register saves. */
3430 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3431 if (regs_ever_live[i]
3432 || (!TARGET_64BIT && regs_ever_live[i + 1]))
3436 /* We always save both halves of the FP register, so always
3437 increment the frame size by 8 bytes. */
3441 /* If any of the floating registers are saved, account for the
3442 alignment needed for the floating point register save block. */
3445 size = (size + 7) & ~7;
3450 /* The various ABIs include space for the outgoing parameters in the
3451 size of the current function's stack frame. We don't need to align
3452 for the outgoing arguments as their alignment is set by the final
3453 rounding for the frame as a whole. */
3454 size += current_function_outgoing_args_size;
3456 /* Allocate space for the fixed frame marker. This space must be
3457 allocated for any function that makes calls or allocates
3459 if (!current_function_is_leaf || size)
3460 size += TARGET_64BIT ? 48 : 32;
3462 /* Finally, round to the preferred stack boundary. */
3463 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3464 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
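/* A rough worked example, assuming the 32-bit values of an 8-byte
   STARTING_FRAME_OFFSET, a 32-byte fixed frame marker and a 64-byte
   PREFERRED_STACK_BOUNDARY (all assumptions; see pa.h): a non-leaf
   function with 40 bytes of locals and no callee saves accumulates
   40 + 8 + 32 = 80 bytes, which the final rounding grows to 128.  */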
3467 /* Generate the assembly code for function entry. FILE is a stdio
3468 stream to output the code to. SIZE is an int: how many units of
3469 temporary storage to allocate.
3471 Refer to the array `regs_ever_live' to determine which registers to
3472 save; `regs_ever_live[I]' is nonzero if register number I is ever
3473 used in the function. This function is responsible for knowing
3474 which registers should not be saved even if used. */
3476 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3477 of memory. If any fpu reg is used in the function, we allocate
3478 such a block here, at the bottom of the frame, just in case it's needed.
3480 If this function is a leaf procedure, then we may choose not
3481 to do a "save" insn. The decision about whether or not
3482 to do this is made in regclass.c. */
3485 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3487 /* The function's label and associated .PROC must never be
3488 separated and must be output *after* any profiling declarations
3489 to avoid changing spaces/subspaces within a procedure. */
3490 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3491 fputs ("\t.PROC\n", file);
3493 /* hppa_expand_prologue does the dirty work now. We just need
3494 to output the assembler directives which denote the start
3496 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3497 if (regs_ever_live[2])
3498 fputs (",CALLS,SAVE_RP", file);
3500 fputs (",NO_CALLS", file);
3502 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3503 at the beginning of the frame and that it is used as the frame
3504 pointer for the frame. We do this because our current frame
3505 layout doesn't conform to that specified in the HP runtime
3506 documentation and we need a way to indicate to programs such as
3507 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3508 isn't used by HP compilers but is supported by the assembler.
3509 However, SAVE_SP is supposed to indicate that the previous stack
3510 pointer has been saved in the frame marker. */
3511 if (frame_pointer_needed)
3512 fputs (",SAVE_SP", file);
3514 /* Pass on information about the number of callee register saves
3515 performed in the prologue.
3517 The compiler is supposed to pass the highest register number
3518 saved; the assembler then has to adjust that number before
3519 entering it into the unwind descriptor (to account for any
3520 caller saved registers with lower register numbers than the
3521 first callee saved register). */
3523 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3526 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3528 fputs ("\n\t.ENTRY\n", file);
3530 remove_useless_addtr_insns (0);
3534 hppa_expand_prologue (void)
3536 int merge_sp_adjust_with_store = 0;
3537 HOST_WIDE_INT size = get_frame_size ();
3538 HOST_WIDE_INT offset;
3546 /* Compute total size for frame pointer, filler, locals and rounding to
3547 the next word boundary. Similar code appears in compute_frame_size
3548 and must be changed in tandem with this code. */
3549 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3550 if (local_fsize || frame_pointer_needed)
3551 local_fsize += STARTING_FRAME_OFFSET;
3553 actual_fsize = compute_frame_size (size, &save_fregs);
3555 /* Compute a few things we will use often. */
3556 tmpreg = gen_rtx_REG (word_mode, 1);
3558 /* Save RP first. The calling conventions manual states RP will
3559 always be stored into the caller's frame at sp - 20 or sp - 16
3560 depending on which ABI is in use. */
3561 if (regs_ever_live[2] || current_function_calls_eh_return)
3562 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3564 /* Allocate the local frame and set up the frame pointer if needed. */
3565 if (actual_fsize != 0)
3567 if (frame_pointer_needed)
3569 /* Copy the old frame pointer temporarily into %r1. Set up the
3570 new stack pointer, then store away the saved old frame pointer
3571 into the stack at sp and at the same time update the stack
3572 pointer by actual_fsize bytes. Two versions, first
3573 handles small (<8k) frames. The second handles large (>=8k)
3575 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3578 /* We need to record the frame pointer save here since the
3579 new frame pointer is set in the following insn. */
3580 RTX_FRAME_RELATED_P (insn) = 1;
3582 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3583 gen_rtx_SET (VOIDmode,
3584 gen_rtx_MEM (word_mode, stack_pointer_rtx),
3589 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3591 RTX_FRAME_RELATED_P (insn) = 1;
3593 if (VAL_14_BITS_P (actual_fsize))
3594 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3597 /* It is incorrect to store the saved frame pointer at *sp,
3598 then increment sp (writes beyond the current stack boundary).
3600 So instead use stwm to store at *sp and post-increment the
3601 stack pointer as an atomic operation. Then increment sp to
3602 finish allocating the new frame. */
3603 HOST_WIDE_INT adjust1 = 8192 - 64;
3604 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
3606 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3607 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3611 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3612 we need to store the previous stack pointer (frame pointer)
3613 into the frame marker on targets that use the HP unwind
3614 library. This allows the HP unwind library to be used to
3615 unwind GCC frames. However, we are not fully compatible
3616 with the HP library because our frame layout differs from
3617 that specified in the HP runtime specification.
3619 We don't want a frame note on this instruction as the frame
3620 marker moves during dynamic stack allocation.
3622 This instruction also serves as a blockage to prevent
3623 register spills from being scheduled before the stack
3624 pointer is raised. This is necessary as we store
3625 registers using the frame pointer as a base register,
3626 and the frame pointer is set before sp is raised. */
3627 if (TARGET_HPUX_UNWIND_LIBRARY)
3629 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3630 GEN_INT (TARGET_64BIT ? -8 : -4));
3632 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3636 emit_insn (gen_blockage ());
3638 /* no frame pointer needed. */
3641 /* In some cases we can perform the first callee register save
3642 and allocating the stack frame at the same time. If so, just
3643 make a note of it and defer allocating the frame until saving
3644 the callee registers. */
3645 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3646 merge_sp_adjust_with_store = 1;
3647 /* Can not optimize. Adjust the stack frame by actual_fsize
3650 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3655 /* Normal register save.
3657 Do not save the frame pointer in the frame_pointer_needed case. It
3658 was done earlier. */
3659 if (frame_pointer_needed)
3661 offset = local_fsize;
3663 /* Saving the EH return data registers in the frame is the simplest
3664 way to get the frame unwind information emitted. We put them
3665 just before the general registers. */
3666 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3668 unsigned int i, regno;
3672 regno = EH_RETURN_DATA_REGNO (i);
3673 if (regno == INVALID_REGNUM)
3676 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3677 offset += UNITS_PER_WORD;
3681 for (i = 18; i >= 4; i--)
3682 if (regs_ever_live[i] && ! call_used_regs[i])
3684 store_reg (i, offset, FRAME_POINTER_REGNUM);
3685 offset += UNITS_PER_WORD;
3688 /* Account for %r3 which is saved in a special place. */
3691 /* No frame pointer needed. */
3694 offset = local_fsize - actual_fsize;
3696 /* Saving the EH return data registers in the frame is the simplest
3697 way to get the frame unwind information emitted. */
3698 if (DO_FRAME_NOTES && current_function_calls_eh_return)
3700 unsigned int i, regno;
3704 regno = EH_RETURN_DATA_REGNO (i);
3705 if (regno == INVALID_REGNUM)
3708 /* If merge_sp_adjust_with_store is nonzero, then we can
3709 optimize the first save. */
3710 if (merge_sp_adjust_with_store)
3712 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3713 merge_sp_adjust_with_store = 0;
3716 store_reg (regno, offset, STACK_POINTER_REGNUM);
3717 offset += UNITS_PER_WORD;
3721 for (i = 18; i >= 3; i--)
3722 if (regs_ever_live[i] && ! call_used_regs[i])
3724 /* If merge_sp_adjust_with_store is nonzero, then we can
3725 optimize the first GR save. */
3726 if (merge_sp_adjust_with_store)
3728 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3729 merge_sp_adjust_with_store = 0;
3732 store_reg (i, offset, STACK_POINTER_REGNUM);
3733 offset += UNITS_PER_WORD;
3737 /* If we wanted to merge the SP adjustment with a GR save, but we never
3738 did any GR saves, then just emit the adjustment here. */
3739 if (merge_sp_adjust_with_store)
3740 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3744 /* The hppa calling conventions say that %r19, the pic offset
3745 register, is saved at sp - 32 (in this function's frame)
3746 when generating PIC code. FIXME: What is the correct thing
3747 to do for functions which make no calls and allocate no
3748 frame? Do we need to allocate a frame, or can we just omit
3749 the save? For now we'll just omit the save.
3751 We don't want a note on this insn as the frame marker can
3752 move if there is a dynamic stack allocation. */
3753 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3755 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3757 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3761 /* Align pointer properly (doubleword boundary). */
3762 offset = (offset + 7) & ~7;
3764 /* Floating point register store. */
3769 /* First get the frame or stack pointer to the start of the FP register
3771 if (frame_pointer_needed)
3773 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3774 base = frame_pointer_rtx;
3778 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3779 base = stack_pointer_rtx;
3782 /* Now actually save the FP registers. */
3783 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3785 if (regs_ever_live[i]
3786 || (! TARGET_64BIT && regs_ever_live[i + 1]))
3788 rtx addr, insn, reg;
3789 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3790 reg = gen_rtx_REG (DFmode, i);
3791 insn = emit_move_insn (addr, reg);
3794 RTX_FRAME_RELATED_P (insn) = 1;
3797 rtx mem = gen_rtx_MEM (DFmode,
3798 plus_constant (base, offset));
3800 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3801 gen_rtx_SET (VOIDmode, mem, reg),
3806 rtx meml = gen_rtx_MEM (SFmode,
3807 plus_constant (base, offset));
3808 rtx memr = gen_rtx_MEM (SFmode,
3809 plus_constant (base, offset + 4));
3810 rtx regl = gen_rtx_REG (SFmode, i);
3811 rtx regr = gen_rtx_REG (SFmode, i + 1);
3812 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3813 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3816 RTX_FRAME_RELATED_P (setl) = 1;
3817 RTX_FRAME_RELATED_P (setr) = 1;
3818 vec = gen_rtvec (2, setl, setr);
3820 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
3821 gen_rtx_SEQUENCE (VOIDmode, vec),
3825 offset += GET_MODE_SIZE (DFmode);
3832 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3833 Handle case where DISP > 8k by using the add_high_const patterns. */
3836 load_reg (int reg, HOST_WIDE_INT disp, int base)
3838 rtx dest = gen_rtx_REG (word_mode, reg);
3839 rtx basereg = gen_rtx_REG (Pmode, base);
3842 if (VAL_14_BITS_P (disp))
3843 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3844 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3846 rtx delta = GEN_INT (disp);
3847 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3849 emit_move_insn (tmpreg, delta);
3850 if (TARGET_DISABLE_INDEXING)
3852 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3853 src = gen_rtx_MEM (word_mode, tmpreg);
3856 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3860 rtx delta = GEN_INT (disp);
3861 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3862 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3864 emit_move_insn (tmpreg, high);
3865 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3868 emit_move_insn (dest, src);
3871 /* Update the total code bytes output to the text section. */
3874 update_total_code_bytes (int nbytes)
3876 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3877 && !IN_NAMED_SECTION_P (cfun->decl))
3879 if (INSN_ADDRESSES_SET_P ())
3881 unsigned long old_total = total_code_bytes;
3883 total_code_bytes += nbytes;
3885 /* Be prepared to handle overflows. */
3886 if (old_total > total_code_bytes)
3887 total_code_bytes = -1;
3890 total_code_bytes = -1;
3894 /* This function generates the assembly code for function exit.
3895 Args are as for output_function_prologue ().
3897 The function epilogue should not depend on the current stack
3898 pointer! It should use the frame pointer only. This is mandatory
3899 because of alloca; we also take advantage of it to omit stack
3900 adjustments before returning. */
3903 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3905 rtx insn = get_last_insn ();
3909 /* hppa_expand_epilogue does the dirty work now. We just need
3910 to output the assembler directives which denote the end
3913 To make debuggers happy, emit a nop if the epilogue was completely
3914 eliminated due to a volatile call as the last insn in the
3915 current function. That way the return address (in %r2) will
3916 always point to a valid instruction in the current function. */
3918 /* Get the last real insn. */
3919 if (GET_CODE (insn) == NOTE)
3920 insn = prev_real_insn (insn);
3922 /* If it is a sequence, then look inside. */
3923 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
3924 insn = XVECEXP (PATTERN (insn), 0, 0);
3926 /* If insn is a CALL_INSN, then it must be a call to a volatile
3927 function (otherwise there would be epilogue insns). */
3928 if (insn && GET_CODE (insn) == CALL_INSN)
3930 fputs ("\tnop\n", file);
3934 fputs ("\t.EXIT\n\t.PROCEND\n", file);
3936 if (TARGET_SOM && TARGET_GAS)
3938 /* We're done with this subspace except possibly for some additional
3939 debug information. Forget that we are in this subspace to ensure
3940 that the next function is output in its own subspace. */
3944 if (INSN_ADDRESSES_SET_P ())
3946 insn = get_last_nonnote_insn ();
3947 last_address += INSN_ADDRESSES (INSN_UID (insn));
3949 last_address += insn_default_length (insn);
3950 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
3951 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
3954 /* Finally, update the total number of code bytes output so far. */
3955 update_total_code_bytes (last_address);
3959 hppa_expand_epilogue (void)
3962 HOST_WIDE_INT offset;
3963 HOST_WIDE_INT ret_off = 0;
3965 int merge_sp_adjust_with_load = 0;
3967 /* We will use this often. */
3968 tmpreg = gen_rtx_REG (word_mode, 1);
3970 /* Try to restore RP early to avoid load/use interlocks when
3971 RP gets used in the return (bv) instruction. This appears to still
3972 be necessary even when we schedule the prologue and epilogue. */
3973 if (regs_ever_live [2] || current_function_calls_eh_return)
3975 ret_off = TARGET_64BIT ? -16 : -20;
3976 if (frame_pointer_needed)
3978 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
3983 /* No frame pointer, and stack is smaller than 8k. */
3984 if (VAL_14_BITS_P (ret_off - actual_fsize))
3986 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
3992 /* General register restores. */
3993 if (frame_pointer_needed)
3995 offset = local_fsize;
3997 /* If the current function calls __builtin_eh_return, then we need
3998 to restore the saved EH data registers. */
3999 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4001 unsigned int i, regno;
4005 regno = EH_RETURN_DATA_REGNO (i);
4006 if (regno == INVALID_REGNUM)
4009 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4010 offset += UNITS_PER_WORD;
4014 for (i = 18; i >= 4; i--)
4015 if (regs_ever_live[i] && ! call_used_regs[i])
4017 load_reg (i, offset, FRAME_POINTER_REGNUM);
4018 offset += UNITS_PER_WORD;
4023 offset = local_fsize - actual_fsize;
4025 /* If the current function calls __builtin_eh_return, then we need
4026 to restore the saved EH data registers. */
4027 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4029 unsigned int i, regno;
4033 regno = EH_RETURN_DATA_REGNO (i);
4034 if (regno == INVALID_REGNUM)
4037 /* Only for the first load.
4038 merge_sp_adjust_with_load holds the register load
4039 with which we will merge the sp adjustment. */
4040 if (merge_sp_adjust_with_load == 0
4042 && VAL_14_BITS_P (-actual_fsize))
4043 merge_sp_adjust_with_load = regno;
4045 load_reg (regno, offset, STACK_POINTER_REGNUM);
4046 offset += UNITS_PER_WORD;
4050 for (i = 18; i >= 3; i--)
4052 if (regs_ever_live[i] && ! call_used_regs[i])
4054 /* Only for the first load.
4055 merge_sp_adjust_with_load holds the register load
4056 with which we will merge the sp adjustment. */
4057 if (merge_sp_adjust_with_load == 0
4059 && VAL_14_BITS_P (-actual_fsize))
4060 merge_sp_adjust_with_load = i;
4062 load_reg (i, offset, STACK_POINTER_REGNUM);
4063 offset += UNITS_PER_WORD;
4068 /* Align pointer properly (doubleword boundary). */
4069 offset = (offset + 7) & ~7;
4071 /* FP register restores. */
4074 /* Adjust the register to index off of. */
4075 if (frame_pointer_needed)
4076 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4078 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4080 /* Actually do the restores now. */
4081 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4082 if (regs_ever_live[i]
4083 || (! TARGET_64BIT && regs_ever_live[i + 1]))
4085 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4086 rtx dest = gen_rtx_REG (DFmode, i);
4087 emit_move_insn (dest, src);
4091 /* Emit a blockage insn here to keep these insns from being moved to
4092 an earlier spot in the epilogue, or into the main instruction stream.
4094 This is necessary as we must not cut the stack back before all the
4095 restores are finished. */
4096 emit_insn (gen_blockage ());
4098 /* Reset stack pointer (and possibly frame pointer). The stack
4099 pointer is initially set to fp + 64 to avoid a race condition. */
4100 if (frame_pointer_needed)
4102 rtx delta = GEN_INT (-64);
4104 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4105 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4107 /* If we were deferring a callee register restore, do it now. */
4108 else if (merge_sp_adjust_with_load)
4110 rtx delta = GEN_INT (-actual_fsize);
4111 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4113 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4115 else if (actual_fsize != 0)
4116 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4119 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4120 frame greater than 8k), do so now. */
4122 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4124 if (DO_FRAME_NOTES && current_function_calls_eh_return)
4126 rtx sa = EH_RETURN_STACKADJ_RTX;
4128 emit_insn (gen_blockage ());
4129 emit_insn (TARGET_64BIT
4130 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4131 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4136 hppa_pic_save_rtx (void)
4138 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4142 hppa_profile_hook (int label_no)
4144 /* We use SImode for the address of the function in both 32 and
4145 64-bit code to avoid having to provide DImode versions of the
4146 lcla2 and load_offset_label_address insn patterns. */
4147 rtx reg = gen_reg_rtx (SImode);
4148 rtx label_rtx = gen_label_rtx ();
4149 rtx begin_label_rtx, call_insn;
4150 char begin_label_name[16];
4152 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4154 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4157 emit_move_insn (arg_pointer_rtx,
4158 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4161 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4163 /* The address of the function is loaded into %r25 with an instruction-
4164 relative sequence that avoids the use of relocations. The sequence
4165 is split so that the load_offset_label_address instruction can
4166 occupy the delay slot of the call to _mcount. */
4168 emit_insn (gen_lcla2 (reg, label_rtx));
4170 emit_insn (gen_lcla1 (reg, label_rtx));
4172 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4173 reg, begin_label_rtx, label_rtx));
4175 #ifndef NO_PROFILE_COUNTERS
4177 rtx count_label_rtx, addr, r24;
4178 char count_label_name[16];
4180 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4181 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4183 addr = force_reg (Pmode, count_label_rtx);
4184 r24 = gen_rtx_REG (Pmode, 24);
4185 emit_move_insn (r24, addr);
4188 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4189 gen_rtx_SYMBOL_REF (Pmode,
4191 GEN_INT (TARGET_64BIT ? 24 : 12)));
4193 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4198 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4199 gen_rtx_SYMBOL_REF (Pmode,
4201 GEN_INT (TARGET_64BIT ? 16 : 8)));
4205 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4206 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4208 /* Indicate the _mcount call cannot throw, nor will it execute a
4210 REG_NOTES (call_insn)
4211 = gen_rtx_EXPR_LIST (REG_EH_REGION, constm1_rtx, REG_NOTES (call_insn));
4214 /* Fetch the return address for the frame COUNT steps up from
4215 the current frame, after the prologue. FRAMEADDR is the
4216 frame pointer of the COUNT frame.
4218 We want to ignore any export stub remnants here. To handle this,
4219 we examine the code at the return address, and if it is an export
4220 stub, we return a memory rtx for the stub return address stored
4223 The value returned is used in two different ways:
4225 1. To find a function's caller.
4227 2. To change the return address for a function.
4229 This function handles most instances of case 1; however, it will
4230 fail if there are two levels of stubs to execute on the return
4231 path. The only way I believe that can happen is if the return value
4232 needs a parameter relocation, which never happens for C code.
4234 This function handles most instances of case 2; however, it will
4235 fail if we did not originally have stub code on the return path
4236 but will need stub code on the new return path. This can happen if
4237 the caller & callee are both in the main program, but the new
4238 return location is in a shared library. */
4241 return_addr_rtx (int count, rtx frameaddr)
4251 rp = get_hard_reg_initial_val (Pmode, 2);
4253 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4256 saved_rp = gen_reg_rtx (Pmode);
4257 emit_move_insn (saved_rp, rp);
4259 /* Get pointer to the instruction stream. We have to mask out the
4260 privilege level from the two low order bits of the return address
4261 pointer here so that ins will point to the start of the first
4262 instruction that would have been executed if we returned. */
4263 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
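/* For example (illustrative values): a saved return pointer of 0x40001023
   carries privilege level 3 in its two low-order bits; masking it with
   MASK_RETURN_ADDR yields the instruction address 0x40001020.  */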
4264 label = gen_label_rtx ();
4266 /* Check the instruction stream at the normal return address for the
4269 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4270 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4271 0x00011820 | stub+16: mtsp r1,sr0
4272 0xe0400002 | stub+20: be,n 0(sr0,rp)
4274 If it is an export stub, then our return address is really in
4277 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4278 NULL_RTX, SImode, 1);
4279 emit_jump_insn (gen_bne (label));
4281 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4282 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4283 emit_jump_insn (gen_bne (label));
4285 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4286 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4287 emit_jump_insn (gen_bne (label));
4289 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4290 GEN_INT (0xe0400002), NE, NULL_RTX, SImode, 1);
4292 /* If there is no export stub then just use the value saved from
4293 the return pointer register. */
4295 emit_jump_insn (gen_bne (label));
4297 /* Here we know that our return address points to an export
4298 stub. We don't want to return the address of the export stub,
4299 but rather the return address of the export stub. That return
4300 address is stored at -24[frameaddr]. */
4302 emit_move_insn (saved_rp,
4304 memory_address (Pmode,
4305 plus_constant (frameaddr,
4312 /* This is only valid once reload has completed because it depends on
4313 knowing exactly how much (if any) frame there is and...
4315 It's only valid if there is no frame marker to de-allocate and...
4317 It's only valid if %r2 hasn't been saved into the caller's frame
4318 (we're not profiling and %r2 isn't live anywhere). */
4320 hppa_can_use_return_insn_p (void)
4322 return (reload_completed
4323 && (compute_frame_size (get_frame_size (), 0) ? 0 : 1)
4324 && ! regs_ever_live[2]
4325 && ! frame_pointer_needed);
4329 emit_bcond_fp (enum rtx_code code, rtx operand0)
4331 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4332 gen_rtx_IF_THEN_ELSE (VOIDmode,
4333 gen_rtx_fmt_ee (code,
4335 gen_rtx_REG (CCFPmode, 0),
4337 gen_rtx_LABEL_REF (VOIDmode, operand0),
4343 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4345 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4346 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4349 /* Adjust the cost of a scheduling dependency. Return the new cost of
4350 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
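/* Illustrative example (the latency value is hypothetical, not taken from
   the pa.md scheduling descriptions): if a preceding fpdiv has a default
   latency of 8 cycles, an anti-dependent fpload whose target is one of the
   divide's operands is given a cost of insn_default_latency (dep_insn) - 1
   = 7, so it can issue one cycle before the divide finishes.  */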
4353 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4355 enum attr_type attr_type;
4357 /* Don't adjust costs for a pa8000 chip; also don't adjust any
4358 true dependencies, as they are described with bypasses now. */
4359 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4362 if (! recog_memoized (insn))
4365 attr_type = get_attr_type (insn);
4367 switch (REG_NOTE_KIND (link))
4370 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4373 if (attr_type == TYPE_FPLOAD)
4375 rtx pat = PATTERN (insn);
4376 rtx dep_pat = PATTERN (dep_insn);
4377 if (GET_CODE (pat) == PARALLEL)
4379 /* This happens for the fldXs,mb patterns. */
4380 pat = XVECEXP (pat, 0, 0);
4382 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4383 /* If this happens, we have to extend this to schedule
4384 optimally. Return 0 for now. */
4387 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4389 if (! recog_memoized (dep_insn))
4391 switch (get_attr_type (dep_insn))
4398 case TYPE_FPSQRTSGL:
4399 case TYPE_FPSQRTDBL:
4400 /* A fpload can't be issued until one cycle before a
4401 preceding arithmetic operation has finished if
4402 the target of the fpload is any of the sources
4403 (or destination) of the arithmetic operation. */
4404 return insn_default_latency (dep_insn) - 1;
4411 else if (attr_type == TYPE_FPALU)
4413 rtx pat = PATTERN (insn);
4414 rtx dep_pat = PATTERN (dep_insn);
4415 if (GET_CODE (pat) == PARALLEL)
4417 /* This happens for the fldXs,mb patterns. */
4418 pat = XVECEXP (pat, 0, 0);
4420 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4421 /* If this happens, we have to extend this to schedule
4422 optimally. Return 0 for now. */
4425 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4427 if (! recog_memoized (dep_insn))
4429 switch (get_attr_type (dep_insn))
4433 case TYPE_FPSQRTSGL:
4434 case TYPE_FPSQRTDBL:
4435 /* An ALU flop can't be issued until two cycles before a
4436 preceding divide or sqrt operation has finished if
4437 the target of the ALU flop is any of the sources
4438 (or destination) of the divide or sqrt operation. */
4439 return insn_default_latency (dep_insn) - 2;
4447 /* For other anti dependencies, the cost is 0. */
4450 case REG_DEP_OUTPUT:
4451 /* Output dependency; DEP_INSN writes a register that INSN writes some
4453 if (attr_type == TYPE_FPLOAD)
4455 rtx pat = PATTERN (insn);
4456 rtx dep_pat = PATTERN (dep_insn);
4457 if (GET_CODE (pat) == PARALLEL)
4459 /* This happens for the fldXs,mb patterns. */
4460 pat = XVECEXP (pat, 0, 0);
4462 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4463 /* If this happens, we have to extend this to schedule
4464 optimally. Return 0 for now. */
4467 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4469 if (! recog_memoized (dep_insn))
4471 switch (get_attr_type (dep_insn))
4478 case TYPE_FPSQRTSGL:
4479 case TYPE_FPSQRTDBL:
4480 /* A fpload can't be issued until one cycle before a
4481 preceding arithmetic operation has finished if
4482 the target of the fpload is the destination of the
4483 arithmetic operation.
4485 Exception: For PA7100LC, PA7200 and PA7300, the cost
4486 is 3 cycles, unless they bundle together. We also
4487 pay the penalty if the second insn is a fpload. */
4488 return insn_default_latency (dep_insn) - 1;
4495 else if (attr_type == TYPE_FPALU)
4497 rtx pat = PATTERN (insn);
4498 rtx dep_pat = PATTERN (dep_insn);
4499 if (GET_CODE (pat) == PARALLEL)
4501 /* This happens for the fldXs,mb patterns. */
4502 pat = XVECEXP (pat, 0, 0);
4504 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4505 /* If this happens, we have to extend this to schedule
4506 optimally. Return 0 for now. */
4509 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4511 if (! recog_memoized (dep_insn))
4513 switch (get_attr_type (dep_insn))
4517 case TYPE_FPSQRTSGL:
4518 case TYPE_FPSQRTDBL:
4519 /* An ALU flop can't be issued until two cycles before a
4520 preceding divide or sqrt operation has finished if
4521 the target of the ALU flop is also the target of
4522 the divide or sqrt operation. */
4523 return insn_default_latency (dep_insn) - 2;
4531 /* For other output dependencies, the cost is 0. */
4539 /* Adjust scheduling priorities. We use this to try to keep addil
4540 and the next use of %r1 close together. */
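/* Schematically (a hand-written illustration, not compiler output), the
   pair we try to keep adjacent is an addil and the instruction that
   consumes the %r1 it produces, e.g.:

	addil L'sym-$global$,%r27	; %r1 = %r27 + left part of sym
	ldw R'sym-$global$(%r1),%r4	; next use of %r1  */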
4542 pa_adjust_priority (rtx insn, int priority)
4544 rtx set = single_set (insn);
4548 src = SET_SRC (set);
4549 dest = SET_DEST (set);
4550 if (GET_CODE (src) == LO_SUM
4551 && symbolic_operand (XEXP (src, 1), VOIDmode)
4552 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4555 else if (GET_CODE (src) == MEM
4556 && GET_CODE (XEXP (src, 0)) == LO_SUM
4557 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4558 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4561 else if (GET_CODE (dest) == MEM
4562 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4563 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4564 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4570 /* The 700 can only issue a single insn at a time.
4571 The 7XXX processors can issue two insns at a time.
4572 The 8000 can issue 4 insns at a time. */
4574 pa_issue_rate (void)
4578 case PROCESSOR_700: return 1;
4579 case PROCESSOR_7100: return 2;
4580 case PROCESSOR_7100LC: return 2;
4581 case PROCESSOR_7200: return 2;
4582 case PROCESSOR_7300: return 2;
4583 case PROCESSOR_8000: return 4;
4592 /* Return any length adjustment needed by INSN which already has its length
4593 computed as LENGTH. Return zero if no adjustment is necessary.
4595 For the PA: function calls, millicode calls, and backwards short
4596 conditional branches with unfilled delay slots need an adjustment by +1
4597 (to account for the NOP which will be inserted into the instruction stream).
4599 Also compute the length of an inline block move here as it is too
4600 complicated to express as a length attribute in pa.md. */
4602 pa_adjust_insn_length (rtx insn, int length)
4604 rtx pat = PATTERN (insn);
4606 /* Jumps inside switch tables which have unfilled delay slots need
4608 if (GET_CODE (insn) == JUMP_INSN
4609 && GET_CODE (pat) == PARALLEL
4610 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4612 /* Millicode insn with an unfilled delay slot. */
4613 else if (GET_CODE (insn) == INSN
4614 && GET_CODE (pat) != SEQUENCE
4615 && GET_CODE (pat) != USE
4616 && GET_CODE (pat) != CLOBBER
4617 && get_attr_type (insn) == TYPE_MILLI)
4619 /* Block move pattern. */
4620 else if (GET_CODE (insn) == INSN
4621 && GET_CODE (pat) == PARALLEL
4622 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4623 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4624 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4625 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4626 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4627 return compute_movmem_length (insn) - 4;
4628 /* Block clear pattern. */
4629 else if (GET_CODE (insn) == INSN
4630 && GET_CODE (pat) == PARALLEL
4631 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4632 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4633 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4634 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4635 return compute_clrmem_length (insn) - 4;
4636 /* Conditional branch with an unfilled delay slot. */
4637 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4639 /* Adjust a short backwards conditional with an unfilled delay slot. */
4640 if (GET_CODE (pat) == SET
4642 && ! forward_branch_p (insn))
4644 else if (GET_CODE (pat) == PARALLEL
4645 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4648 /* Adjust dbra insn with short backwards conditional branch with
4649 unfilled delay slot -- only for the case where the counter is in a
4650 general register. */
4651 else if (GET_CODE (pat) == PARALLEL
4652 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4653 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4654 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4656 && ! forward_branch_p (insn))
4664 /* Print operand X (an rtx) in assembler syntax to file FILE.
4665 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4666 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4669 print_operand (FILE *file, rtx x, int code)
4674 /* Output a 'nop' if there's nothing for the delay slot. */
4675 if (dbr_sequence_length () == 0)
4676 fputs ("\n\tnop", file);
4679 /* Output a nullification completer if there's nothing for the
4680 delay slot or nullification is requested. */
4681 if (dbr_sequence_length () == 0 ||
4683 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4687 /* Print out the second register name of a register pair.
4688 I.e., R (6) => 7. */
4689 fputs (reg_names[REGNO (x) + 1], file);
4692 /* A register or zero. */
4694 || (x == CONST0_RTX (DFmode))
4695 || (x == CONST0_RTX (SFmode)))
4697 fputs ("%r0", file);
4703 /* A register or zero (floating point). */
4705 || (x == CONST0_RTX (DFmode))
4706 || (x == CONST0_RTX (SFmode)))
4708 fputs ("%fr0", file);
4717 xoperands[0] = XEXP (XEXP (x, 0), 0);
4718 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4719 output_global_address (file, xoperands[1], 0);
4720 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4724 case 'C': /* Plain (C)ondition */
4726 switch (GET_CODE (x))
4729 fputs ("=", file); break;
4731 fputs ("<>", file); break;
4733 fputs (">", file); break;
4735 fputs (">=", file); break;
4737 fputs (">>=", file); break;
4739 fputs (">>", file); break;
4741 fputs ("<", file); break;
4743 fputs ("<=", file); break;
4745 fputs ("<<=", file); break;
4747 fputs ("<<", file); break;
4752 case 'N': /* Condition, (N)egated */
4753 switch (GET_CODE (x))
4756 fputs ("<>", file); break;
4758 fputs ("=", file); break;
4760 fputs ("<=", file); break;
4762 fputs ("<", file); break;
4764 fputs ("<<", file); break;
4766 fputs ("<<=", file); break;
4768 fputs (">=", file); break;
4770 fputs (">", file); break;
4772 fputs (">>", file); break;
4774 fputs (">>=", file); break;
4779 /* For floating point comparisons. Note that the output
4780 predicates are the complement of the desired mode. The
4781 conditions for GT, GE, LT, LE and LTGT cause an invalid
4782 operation exception if the result is unordered and this
4783 exception is enabled in the floating-point status register. */
4785 switch (GET_CODE (x))
4788 fputs ("!=", file); break;
4790 fputs ("=", file); break;
4792 fputs ("!>", file); break;
4794 fputs ("!>=", file); break;
4796 fputs ("!<", file); break;
4798 fputs ("!<=", file); break;
4800 fputs ("!<>", file); break;
4802 fputs ("!?<=", file); break;
4804 fputs ("!?<", file); break;
4806 fputs ("!?>=", file); break;
4808 fputs ("!?>", file); break;
4810 fputs ("!?=", file); break;
4812 fputs ("!?", file); break;
4814 fputs ("?", file); break;
4819 case 'S': /* Condition, operands are (S)wapped. */
4820 switch (GET_CODE (x))
4823 fputs ("=", file); break;
4825 fputs ("<>", file); break;
4827 fputs ("<", file); break;
4829 fputs ("<=", file); break;
4831 fputs ("<<=", file); break;
4833 fputs ("<<", file); break;
4835 fputs (">", file); break;
4837 fputs (">=", file); break;
4839 fputs (">>=", file); break;
4841 fputs (">>", file); break;
4846 case 'B': /* Condition, (B)oth swapped and negate. */
4847 switch (GET_CODE (x))
4850 fputs ("<>", file); break;
4852 fputs ("=", file); break;
4854 fputs (">=", file); break;
4856 fputs (">", file); break;
4858 fputs (">>", file); break;
4860 fputs (">>=", file); break;
4862 fputs ("<=", file); break;
4864 fputs ("<", file); break;
4866 fputs ("<<", file); break;
4868 fputs ("<<=", file); break;
4874 gcc_assert (GET_CODE (x) == CONST_INT);
4875 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
4878 gcc_assert (GET_CODE (x) == CONST_INT);
4879 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
4882 gcc_assert (GET_CODE (x) == CONST_INT);
4883 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
4886 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
4887 fprintf (file, "%d", exact_log2 (INTVAL (x)));
4890 gcc_assert (GET_CODE (x) == CONST_INT);
4891 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
4894 gcc_assert (GET_CODE (x) == CONST_INT);
4895 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
4898 if (GET_CODE (x) == CONST_INT)
4903 switch (GET_CODE (XEXP (x, 0)))
4907 if (ASSEMBLER_DIALECT == 0)
4908 fputs ("s,mb", file);
4910 fputs (",mb", file);
4914 if (ASSEMBLER_DIALECT == 0)
4915 fputs ("s,ma", file);
4917 fputs (",ma", file);
4920 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
4921 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
4923 if (ASSEMBLER_DIALECT == 0)
4926 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4927 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
4929 if (ASSEMBLER_DIALECT == 0)
4930 fputs ("x,s", file);
4934 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
4938 if (code == 'F' && ASSEMBLER_DIALECT == 0)
4944 output_global_address (file, x, 0);
4947 output_global_address (file, x, 1);
4949 case 0: /* Don't do anything special */
4954 compute_zdepwi_operands (INTVAL (x), op);
4955 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4961 compute_zdepdi_operands (INTVAL (x), op);
4962 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
4966 /* We can get here from a .vtable_inherit due to our
4967 CONSTANT_ADDRESS_P rejecting perfectly good constant
4973 if (GET_CODE (x) == REG)
4975 fputs (reg_names [REGNO (x)], file);
4976 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
4982 && GET_MODE_SIZE (GET_MODE (x)) <= 4
4983 && (REGNO (x) & 1) == 0)
4986 else if (GET_CODE (x) == MEM)
4988 int size = GET_MODE_SIZE (GET_MODE (x));
4989 rtx base = NULL_RTX;
4990 switch (GET_CODE (XEXP (x, 0)))
4994 base = XEXP (XEXP (x, 0), 0);
4995 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
4999 base = XEXP (XEXP (x, 0), 0);
5000 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5003 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5004 fprintf (file, "%s(%s)",
5005 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5006 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5007 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5008 fprintf (file, "%s(%s)",
5009 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5010 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5011 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5012 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5014 /* Because the REG_POINTER flag can get lost during reload,
5015 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5016 index and base registers in the combined move patterns. */
5017 rtx base = XEXP (XEXP (x, 0), 1);
5018 rtx index = XEXP (XEXP (x, 0), 0);
5020 fprintf (file, "%s(%s)",
5021 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5024 output_address (XEXP (x, 0));
5027 output_address (XEXP (x, 0));
5032 output_addr_const (file, x);
5035 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5038 output_global_address (FILE *file, rtx x, int round_constant)
5041 /* Imagine (high (const (plus ...))). */
5042 if (GET_CODE (x) == HIGH)
5045 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5046 output_addr_const (file, x);
5047 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5049 output_addr_const (file, x);
5050 fputs ("-$global$", file);
5052 else if (GET_CODE (x) == CONST)
5054 const char *sep = "";
5055 int offset = 0; /* assembler wants -$global$ at end */
5056 rtx base = NULL_RTX;
5058 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5061 base = XEXP (XEXP (x, 0), 0);
5062 output_addr_const (file, base);
5065 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5071 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5074 base = XEXP (XEXP (x, 0), 1);
5075 output_addr_const (file, base);
5078 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5084 /* How bogus. The compiler is apparently responsible for
5085 rounding the constant if it uses an LR field selector.
5087 The linker and/or assembler seem a better place since
5088 they have to do this kind of thing already.
5090 If we fail to do this, HP's optimizing linker may eliminate
5091 an addil, but not update the ldw/stw/ldo instruction that
5092 uses the result of the addil. */
5094 offset = ((offset + 0x1000) & ~0x1fff);
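/* Worked example (illustrative): an offset of 0x1234 becomes
   (0x1234 + 0x1000) & ~0x1fff = 0x2000, i.e. it is rounded to the nearest
   multiple of 0x2000, which is the rounding the comment above says the LR
   field selector expects.  */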
5096 switch (GET_CODE (XEXP (x, 0)))
5109 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5117 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5118 fputs ("-$global$", file);
5120 fprintf (file, "%s%d", sep, offset);
5123 output_addr_const (file, x);
5126 /* Output boilerplate text to appear at the beginning of the file.
5127 There are several possible versions. */
5128 #define aputs(x) fputs(x, asm_out_file)
5130 pa_file_start_level (void)
5133 aputs ("\t.LEVEL 2.0w\n");
5134 else if (TARGET_PA_20)
5135 aputs ("\t.LEVEL 2.0\n");
5136 else if (TARGET_PA_11)
5137 aputs ("\t.LEVEL 1.1\n");
5139 aputs ("\t.LEVEL 1.0\n");
5143 pa_file_start_space (int sortspace)
5145 aputs ("\t.SPACE $PRIVATE$");
5148 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5149 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5150 "\n\t.SPACE $TEXT$");
5153 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5154 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5158 pa_file_start_file (int want_version)
5160 if (write_symbols != NO_DEBUG)
5162 output_file_directive (asm_out_file, main_input_filename);
5164 aputs ("\t.version\t\"01.01\"\n");
5169 pa_file_start_mcount (const char *aswhat)
5172 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5176 pa_elf_file_start (void)
5178 pa_file_start_level ();
5179 pa_file_start_mcount ("ENTRY");
5180 pa_file_start_file (0);
5184 pa_som_file_start (void)
5186 pa_file_start_level ();
5187 pa_file_start_space (0);
5188 aputs ("\t.IMPORT $global$,DATA\n"
5189 "\t.IMPORT $$dyncall,MILLICODE\n");
5190 pa_file_start_mcount ("CODE");
5191 pa_file_start_file (0);
5195 pa_linux_file_start (void)
5197 pa_file_start_file (1);
5198 pa_file_start_level ();
5199 pa_file_start_mcount ("CODE");
5203 pa_hpux64_gas_file_start (void)
5205 pa_file_start_level ();
5206 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5208 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5210 pa_file_start_file (1);
5214 pa_hpux64_hpas_file_start (void)
5216 pa_file_start_level ();
5217 pa_file_start_space (1);
5218 pa_file_start_mcount ("CODE");
5219 pa_file_start_file (0);
5223 static struct deferred_plabel *
5224 get_plabel (rtx symbol)
5226 const char *fname = XSTR (symbol, 0);
5229 /* See if we have already put this function on the list of deferred
5230 plabels. This list is generally small, so a linear search is not
5231 too ugly. If it proves too slow, replace it with something faster. */
5232 for (i = 0; i < n_deferred_plabels; i++)
5233 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5236 /* If the deferred plabel list is empty, or this entry was not found
5237 on the list, create a new entry on the list. */
5238 if (deferred_plabels == NULL || i == n_deferred_plabels)
5242 if (deferred_plabels == 0)
5243 deferred_plabels = (struct deferred_plabel *)
5244 ggc_alloc (sizeof (struct deferred_plabel));
5246 deferred_plabels = (struct deferred_plabel *)
5247 ggc_realloc (deferred_plabels,
5248 ((n_deferred_plabels + 1)
5249 * sizeof (struct deferred_plabel)));
5251 i = n_deferred_plabels++;
5252 deferred_plabels[i].internal_label = gen_label_rtx ();
5253 deferred_plabels[i].symbol = symbol;
5255 /* Gross. We have just implicitly taken the address of this
5256 function. Mark it in the same manner as assemble_name. */
5257 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5259 mark_referenced (id);
5262 return &deferred_plabels[i];
5266 output_deferred_plabels (void)
5269 /* If we have deferred plabels, then we need to switch into the data
5270 section and align it to a 4 byte boundary before we output the
5271 deferred plabels. */
5272 if (n_deferred_plabels)
5275 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5278 /* Now output the deferred plabels. */
5279 for (i = 0; i < n_deferred_plabels; i++)
5281 (*targetm.asm_out.internal_label) (asm_out_file, "L",
5282 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5283 assemble_integer (deferred_plabels[i].symbol,
5284 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5288 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5289 /* Initialize optabs to point to HPUX long double emulation routines. */
5291 pa_hpux_init_libfuncs (void)
5293 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5294 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5295 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5296 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5297 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5298 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5299 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5300 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5301 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5303 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5304 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5305 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5306 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5307 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5308 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5309 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5311 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5312 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5313 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5314 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5316 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5317 ? "__U_Qfcnvfxt_quad_to_sgl"
5318 : "_U_Qfcnvfxt_quad_to_sgl");
5319 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5320 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5321 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5323 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5324 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
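/* With these mappings, operations on the 128-bit long double type become
   library calls; e.g. (illustrative C, not part of this file) "a + b" on
   two long doubles compiles to a call to _U_Qfadd, and "a < b" to a call
   to _U_Qflt, rather than to inline floating-point code.  */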
5328 /* HP's millicode routines mean something special to the assembler.
5329 Keep track of which ones we have used. */
5331 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5332 static void import_milli (enum millicodes);
5333 static char imported[(int) end1000];
5334 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5335 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5336 #define MILLI_START 10
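/* For example, import_milli (mulI) copies "mulI" over the four dots
   starting at MILLI_START, emitting

	.IMPORT $$mulI,MILLICODE

   the first time the routine is used; later uses are suppressed by the
   imported[] flags.  */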
5339 import_milli (enum millicodes code)
5341 char str[sizeof (import_string)];
5343 if (!imported[(int) code])
5345 imported[(int) code] = 1;
5346 strcpy (str, import_string);
5347 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5348 output_asm_insn (str, 0);
5352 /* The register constraints have put the operands and return value in
5353 the proper registers. */
5356 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5358 import_milli (mulI);
5359 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5362 /* Emit the rtl for doing a division by a constant. */
5364 /* Do magic division millicodes exist for this value? */
5365 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5367 /* We'll use an array to keep track of the magic millicodes and
5368 whether or not we've used them already. [n][0] is signed, [n][1] is
5371 static int div_milli[16][2];
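/* For example (derived from the table above), a signed division by 3
   becomes a call to the $$divI_3 millicode routine: the dividend is placed
   in %r26 and the quotient comes back in %r29.  Divisors whose
   magic_milli[] entry is zero, such as 11 or 13, fall back to the general
   $$divI routine.  */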
5374 emit_hpdiv_const (rtx *operands, int unsignedp)
5376 if (GET_CODE (operands[2]) == CONST_INT
5377 && INTVAL (operands[2]) > 0
5378 && INTVAL (operands[2]) < 16
5379 && magic_milli[INTVAL (operands[2])])
5381 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5383 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5387 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5388 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5390 gen_rtx_REG (SImode, 26),
5392 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5393 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5394 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5395 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5396 gen_rtx_CLOBBER (VOIDmode, ret))));
5397 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5404 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5408 /* If the divisor is a constant, try to use one of the special
5410 if (GET_CODE (operands[0]) == CONST_INT)
5412 static char buf[100];
5413 divisor = INTVAL (operands[0]);
5414 if (!div_milli[divisor][unsignedp])
5416 div_milli[divisor][unsignedp] = 1;
5418 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5420 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5424 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5425 INTVAL (operands[0]));
5426 return output_millicode_call (insn,
5427 gen_rtx_SYMBOL_REF (SImode, buf));
5431 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5432 INTVAL (operands[0]));
5433 return output_millicode_call (insn,
5434 gen_rtx_SYMBOL_REF (SImode, buf));
5437 /* Divisor isn't a special constant. */
5442 import_milli (divU);
5443 return output_millicode_call (insn,
5444 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5448 import_milli (divI);
5449 return output_millicode_call (insn,
5450 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5455 /* Output a $$rem millicode to do mod. */
5458 output_mod_insn (int unsignedp, rtx insn)
5462 import_milli (remU);
5463 return output_millicode_call (insn,
5464 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5468 import_milli (remI);
5469 return output_millicode_call (insn,
5470 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5475 output_arg_descriptor (rtx call_insn)
5477 const char *arg_regs[4];
5478 enum machine_mode arg_mode;
5480 int i, output_flag = 0;
5483 /* We neither need nor want argument location descriptors for the
5484 64-bit runtime environment or the ELF32 environment. */
5485 if (TARGET_64BIT || TARGET_ELF32)
5488 for (i = 0; i < 4; i++)
5491 /* Specify explicitly that no argument relocations should take place
5492 if using the portable runtime calling conventions. */
5493 if (TARGET_PORTABLE_RUNTIME)
5495 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5500 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5501 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5502 link; link = XEXP (link, 1))
5504 rtx use = XEXP (link, 0);
5506 if (! (GET_CODE (use) == USE
5507 && GET_CODE (XEXP (use, 0)) == REG
5508 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5511 arg_mode = GET_MODE (XEXP (use, 0));
5512 regno = REGNO (XEXP (use, 0));
5513 if (regno >= 23 && regno <= 26)
5515 arg_regs[26 - regno] = "GR";
5516 if (arg_mode == DImode)
5517 arg_regs[25 - regno] = "GR";
5519 else if (regno >= 32 && regno <= 39)
5521 if (arg_mode == SFmode)
5522 arg_regs[(regno - 32) / 2] = "FR";
5525 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5526 arg_regs[(regno - 34) / 2] = "FR";
5527 arg_regs[(regno - 34) / 2 + 1] = "FU";
5529 arg_regs[(regno - 34) / 2] = "FU";
5530 arg_regs[(regno - 34) / 2 + 1] = "FR";
5535 fputs ("\t.CALL ", asm_out_file);
5536 for (i = 0; i < 4; i++)
5541 fputc (',', asm_out_file);
5542 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5545 fputc ('\n', asm_out_file);
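/* For example (illustrative), a call passing two integer arguments in
   %r26 and %r25 is annotated with a descriptor of the form

	.CALL ARGW0=GR,ARGW1=GR

   while floating-point arguments get FR/FU entries as set up above.  */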
5548 /* Return the class of any secondary reload register that is needed to
5549 move IN into a register in class CLASS using mode MODE.
5551 Profiling has shown that this routine and its descendants account for
5552 a significant amount of compile time (~7%). So it has been
5553 optimized to reduce redundant computations and eliminate useless
5556 It might be worthwhile to try to make this a leaf function too. */
5559 secondary_reload_class (enum reg_class class, enum machine_mode mode, rtx in)
5561 int regno, is_symbolic;
5563 /* Trying to load a constant into a FP register during PIC code
5564 generation will require %r1 as a scratch register. */
5566 && GET_MODE_CLASS (mode) == MODE_INT
5567 && FP_REG_CLASS_P (class)
5568 && (GET_CODE (in) == CONST_INT || GET_CODE (in) == CONST_DOUBLE))
5571 /* Profiling showed the PA port spends about 1.3% of its compilation
5572 time in true_regnum from calls inside secondary_reload_class. */
5574 if (GET_CODE (in) == REG)
5577 if (regno >= FIRST_PSEUDO_REGISTER)
5578 regno = true_regnum (in);
5580 else if (GET_CODE (in) == SUBREG)
5581 regno = true_regnum (in);
5585 /* If we have something like (mem (mem (...)), we can safely assume the
5586 inner MEM will end up in a general register after reloading, so there's
5587 no need for a secondary reload. */
5588 if (GET_CODE (in) == MEM
5589 && GET_CODE (XEXP (in, 0)) == MEM)
5592 /* Handle out of range displacement for integer mode loads/stores of
5594 if (((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5595 && GET_MODE_CLASS (mode) == MODE_INT
5596 && FP_REG_CLASS_P (class))
5597 || (class == SHIFT_REGS && (regno <= 0 || regno >= 32)))
5598 return GENERAL_REGS;
5600 /* A SAR<->FP register copy requires a secondary register (GPR) as
5601 well as secondary memory. */
5602 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5603 && ((REGNO_REG_CLASS (regno) == SHIFT_REGS && FP_REG_CLASS_P (class))
5604 || (class == SHIFT_REGS && FP_REG_CLASS_P (REGNO_REG_CLASS (regno)))))
5605 return GENERAL_REGS;
5607 if (GET_CODE (in) == HIGH)
5610 /* Profiling has shown that GCC spends about 2.6% of its compilation
5611 time in symbolic_operand from calls inside secondary_reload_class.
5613 We use an inline copy and only compute its return value once to avoid
5615 switch (GET_CODE (in))
5625 is_symbolic = ((GET_CODE (XEXP (tmp, 0)) == SYMBOL_REF
5626 || GET_CODE (XEXP (tmp, 0)) == LABEL_REF)
5627 && GET_CODE (XEXP (tmp, 1)) == CONST_INT);
5637 && read_only_operand (in, VOIDmode))
5640 if (class != R1_REGS && is_symbolic)
5646 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5647 by invisible reference. As a GCC extension, we also pass anything
5648 with a zero or variable size by reference.
5650 The 64-bit runtime does not describe passing any types by invisible
5651 reference. The internals of GCC can't currently handle passing
5652 empty structures, and zero or variable length arrays when they are
5653 not passed entirely on the stack or by reference. Thus, as a GCC
5654 extension, we pass these types by reference. The HP compiler doesn't
5655 support these types, so hopefully there shouldn't be any compatibility
5656 issues. This may have to be revisited when HP releases a C99 compiler
5657 or updates the ABI. */
5660 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5661 enum machine_mode mode, tree type,
5662 bool named ATTRIBUTE_UNUSED)
5667 size = int_size_in_bytes (type);
5669 size = GET_MODE_SIZE (mode);
5674 return size <= 0 || size > 8;
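/* For example, a 16-byte struct, a zero-sized struct, or a variable
   length array is passed by invisible reference, while an 8-byte struct
   is still passed by value.  */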
5678 function_arg_padding (enum machine_mode mode, tree type)
5681 || (TARGET_64BIT && type && AGGREGATE_TYPE_P (type)))
5683 /* Return none if justification is not required. */
5685 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5686 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5689 /* The directions set here are ignored when a BLKmode argument larger
5690 than a word is placed in a register. Different code is used for
5691 the stack and registers. This makes it difficult to have a
5692 consistent data representation for both the stack and registers.
5693 For both runtimes, the justification and padding for arguments on
5694 the stack and in registers should be identical. */
5696 /* The 64-bit runtime specifies left justification for aggregates. */
5699 /* The 32-bit runtime architecture specifies right justification.
5700 When the argument is passed on the stack, the argument is padded
5701 with garbage on the left. The HP compiler pads with zeros. */
5705 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5712 /* Do what is necessary for `va_start'. We look at the current function
5713 to determine if stdargs or varargs is used and fill in an initial
5714 va_list. A pointer to this constructor is returned. */
5717 hppa_builtin_saveregs (void)
5720 tree fntype = TREE_TYPE (current_function_decl);
5721 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5722 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5723 != void_type_node)))
5724 ? UNITS_PER_WORD : 0);
5727 offset = plus_constant (current_function_arg_offset_rtx, argadj);
5729 offset = current_function_arg_offset_rtx;
5735 /* Adjust for varargs/stdarg differences. */
5737 offset = plus_constant (current_function_arg_offset_rtx, -argadj);
5739 offset = current_function_arg_offset_rtx;
5741 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5742 from the incoming arg pointer and growing to larger addresses. */
5743 for (i = 26, off = -64; i >= 19; i--, off += 8)
5744 emit_move_insn (gen_rtx_MEM (word_mode,
5745 plus_constant (arg_pointer_rtx, off)),
5746 gen_rtx_REG (word_mode, i));
5748 /* The incoming args pointer points just beyond the flushback area;
5749 normally this is not a serious concern. However, when we are doing
5750 varargs/stdargs we want to make the arg pointer point to the start
5751 of the incoming argument area. */
5752 emit_move_insn (virtual_incoming_args_rtx,
5753 plus_constant (arg_pointer_rtx, -64));
5755 /* Now return a pointer to the first anonymous argument. */
5756 return copy_to_reg (expand_binop (Pmode, add_optab,
5757 virtual_incoming_args_rtx,
5758 offset, 0, 0, OPTAB_LIB_WIDEN));
5761 /* Store general registers on the stack. */
5762 dest = gen_rtx_MEM (BLKmode,
5763 plus_constant (current_function_internal_arg_pointer,
5765 set_mem_alias_set (dest, get_varargs_alias_set ());
5766 set_mem_align (dest, BITS_PER_WORD);
5767 move_block_from_reg (23, dest, 4);
5769 /* move_block_from_reg will emit code to store the argument registers
5770 individually as scalar stores.
5772 However, other insns may later load from the same addresses for
5773 a structure load (passing a struct to a varargs routine).
5775 The alias code assumes that such aliasing can never happen, so we
5776 have to keep memory referencing insns from moving up beyond the
5777 last argument register store. So we emit a blockage insn here. */
5778 emit_insn (gen_blockage ());
5780 return copy_to_reg (expand_binop (Pmode, add_optab,
5781 current_function_internal_arg_pointer,
5782 offset, 0, 0, OPTAB_LIB_WIDEN));
5786 hppa_va_start (tree valist, rtx nextarg)
5788 nextarg = expand_builtin_saveregs ();
5789 std_expand_builtin_va_start (valist, nextarg);
5793 hppa_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
5797 /* Args grow upward. We can use the generic routines. */
5798 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
5800 else /* !TARGET_64BIT */
5802 tree ptr = build_pointer_type (type);
5805 unsigned int size, ofs;
5808 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
5812 ptr = build_pointer_type (type);
5814 size = int_size_in_bytes (type);
5815 valist_type = TREE_TYPE (valist);
5817 /* Args grow down. Not handled by generic routines. */
5819 u = fold_convert (valist_type, size_in_bytes (type));
5820 t = build (MINUS_EXPR, valist_type, valist, u);
5822 /* Copied from va-pa.h, but we probably don't need to align to
5823 word size, since we generate and preserve that invariant. */
5824 u = build_int_cst (valist_type, (size > 4 ? -8 : -4));
5825 t = build (BIT_AND_EXPR, valist_type, t, u);
5827 t = build (MODIFY_EXPR, valist_type, valist, t);
5829 ofs = (8 - size) % 4;
5832 u = fold_convert (valist_type, size_int (ofs));
5833 t = build (PLUS_EXPR, valist_type, t, u);
5836 t = fold_convert (ptr, t);
5837 t = build_fold_indirect_ref (t);
5840 t = build_fold_indirect_ref (t);
5846 /* True if MODE is valid for the target. By "valid", we mean able to
5847 be manipulated in non-trivial ways. In particular, this means all
5848 the arithmetic is supported.
5850 Currently, TImode is not valid as the HP 64-bit runtime documentation
5851 doesn't document the alignment and calling conventions for this type.
5852 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
5853 2 * BITS_PER_WORD isn't equal LONG_LONG_TYPE_SIZE. */
5856 pa_scalar_mode_supported_p (enum machine_mode mode)
5858 int precision = GET_MODE_PRECISION (mode);
5860 switch (GET_MODE_CLASS (mode))
5862 case MODE_PARTIAL_INT:
5864 if (precision == CHAR_TYPE_SIZE)
5866 if (precision == SHORT_TYPE_SIZE)
5868 if (precision == INT_TYPE_SIZE)
5870 if (precision == LONG_TYPE_SIZE)
5872 if (precision == LONG_LONG_TYPE_SIZE)
5877 if (precision == FLOAT_TYPE_SIZE)
5879 if (precision == DOUBLE_TYPE_SIZE)
5881 if (precision == LONG_DOUBLE_TYPE_SIZE)
5890 /* This routine handles all the normal conditional branch sequences we
5891 might need to generate. It handles compare immediate vs compare
5892 register, nullification of delay slots, varying length branches,
5893 negated branches, and all combinations of the above. It returns the
5894 output appropriate to emit the branch corresponding to all given
5898 output_cbranch (rtx *operands, int nullify, int length, int negated, rtx insn)
5900 static char buf[100];
5904 /* A conditional branch to the following instruction (e.g. the delay slot)
5905 is asking for a disaster. This can happen when not optimizing and
5906 when jump optimization fails.
5908 While it is usually safe to emit nothing, this can fail if the
5909 preceding instruction is a nullified branch with an empty delay
5910 slot and the same branch target as this branch. We could check
5911 for this but jump optimization should eliminate nop jumps. It
5912 is always safe to emit a nop. */
5913 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
5916 /* The doubleword form of the cmpib instruction doesn't have the LEU
5917 and GTU conditions while the cmpb instruction does. Since we accept
5918 zero for cmpb, we must ensure that we use cmpb for the comparison. */
5919 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
5920 operands[2] = gen_rtx_REG (DImode, 0);
5922 /* If this is a long branch with its delay slot unfilled, set `nullify'
5923 as it can nullify the delay slot and save a nop. */
5924 if (length == 8 && dbr_sequence_length () == 0)
5927 /* If this is a short forward conditional branch which did not get
5928 its delay slot filled, the delay slot can still be nullified. */
5929 if (! nullify && length == 4 && dbr_sequence_length () == 0)
5930 nullify = forward_branch_p (insn);
5932 /* A forward branch over a single nullified insn can be done with a
5933 comclr instruction. This avoids a single cycle penalty due to
5934 mis-predicted branch if we fall through (branch not taken). */
5936 && next_real_insn (insn) != 0
5937 && get_attr_length (next_real_insn (insn)) == 4
5938 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
5944 /* All short conditional branches except backwards with an unfilled
5948 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
5950 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5951 if (GET_MODE (operands[1]) == DImode)
5954 strcat (buf, "%B3");
5956 strcat (buf, "%S3");
5958 strcat (buf, " %2,%r1,%%r0");
5960 strcat (buf, ",n %2,%r1,%0");
5962 strcat (buf, " %2,%r1,%0");
5965 /* All long conditionals. Note a short backward branch with an
5966 unfilled delay slot is treated just like a long backward branch
5967 with an unfilled delay slot. */
5969 /* Handle weird backwards branch with a filled delay slot
5970 which is nullified. */
5971 if (dbr_sequence_length () != 0
5972 && ! forward_branch_p (insn)
5975 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5976 if (GET_MODE (operands[1]) == DImode)
5979 strcat (buf, "%S3");
5981 strcat (buf, "%B3");
5982 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
5984 /* Handle short backwards branch with an unfilled delay slot.
5985 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
5986 taken and untaken branches. */
5987 else if (dbr_sequence_length () == 0
5988 && ! forward_branch_p (insn)
5989 && INSN_ADDRESSES_SET_P ()
5990 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
5991 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
5993 strcpy (buf, "{com%I2b,|cmp%I2b,}");
5994 if (GET_MODE (operands[1]) == DImode)
5997 strcat (buf, "%B3 %2,%r1,%0%#");
5999 strcat (buf, "%S3 %2,%r1,%0%#");
6003 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6004 if (GET_MODE (operands[1]) == DImode)
6007 strcat (buf, "%S3");
6009 strcat (buf, "%B3");
6011 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6013 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6019 xoperands[0] = operands[0];
6020 xoperands[1] = operands[1];
6021 xoperands[2] = operands[2];
6022 xoperands[3] = operands[3];
6024 /* The reversed conditional branch must branch over one additional
6025 instruction if the delay slot is filled. If the delay slot
6026 is empty, the instruction after the reversed conditional branch
6027 must be nullified. */
6028 nullify = dbr_sequence_length () == 0;
6029 xoperands[4] = nullify ? GEN_INT (length) : GEN_INT (length + 4);
6031 /* Create a reversed conditional branch which branches around
6032 the following insns. */
6033 if (GET_MODE (operands[1]) != DImode)
6039 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6042 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6048 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6051 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6060 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6063 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6069 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6072 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6076 output_asm_insn (buf, xoperands);
6077 return output_lbranch (operands[0], insn);
6085 /* This routine handles long unconditional branches that exceed the
6086 maximum range of a simple branch instruction. */
6089 output_lbranch (rtx dest, rtx insn)
6093 xoperands[0] = dest;
6095 /* First, free up the delay slot. */
6096 if (dbr_sequence_length () != 0)
6098 /* We can't handle a jump in the delay slot. */
6099 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6101 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6104 /* Now delete the delay insn. */
6105 PUT_CODE (NEXT_INSN (insn), NOTE);
6106 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6107 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6110 /* Output an insn to save %r1. The runtime documentation doesn't
6111 specify whether the "Clean Up" slot in the caller's frame can
6112 be clobbered by the callee. It isn't copied by HP's builtin
6113 alloca, so this suggests that it can be clobbered if necessary.
6114 The "Static Link" location is copied by HP builtin alloca, so
6115 we avoid using it. Using the cleanup slot might be a problem
6116 if we have to interoperate with languages that pass cleanup
6117 information. However, it should be possible to handle these
6118 situations with GCC's asm feature.
6120 The "Current RP" slot is reserved for the called procedure, so
6121 we try to use it when we don't have a frame of our own. It's
6122 rather unlikely that we won't have a frame when we need to emit
6125 Really the way to go long term is a register scavenger; go to
6126 the target of the jump and find a register which we can use
6127 as a scratch to hold the value in %r1. Then, we wouldn't have
6128 to free up the delay slot or clobber a slot that may be needed
6129 for other purposes. */
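/* Schematically, for 32-bit non-PIC code with a frame, the sequence
   emitted below is (hand-written illustration):

	stw %r1,-12(%r30)	; save %r1 in the Clean Up slot
	ldil L'target,%r1
	be R'target(%sr4,%r1)	; very long branch
	ldw -12(%r30),%r1	; restore %r1 from the delay slot  */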
6132 if (actual_fsize == 0 && !regs_ever_live[2])
6133 /* Use the return pointer slot in the frame marker. */
6134 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6136 /* Use the slot at -40 in the frame marker since HP builtin
6137 alloca doesn't copy it. */
6138 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6142 if (actual_fsize == 0 && !regs_ever_live[2])
6143 /* Use the return pointer slot in the frame marker. */
6144 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6146 /* Use the "Clean Up" slot in the frame marker. In GCC,
6147 the only other use of this location is for copying a
6148 floating point double argument from a floating-point
6149 register to two general registers. The copy is done
6150 as an "atomic" operation when outputting a call, so it
6151 won't interfere with our using the location here. */
6152 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6155 if (TARGET_PORTABLE_RUNTIME)
6157 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6158 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6159 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6163 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6164 if (TARGET_SOM || !TARGET_GAS)
6166 xoperands[1] = gen_label_rtx ();
6167 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6168 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6169 CODE_LABEL_NUMBER (xoperands[1]));
6170 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6174 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6175 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6177 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6180 /* Now output a very long branch to the original target. */
6181 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6183 /* Now restore the value of %r1 in the delay slot. */
6186 if (actual_fsize == 0 && !regs_ever_live[2])
6187 return "ldd -16(%%r30),%%r1";
6189 return "ldd -40(%%r30),%%r1";
6193 if (actual_fsize == 0 && !regs_ever_live[2])
6194 return "ldw -20(%%r30),%%r1";
6196 return "ldw -12(%%r30),%%r1";
6200 /* This routine handles all the branch-on-bit conditional branch sequences we
6201 might need to generate. It handles nullification of delay slots,
6202 varying length branches, negated branches and all combinations of the
6203 above. It returns the appropriate output template to emit the branch. */
6206 output_bb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6207 int negated, rtx insn, int which)
6209 static char buf[100];
6212 /* A conditional branch to the following instruction (e.g. the delay slot) is
6213 asking for a disaster. I do not think this can happen as this pattern
6214 is only used when optimizing; jump optimization should eliminate the
6215 jump. But be prepared just in case. */
6217 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6220 /* If this is a long branch with its delay slot unfilled, set `nullify'
6221 as it can nullify the delay slot and save a nop. */
6222 if (length == 8 && dbr_sequence_length () == 0)
6225 /* If this is a short forward conditional branch which did not get
6226 its delay slot filled, the delay slot can still be nullified. */
6227 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6228 nullify = forward_branch_p (insn);
6230 /* A forward branch over a single nullified insn can be done with an
6231 extrs instruction. This avoids a single cycle penalty due to
6232 mis-predicted branch if we fall through (branch not taken). */
6235 && next_real_insn (insn) != 0
6236 && get_attr_length (next_real_insn (insn)) == 4
6237 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6244 /* All short conditional branches except backwards with an unfilled
6248 strcpy (buf, "{extrs,|extrw,s,}");
6250 strcpy (buf, "bb,");
6251 if (useskip && GET_MODE (operands[0]) == DImode)
6252 strcpy (buf, "extrd,s,*");
6253 else if (GET_MODE (operands[0]) == DImode)
6254 strcpy (buf, "bb,*");
6255 if ((which == 0 && negated)
6256 || (which == 1 && ! negated))
6261 strcat (buf, " %0,%1,1,%%r0");
6262 else if (nullify && negated)
6263 strcat (buf, ",n %0,%1,%3");
6264 else if (nullify && ! negated)
6265 strcat (buf, ",n %0,%1,%2");
6266 else if (! nullify && negated)
6267 strcat (buf, "%0,%1,%3");
6268 else if (! nullify && ! negated)
6269 strcat (buf, " %0,%1,%2");
6272 /* All long conditionals. Note a short backward branch with an
6273 unfilled delay slot is treated just like a long backward branch
6274 with an unfilled delay slot. */
6276 /* Handle weird backwards branch with a filled delay slot
6277 which is nullified. */
6278 if (dbr_sequence_length () != 0
6279 && ! forward_branch_p (insn)
6282 strcpy (buf, "bb,");
6283 if (GET_MODE (operands[0]) == DImode)
6285 if ((which == 0 && negated)
6286 || (which == 1 && ! negated))
6291 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6293 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6295 /* Handle short backwards branch with an unfilled delay slot.
6296 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6297 taken and untaken branches. */
6298 else if (dbr_sequence_length () == 0
6299 && ! forward_branch_p (insn)
6300 && INSN_ADDRESSES_SET_P ()
6301 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6302 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6304 strcpy (buf, "bb,");
6305 if (GET_MODE (operands[0]) == DImode)
6307 if ((which == 0 && negated)
6308 || (which == 1 && ! negated))
6313 strcat (buf, " %0,%1,%3%#");
6315 strcat (buf, " %0,%1,%2%#");
6319 strcpy (buf, "{extrs,|extrw,s,}");
6320 if (GET_MODE (operands[0]) == DImode)
6321 strcpy (buf, "extrd,s,*");
6322 if ((which == 0 && negated)
6323 || (which == 1 && ! negated))
6327 if (nullify && negated)
6328 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6329 else if (nullify && ! negated)
6330 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6332 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6334 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6344 /* This routine handles all the branch-on-variable-bit conditional branch
6345 sequences we might need to generate. It handles nullification of delay
6346 slots, varying length branches, negated branches and all combinations
6347 of the above. It returns the appropriate output template to emit the
6351 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int nullify, int length,
6352 int negated, rtx insn, int which)
6354 static char buf[100];
6357 /* A conditional branch to the following instruction (e.g. the delay slot) is
6358 asking for a disaster. I do not think this can happen as this pattern
6359 is only used when optimizing; jump optimization should eliminate the
6360 jump. But be prepared just in case. */
6362 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6365 /* If this is a long branch with its delay slot unfilled, set `nullify'
6366 as it can nullify the delay slot and save a nop. */
6367 if (length == 8 && dbr_sequence_length () == 0)
6370 /* If this is a short forward conditional branch which did not get
6371 its delay slot filled, the delay slot can still be nullified. */
6372 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6373 nullify = forward_branch_p (insn);
6375 /* A forward branch over a single nullified insn can be done with an
6376 extrs instruction. This avoids a single cycle penalty due to
6377 mis-predicted branch if we fall through (branch not taken). */
6380 && next_real_insn (insn) != 0
6381 && get_attr_length (next_real_insn (insn)) == 4
6382 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6389 /* All short conditional branches except backwards with an unfilled
6393 strcpy (buf, "{vextrs,|extrw,s,}");
6395 strcpy (buf, "{bvb,|bb,}");
6396 if (useskip && GET_MODE (operands[0]) == DImode)
6397 strcpy (buf, "extrd,s,*");
6398 else if (GET_MODE (operands[0]) == DImode)
6399 strcpy (buf, "bb,*");
6400 if ((which == 0 && negated)
6401 || (which == 1 && ! negated))
6406 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6407 else if (nullify && negated)
6408 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6409 else if (nullify && ! negated)
6410 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6411 else if (! nullify && negated)
6412 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6413 else if (! nullify && ! negated)
6414 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6417 /* All long conditionals. Note a short backward branch with an
6418 unfilled delay slot is treated just like a long backward branch
6419 with an unfilled delay slot. */
6421 /* Handle weird backwards branch with a filled delay slot
6422 which is nullified. */
6423 if (dbr_sequence_length () != 0
6424 && ! forward_branch_p (insn)
6427 strcpy (buf, "{bvb,|bb,}");
6428 if (GET_MODE (operands[0]) == DImode)
6430 if ((which == 0 && negated)
6431 || (which == 1 && ! negated))
6436 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6438 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6440 /* Handle short backwards branch with an unfilled delay slot.
6441 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6442 taken and untaken branches. */
6443 else if (dbr_sequence_length () == 0
6444 && ! forward_branch_p (insn)
6445 && INSN_ADDRESSES_SET_P ()
6446 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6447 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6449 strcpy (buf, "{bvb,|bb,}");
6450 if (GET_MODE (operands[0]) == DImode)
6452 if ((which == 0 && negated)
6453 || (which == 1 && ! negated))
6458 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6460 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6464 strcpy (buf, "{vextrs,|extrw,s,}");
6465 if (GET_MODE (operands[0]) == DImode)
6466 strcpy (buf, "extrd,s,*");
6467 if ((which == 0 && negated)
6468 || (which == 1 && ! negated))
6472 if (nullify && negated)
6473 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6474 else if (nullify && ! negated)
6475 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6477 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6479 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6489 /* Return the output template for emitting a dbra type insn.
6491 Note it may perform some output operations on its own before
6492 returning the final output string. */
6494 output_dbra (rtx *operands, rtx insn, int which_alternative)
6497 /* A conditional branch to the following instruction (e.g. the delay slot) is
6498 asking for a disaster. Be prepared! */
6500 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6502 if (which_alternative == 0)
6503 return "ldo %1(%0),%0";
6504 else if (which_alternative == 1)
6506 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6507 output_asm_insn ("ldw -16(%%r30),%4", operands);
6508 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6509 return "{fldws|fldw} -16(%%r30),%0";
6513 output_asm_insn ("ldw %0,%4", operands);
6514 return "ldo %1(%4),%4\n\tstw %4,%0";
6518 if (which_alternative == 0)
6520 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6521 int length = get_attr_length (insn);
6523 /* If this is a long branch with its delay slot unfilled, set `nullify'
6524 as it can nullify the delay slot and save a nop. */
6525 if (length == 8 && dbr_sequence_length () == 0)
6528 /* If this is a short forward conditional branch which did not get
6529 its delay slot filled, the delay slot can still be nullified. */
6530 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6531 nullify = forward_branch_p (insn);
6537 return "addib,%C2,n %1,%0,%3";
6539 return "addib,%C2 %1,%0,%3";
6542 /* Handle weird backwards branch with a filled delay slot
6543 which is nullified. */
6544 if (dbr_sequence_length () != 0
6545 && ! forward_branch_p (insn)
6547 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6548 /* Handle short backwards branch with an unfilled delay slot.
6549 Using an addb;nop rather than addi;bl saves 1 cycle for both
6550 taken and untaken branches. */
6551 else if (dbr_sequence_length () == 0
6552 && ! forward_branch_p (insn)
6553 && INSN_ADDRESSES_SET_P ()
6554 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6555 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6556 return "addib,%C2 %1,%0,%3%#";
6558 /* Handle normal cases. */
6560 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6562 return "addi,%N2 %1,%0,%0\n\tb %3";
6569 /* Deal with gross reload from FP register case. */
6570 else if (which_alternative == 1)
6572 /* Move loop counter from FP register to MEM then into a GR,
6573 increment the GR, store the GR into MEM, and finally reload
6574 the FP register from MEM from within the branch's delay slot. */
6575 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6577 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6578 if (get_attr_length (insn) == 24)
6579 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6581 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6583 /* Deal with gross reload from memory case. */
6586 /* Reload loop counter from memory, the store back to memory
6587 happens in the branch's delay slot. */
6588 output_asm_insn ("ldw %0,%4", operands);
6589 if (get_attr_length (insn) == 12)
6590 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6592 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6596 /* Return the output template for emitting a movb type insn.
6598 Note it may perform some output operations on its own before
6599 returning the final output string. */
6601 output_movb (rtx *operands, rtx insn, int which_alternative,
6602 int reverse_comparison)
6605 /* A conditional branch to the following instruction (e.g. the delay slot) is
6606 asking for a disaster. Be prepared! */
6608 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6610 if (which_alternative == 0)
6611 return "copy %1,%0";
6612 else if (which_alternative == 1)
6614 output_asm_insn ("stw %1,-16(%%r30)", operands);
6615 return "{fldws|fldw} -16(%%r30),%0";
6617 else if (which_alternative == 2)
6623 /* Support the second variant. */
6624 if (reverse_comparison)
6625 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6627 if (which_alternative == 0)
6629 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6630 int length = get_attr_length (insn);
6632 /* If this is a long branch with its delay slot unfilled, set `nullify'
6633 as it can nullify the delay slot and save a nop. */
6634 if (length == 8 && dbr_sequence_length () == 0)
6637 /* If this is a short forward conditional branch which did not get
6638 its delay slot filled, the delay slot can still be nullified. */
6639 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6640 nullify = forward_branch_p (insn);
6646 return "movb,%C2,n %1,%0,%3";
6648 return "movb,%C2 %1,%0,%3";
6651 /* Handle weird backwards branch with a filled delay slot
6652 which is nullified. */
6653 if (dbr_sequence_length () != 0
6654 && ! forward_branch_p (insn)
6656 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6658 /* Handle short backwards branch with an unfilled delay slot.
6659 Using a movb;nop rather than or;bl saves 1 cycle for both
6660 taken and untaken branches. */
6661 else if (dbr_sequence_length () == 0
6662 && ! forward_branch_p (insn)
6663 && INSN_ADDRESSES_SET_P ()
6664 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6665 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6666 return "movb,%C2 %1,%0,%3%#";
6667 /* Handle normal cases. */
6669 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
6671 return "or,%N2 %1,%%r0,%0\n\tb %3";
6677 /* Deal with gross reload from FP register case. */
6678 else if (which_alternative == 1)
6680 /* Move loop counter from FP register to MEM then into a GR,
6681 increment the GR, store the GR into MEM, and finally reload
6682 the FP register from MEM from within the branch's delay slot. */
6683 output_asm_insn ("stw %1,-16(%%r30)", operands);
6684 if (get_attr_length (insn) == 12)
6685 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
6687 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6689 /* Deal with gross reload from memory case. */
6690 else if (which_alternative == 2)
6692 /* Reload loop counter from memory, the store back to memory
6693 happens in the branch's delay slot. */
6694 if (get_attr_length (insn) == 8)
6695 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
6697 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
6699 /* Handle SAR as a destination. */
6702 if (get_attr_length (insn) == 8)
6703 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
6705 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
6709 /* Copy any FP arguments in INSN into integer registers. */
6711 copy_fp_args (rtx insn)
6716 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6718 int arg_mode, regno;
6719 rtx use = XEXP (link, 0);
6721 if (! (GET_CODE (use) == USE
6722 && GET_CODE (XEXP (use, 0)) == REG
6723 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6726 arg_mode = GET_MODE (XEXP (use, 0));
6727 regno = REGNO (XEXP (use, 0));
6729 /* Is it a floating point register? */
6730 if (regno >= 32 && regno <= 39)
6732 /* Copy the FP register into an integer register via memory. */
6733 if (arg_mode == SFmode)
6735 xoperands[0] = XEXP (use, 0);
6736 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
6737 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
6738 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6742 xoperands[0] = XEXP (use, 0);
6743 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
6744 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
6745 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
6746 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
6752 /* Compute length of the FP argument copy sequence for INSN. */
6754 length_fp_args (rtx insn)
6759 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
6761 int arg_mode, regno;
6762 rtx use = XEXP (link, 0);
6764 if (! (GET_CODE (use) == USE
6765 && GET_CODE (XEXP (use, 0)) == REG
6766 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
6769 arg_mode = GET_MODE (XEXP (use, 0));
6770 regno = REGNO (XEXP (use, 0));
6772 /* Is it a floating point register? */
6773 if (regno >= 32 && regno <= 39)
6775 if (arg_mode == SFmode)
6785 /* Return the attribute length for the millicode call instruction INSN.
6786 The length must match the code generated by output_millicode_call.
6787 We include the delay slot in the returned length as it is better to
6788 overestimate the length than to underestimate it. */
6791 attr_length_millicode_call (rtx insn)
6793 unsigned long distance = -1;
6794 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
6796 if (INSN_ADDRESSES_SET_P ())
6798 distance = (total + insn_current_reference_address (insn));
6799 if (distance < total)
6805 if (!TARGET_LONG_CALLS && distance < 7600000)
6810 else if (TARGET_PORTABLE_RUNTIME)
6814 if (!TARGET_LONG_CALLS && distance < 240000)
6817 if (TARGET_LONG_ABS_CALL && !flag_pic)
6824 /* INSN is a function call. It may have an unconditional jump in its delay slot.
6827 CALL_DEST is the routine we are calling. */
6830 output_millicode_call (rtx insn, rtx call_dest)
6832 int attr_length = get_attr_length (insn);
6833 int seq_length = dbr_sequence_length ();
6838 xoperands[0] = call_dest;
6839 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
6841 /* Handle the common case where we are sure that the branch will
6842 reach the beginning of the $CODE$ subspace. The within reach
6843 form of the $$sh_func_adrs call has a length of 28. Because
6844 it has an attribute type of multi, it never has a nonzero
6845 sequence length. The length of the $$sh_func_adrs is the same
6846 as certain out of reach PIC calls to other routines. */
6847 if (!TARGET_LONG_CALLS
6848 && ((seq_length == 0
6849 && (attr_length == 12
6850 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
6851 || (seq_length != 0 && attr_length == 8)))
6853 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
6859 /* It might seem that one insn could be saved by accessing
6860 the millicode function using the linkage table. However,
6861 this doesn't work in shared libraries and other dynamically
6862 loaded objects. Using a pc-relative sequence also avoids
6863 problems related to the implicit use of the gp register. */
6864 output_asm_insn ("b,l .+8,%%r1", xoperands);
6868 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
6869 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6873 xoperands[1] = gen_label_rtx ();
6874 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6875 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6876 CODE_LABEL_NUMBER (xoperands[1]));
6877 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6880 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
6882 else if (TARGET_PORTABLE_RUNTIME)
6884 /* Pure portable runtime doesn't allow be/ble; we also don't
6885 have PIC support in the assembler/linker, so this sequence is needed. */
6888 /* Get the address of our target into %r1. */
6889 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6890 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6892 /* Get our return address into %r31. */
6893 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
6894 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
6896 /* Jump to our target address in %r1. */
6897 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6901 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6903 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
6905 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
6909 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6910 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
6912 if (TARGET_SOM || !TARGET_GAS)
6914 /* The HP assembler can generate relocations for the
6915 difference of two symbols. GAS can do this for a
6916 millicode symbol but not an arbitrary external
6917 symbol when generating SOM output. */
6918 xoperands[1] = gen_label_rtx ();
6919 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6920 CODE_LABEL_NUMBER (xoperands[1]));
6921 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
6922 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
6926 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
6927 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
6931 /* Jump to our target address in %r1. */
6932 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6936 if (seq_length == 0)
6937 output_asm_insn ("nop", xoperands);
6939 /* We are done if there isn't a jump in the delay slot. */
6940 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
6943 /* This call has an unconditional jump in its delay slot. */
6944 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
6946 /* See if the return address can be adjusted. Use the containing
6947 sequence insn's address. */
6948 if (INSN_ADDRESSES_SET_P ())
6950 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
6951 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
6952 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
6954 if (VAL_14_BITS_P (distance))
6956 xoperands[1] = gen_label_rtx ();
6957 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
6958 (*targetm.asm_out.internal_label) (asm_out_file, "L",
6959 CODE_LABEL_NUMBER (xoperands[1]));
6962 /* ??? This branch may not reach its target. */
6963 output_asm_insn ("nop\n\tb,n %0", xoperands);
6966 /* ??? This branch may not reach its target. */
6967 output_asm_insn ("nop\n\tb,n %0", xoperands);
6969 /* Delete the jump. */
6970 PUT_CODE (NEXT_INSN (insn), NOTE);
6971 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
6972 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
6977 /* Return the attribute length of the call instruction INSN. The SIBCALL
6978 flag indicates whether INSN is a regular call or a sibling call. The
6979 length returned must be longer than the code actually generated by
6980 output_call. Since branch shortening is done before delay branch
6981 sequencing, there is no way to determine whether or not the delay
6982 slot will be filled during branch shortening. Even when the delay
6983 slot is filled, we may have to add a nop if the delay slot contains
6984 a branch that can't reach its target. Thus, we always have to include
6985 the delay slot in the length estimate. This used to be done in
6986 pa_adjust_insn_length but we do it here now as some sequences always
6987 fill the delay slot and we can save four bytes in the estimate for these sequences. */
6991 attr_length_call (rtx insn, int sibcall)
6997 rtx pat = PATTERN (insn);
6998 unsigned long distance = -1;
7000 if (INSN_ADDRESSES_SET_P ())
7002 unsigned long total;
7004 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7005 distance = (total + insn_current_reference_address (insn));
7006 if (distance < total)
7010 /* Determine if this is a local call. */
7011 if (GET_CODE (XVECEXP (pat, 0, 0)) == CALL)
7012 call_dest = XEXP (XEXP (XVECEXP (pat, 0, 0), 0), 0);
7014 call_dest = XEXP (XEXP (XEXP (XVECEXP (pat, 0, 0), 1), 0), 0);
7016 call_decl = SYMBOL_REF_DECL (call_dest);
7017 local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7019 /* pc-relative branch. */
7020 if (!TARGET_LONG_CALLS
7021 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7022 || distance < 240000))
7025 /* 64-bit plabel sequence. */
7026 else if (TARGET_64BIT && !local_call)
7027 length += sibcall ? 28 : 24;
7029 /* non-pic long absolute branch sequence. */
7030 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7033 /* long pc-relative branch sequence. */
7034 else if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7035 || (TARGET_64BIT && !TARGET_GAS)
7036 || (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7040 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS)
7044 /* 32-bit plabel sequence. */
7050 length += length_fp_args (insn);
7060 if (!TARGET_NO_SPACE_REGS)
7068 /* INSN is a function call. It may have an unconditional jump in its delay slot.
7071 CALL_DEST is the routine we are calling. */
7074 output_call (rtx insn, rtx call_dest, int sibcall)
7076 int delay_insn_deleted = 0;
7077 int delay_slot_filled = 0;
7078 int seq_length = dbr_sequence_length ();
7079 tree call_decl = SYMBOL_REF_DECL (call_dest);
7080 int local_call = call_decl && (*targetm.binds_local_p) (call_decl);
7083 xoperands[0] = call_dest;
7085 /* Handle the common case where we're sure that the branch will reach
7086 the beginning of the "$CODE$" subspace. This is the beginning of
7087 the current function if we are in a named section. */
7088 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7090 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7091 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7095 if (TARGET_64BIT && !local_call)
7097 /* ??? As far as I can tell, the HP linker doesn't support the
7098 long pc-relative sequence described in the 64-bit runtime
7099 architecture. So, we use a slightly longer indirect call. */
7100 struct deferred_plabel *p = get_plabel (call_dest);
7102 xoperands[0] = p->internal_label;
7103 xoperands[1] = gen_label_rtx ();
7105 /* If this isn't a sibcall, we put the load of %r27 into the
7106 delay slot. We can't do this in a sibcall as we don't
7107 have a second call-clobbered scratch register available. */
7109 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7112 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7115 /* Now delete the delay insn. */
7116 PUT_CODE (NEXT_INSN (insn), NOTE);
7117 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7118 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7119 delay_insn_deleted = 1;
7122 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7123 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7124 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7128 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7129 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7130 output_asm_insn ("bve (%%r1)", xoperands);
7134 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7135 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7136 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7137 delay_slot_filled = 1;
7142 int indirect_call = 0;
7144 /* Emit a long call. There are several different sequences
7145 of increasing length and complexity. In most cases,
7146 they don't allow an instruction in the delay slot. */
7147 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7148 && !(TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7149 && !(TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7154 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7156 && (!TARGET_PA_20 || indirect_call))
7158 /* A non-jump insn in the delay slot. By definition we can
7159 emit this insn before the call (and in fact before argument
7161 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7164 /* Now delete the delay insn. */
7165 PUT_CODE (NEXT_INSN (insn), NOTE);
7166 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7167 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7168 delay_insn_deleted = 1;
7171 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7173 /* This is the best sequence for making long calls in
7174 non-pic code. Unfortunately, GNU ld doesn't provide
7175 the stub needed for external calls, and GAS's support
7176 for this with the SOM linker is buggy. It is safe
7177 to use this for local calls. */
7178 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7180 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7184 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7187 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7189 output_asm_insn ("copy %%r31,%%r2", xoperands);
7190 delay_slot_filled = 1;
7195 if ((TARGET_SOM && TARGET_LONG_PIC_SDIFF_CALL)
7196 || (TARGET_64BIT && !TARGET_GAS))
7198 /* The HP assembler and linker can handle relocations
7199 for the difference of two symbols. GAS and the HP
7200 linker can't do this when one of the symbols is external. */
7202 xoperands[1] = gen_label_rtx ();
7203 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7204 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7205 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7206 CODE_LABEL_NUMBER (xoperands[1]));
7207 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7209 else if (TARGET_GAS && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7211 /* GAS currently can't generate the relocations that
7212 are needed for the SOM linker under HP-UX using this
7213 sequence. The GNU linker doesn't generate the stubs
7214 that are needed for external calls on TARGET_ELF32
7215 with this sequence. For now, we have to use a
7216 longer plabel sequence when using GAS. */
7217 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7218 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7220 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7225 /* Emit a long plabel-based call sequence. This is
7226 essentially an inline implementation of $$dyncall.
7227 We don't actually try to call $$dyncall as this is
7228 as difficult as calling the function itself. */
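/* Illustrative sketch (added comment; not in the original source): with
   flag_pic, the output_asm_insn calls below emit roughly

       addil LT'<plabel>,%r19
       ldw RT'<plabel>(%r1),%r1
       ldw 0(%r1),%r1
       bb,>=,n %r1,30,.+16
       depi 0,31,2,%r1
       ldw 4(%sr0,%r1),%r19
       ldw 0(%sr0,%r1),%r1

   followed by the actual branch and any space register setup.  */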
7229 struct deferred_plabel *p = get_plabel (call_dest);
7231 xoperands[0] = p->internal_label;
7232 xoperands[1] = gen_label_rtx ();
7234 /* Since the call is indirect, FP arguments in registers
7235 need to be copied to the general registers. Then, the
7236 argument relocation stub will copy them back. */
7238 copy_fp_args (insn);
7242 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7243 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7244 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7248 output_asm_insn ("addil LR'%0-$global$,%%r27",
7250 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7254 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7255 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7256 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7257 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7259 if (!sibcall && !TARGET_PA_20)
7261 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7262 if (TARGET_NO_SPACE_REGS)
7263 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7265 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7272 output_asm_insn ("bve (%%r1)", xoperands);
7277 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7278 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7279 delay_slot_filled = 1;
7282 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7287 if (!TARGET_NO_SPACE_REGS)
7288 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7293 if (TARGET_NO_SPACE_REGS)
7294 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7296 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7300 if (TARGET_NO_SPACE_REGS)
7301 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7303 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7306 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7308 output_asm_insn ("copy %%r31,%%r2", xoperands);
7309 delay_slot_filled = 1;
7316 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7317 output_asm_insn ("nop", xoperands);
7319 /* We are done if there isn't a jump in the delay slot. */
7321 || delay_insn_deleted
7322 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7325 /* A sibcall should never have a branch in the delay slot. */
7326 gcc_assert (!sibcall);
7328 /* This call has an unconditional jump in its delay slot. */
7329 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7331 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7333 /* See if the return address can be adjusted. Use the containing
7334 sequence insn's address. */
7335 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7336 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7337 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7339 if (VAL_14_BITS_P (distance))
7341 xoperands[1] = gen_label_rtx ();
7342 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7343 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7344 CODE_LABEL_NUMBER (xoperands[1]));
7347 output_asm_insn ("nop\n\tb,n %0", xoperands);
7350 output_asm_insn ("b,n %0", xoperands);
7352 /* Delete the jump. */
7353 PUT_CODE (NEXT_INSN (insn), NOTE);
7354 NOTE_LINE_NUMBER (NEXT_INSN (insn)) = NOTE_INSN_DELETED;
7355 NOTE_SOURCE_FILE (NEXT_INSN (insn)) = 0;
7360 /* Return the attribute length of the indirect call instruction INSN.
7361 The length must match the code generated by output_indirect_call.
7362 The returned length includes the delay slot. Currently, the delay
7363 slot of an indirect call sequence is not exposed and it is used by
7364 the sequence itself. */
7367 attr_length_indirect_call (rtx insn)
7369 unsigned long distance = -1;
7370 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7372 if (INSN_ADDRESSES_SET_P ())
7374 distance = (total + insn_current_reference_address (insn));
7375 if (distance < total)
7382 if (TARGET_FAST_INDIRECT_CALLS
7383 || (!TARGET_PORTABLE_RUNTIME
7384 && ((TARGET_PA_20 && distance < 7600000) || distance < 240000)))
7390 if (TARGET_PORTABLE_RUNTIME)
7393 /* Out of reach, can use ble. */
7398 output_indirect_call (rtx insn, rtx call_dest)
7404 xoperands[0] = call_dest;
7405 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7406 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7410 /* First the special case for kernels, level 0 systems, etc. */
7411 if (TARGET_FAST_INDIRECT_CALLS)
7412 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7414 /* Now the normal case -- we can reach $$dyncall directly or
7415 we're sure that we can get there via a long-branch stub.
7417 No need to check target flags as the length uniquely identifies
7418 the remaining cases. */
7419 if (attr_length_indirect_call (insn) == 8)
7421 /* The HP linker substitutes a BLE for millicode calls using
7422 the short PIC PCREL form. Thus, we must use %r31 as the
7423 link register when generating PA 1.x code. */
7425 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7427 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7430 /* Long millicode call, but we are not generating PIC or portable runtime code. */
7432 if (attr_length_indirect_call (insn) == 12)
7433 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7435 /* Long millicode call for portable runtime. */
7436 if (attr_length_indirect_call (insn) == 20)
7437 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7439 /* We need a long PIC call to $$dyncall. */
7440 xoperands[0] = NULL_RTX;
7441 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7442 if (TARGET_SOM || !TARGET_GAS)
7444 xoperands[0] = gen_label_rtx ();
7445 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7446 (*targetm.asm_out.internal_label) (asm_out_file, "L",
7447 CODE_LABEL_NUMBER (xoperands[0]));
7448 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7452 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7453 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7456 output_asm_insn ("blr %%r0,%%r2", xoperands);
7457 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7461 /* Return the total length of the save and restore instructions needed for
7462 the data linkage table pointer (i.e., the PIC register) across the call
7463 instruction INSN. No-return calls do not require a save and restore.
7464 In addition, we may be able to avoid the save and restore for calls
7465 within the same translation unit. */
7468 attr_length_save_restore_dltp (rtx insn)
7470 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7476 /* In HPUX 8.0's shared library scheme, special relocations are needed
7477 for function labels if they might be passed to a function
7478 in a shared library (because shared libraries don't live in code
7479 space), and special magic is needed to construct their address. */
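/* Added note (an assumption, judging from pa_strip_name_encoding below,
   which strips a leading '@'): the "special magic" is a marker character
   prepended to the symbol name, e.g. the label for foo is encoded as @foo
   so later code can recognize function labels that need this treatment.  */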
7482 hppa_encode_label (rtx sym)
7484 const char *str = XSTR (sym, 0);
7485 int len = strlen (str) + 1;
7488 p = newstr = alloca (len + 1);
7492 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7496 pa_encode_section_info (tree decl, rtx rtl, int first)
7498 default_encode_section_info (decl, rtl, first);
7500 if (first && TEXT_SPACE_P (decl))
7502 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7503 if (TREE_CODE (decl) == FUNCTION_DECL)
7504 hppa_encode_label (XEXP (rtl, 0));
7508 /* This is sort of the inverse of pa_encode_section_info. */
7511 pa_strip_name_encoding (const char *str)
7513 str += (*str == '@');
7514 str += (*str == '*');
7519 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7521 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7524 /* Returns 1 if OP is a function label involved in a simple addition
7525 with a constant. Used to keep certain patterns from matching
7526 during instruction combination. */
7528 is_function_label_plus_const (rtx op)
7530 /* Strip off any CONST. */
7531 if (GET_CODE (op) == CONST)
7534 return (GET_CODE (op) == PLUS
7535 && function_label_operand (XEXP (op, 0), Pmode)
7536 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7539 /* Output assembly code for a thunk to FUNCTION. */
7542 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7543 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7546 static unsigned int current_thunk_number;
7547 int val_14 = VAL_14_BITS_P (delta);
7552 xoperands[0] = XEXP (DECL_RTL (function), 0);
7553 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7554 xoperands[2] = GEN_INT (delta);
7556 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7557 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7559 /* Output the thunk. We know that the function is in the same
7560 translation unit (i.e., the same space) as the thunk, and that
7561 thunks are output after their method. Thus, we don't need an
7562 external branch to reach the function. With SOM and GAS,
7563 functions and thunks are effectively in different sections.
7564 Thus, we can always use an IA-relative branch and the linker
7565 will add a long branch stub if necessary.
7567 However, we have to be careful when generating PIC code on the
7568 SOM port to ensure that the sequence does not transfer to an
7569 import stub for the target function as this could clobber the
7570 return value saved at SP-24. This would also apply to the
7571 32-bit linux port if the multi-space model is implemented. */
7572 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7573 && !(flag_pic && TREE_PUBLIC (function))
7574 && (TARGET_GAS || last_address < 262132))
7575 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7576 && ((targetm.have_named_sections
7577 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7578 /* The GNU 64-bit linker has rather poor stub management.
7579 So, we use a long branch from thunks that aren't in
7580 the same section as the target function. */
7582 && (DECL_SECTION_NAME (thunk_fndecl)
7583 != DECL_SECTION_NAME (function)))
7584 || ((DECL_SECTION_NAME (thunk_fndecl)
7585 == DECL_SECTION_NAME (function))
7586 && last_address < 262132)))
7587 || (!targetm.have_named_sections && last_address < 262132))))
7590 output_asm_insn ("addil L'%2,%%r26", xoperands);
7592 output_asm_insn ("b %0", xoperands);
7596 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7601 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7605 else if (TARGET_64BIT)
7607 /* We only have one call-clobbered scratch register, so we can't
7608 make use of the delay slot if delta doesn't fit in 14 bits. */
7611 output_asm_insn ("addil L'%2,%%r26", xoperands);
7612 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7615 output_asm_insn ("b,l .+8,%%r1", xoperands);
7619 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7620 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7624 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
7625 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
7630 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7631 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7636 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
7640 else if (TARGET_PORTABLE_RUNTIME)
7642 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7643 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
7646 output_asm_insn ("addil L'%2,%%r26", xoperands);
7648 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7652 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7657 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7661 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7663 /* The function is accessible from outside this module. The only
7664 way to avoid an import stub between the thunk and function is to
7665 call the function directly with an indirect sequence similar to
7666 that used by $$dyncall. This is possible because $$dyncall acts
7667 as the import stub in an indirect call. */
7668 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
7669 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
7670 output_asm_insn ("addil LT'%3,%%r19", xoperands);
7671 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
7672 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7673 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
7674 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
7675 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
7676 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
7680 output_asm_insn ("addil L'%2,%%r26", xoperands);
7686 output_asm_insn ("bve (%%r22)", xoperands);
7689 else if (TARGET_NO_SPACE_REGS)
7691 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
7696 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
7697 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
7698 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
7703 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7705 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7709 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7711 if (TARGET_SOM || !TARGET_GAS)
7713 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
7714 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
7718 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7719 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
7723 output_asm_insn ("addil L'%2,%%r26", xoperands);
7725 output_asm_insn ("bv %%r0(%%r22)", xoperands);
7729 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7734 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7741 output_asm_insn ("addil L'%2,%%r26", xoperands);
7743 output_asm_insn ("ldil L'%0,%%r22", xoperands);
7744 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
7748 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7753 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7758 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
7760 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
7763 output_asm_insn (".align 4", xoperands);
7764 ASM_OUTPUT_LABEL (file, label);
7765 output_asm_insn (".word P'%0", xoperands);
7767 else if (TARGET_SOM && TARGET_GAS)
7770 current_thunk_number++;
7771 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
7772 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
7773 last_address += nbytes;
7774 update_total_code_bytes (nbytes);
7777 /* Only direct calls to static functions are allowed to be sibling (tail) call optimizations.
7780 This restriction is necessary because some linker generated stubs will
7781 store return pointers into rp' in some cases which might clobber a
7782 live value already in rp'.
7784 In a sibcall the current function and the target function share stack
7785 space. Thus if the path to the current function and the path to the
7786 target function save a value in rp', they save the value into the
7787 same stack slot, which has undesirable consequences.
7789 Because of the deferred binding nature of shared libraries any function
7790 with external scope could be in a different load module and thus require
7791 rp' to be saved when calling that function. So sibcall optimizations
7792 can only be safe for static functions.
7794 Note that GCC never needs return value relocations, so we don't have to
7795 worry about static calls with return value relocations (which require
7798 It is safe to perform a sibcall optimization when the target function
7799 will never return. */
7801 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
7803 if (TARGET_PORTABLE_RUNTIME)
7806 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
7807 single subspace mode and the call is not indirect. As far as I know,
7808 there is no operating system support for the multiple subspace mode.
7809 It might be possible to support indirect calls if we didn't use
7810 $$dyncall (see the indirect sequence generated in output_call). */
7812 return (decl != NULL_TREE);
7814 /* Sibcalls are not ok because the arg pointer register is not a fixed
7815 register. This prevents the sibcall optimization from occurring. In
7816 addition, there are problems with stub placement using GNU ld. This
7817 is because a normal sibcall branch uses a 17-bit relocation while
7818 a regular call branch uses a 22-bit relocation. As a result, more
7819 care needs to be taken in the placement of long-branch stubs. */
7823 /* Sibcalls are only ok within a translation unit. */
7824 return (decl && !TREE_PUBLIC (decl));
7827 /* ??? Addition is not commutative on the PA due to the weird implicit
7828 space register selection rules for memory addresses. Therefore, we
7829 don't consider a + b == b + a, as this might be inside a MEM. */
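/* Added example (illustrative): inside a MEM the implicit space register
   is selected from the base operand, so an address written as
   (plus %r4 %r5) and one written as (plus %r5 %r4) may end up using
   different space registers; hence PLUS is not treated as commutative
   when it might appear inside a MEM.  */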
7831 pa_commutative_p (rtx x, int outer_code)
7833 return (COMMUTATIVE_P (x)
7834 && ((outer_code != UNKNOWN && outer_code != MEM)
7835 || GET_CODE (x) != PLUS));
7838 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
7839 use in fmpyadd instructions. */
7841 fmpyaddoperands (rtx *operands)
7843 enum machine_mode mode = GET_MODE (operands[0]);
7845 /* Must be a floating point mode. */
7846 if (mode != SFmode && mode != DFmode)
7849 /* All modes must be the same. */
7850 if (! (mode == GET_MODE (operands[1])
7851 && mode == GET_MODE (operands[2])
7852 && mode == GET_MODE (operands[3])
7853 && mode == GET_MODE (operands[4])
7854 && mode == GET_MODE (operands[5])))
7857 /* All operands must be registers. */
7858 if (! (GET_CODE (operands[1]) == REG
7859 && GET_CODE (operands[2]) == REG
7860 && GET_CODE (operands[3]) == REG
7861 && GET_CODE (operands[4]) == REG
7862 && GET_CODE (operands[5]) == REG))
7865 /* Only 2 real operands to the addition. One of the input operands must
7866 be the same as the output operand. */
7867 if (! rtx_equal_p (operands[3], operands[4])
7868 && ! rtx_equal_p (operands[3], operands[5]))
7871 /* Inout operand of add cannot conflict with any operands from multiply. */
7872 if (rtx_equal_p (operands[3], operands[0])
7873 || rtx_equal_p (operands[3], operands[1])
7874 || rtx_equal_p (operands[3], operands[2]))
7877 /* multiply cannot feed into addition operands. */
7878 if (rtx_equal_p (operands[4], operands[0])
7879 || rtx_equal_p (operands[5], operands[0]))
7882 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
7884 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
7885 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
7886 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
7887 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
7888 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
7889 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
7892 /* Passed. Operands are suitable for fmpyadd. */
7896 #if !defined(USE_COLLECT2)
7898 pa_asm_out_constructor (rtx symbol, int priority)
7900 if (!function_label_operand (symbol, VOIDmode))
7901 hppa_encode_label (symbol);
7903 #ifdef CTORS_SECTION_ASM_OP
7904 default_ctor_section_asm_out_constructor (symbol, priority);
7906 # ifdef TARGET_ASM_NAMED_SECTION
7907 default_named_section_asm_out_constructor (symbol, priority);
7909 default_stabs_asm_out_constructor (symbol, priority);
7915 pa_asm_out_destructor (rtx symbol, int priority)
7917 if (!function_label_operand (symbol, VOIDmode))
7918 hppa_encode_label (symbol);
7920 #ifdef DTORS_SECTION_ASM_OP
7921 default_dtor_section_asm_out_destructor (symbol, priority);
7923 # ifdef TARGET_ASM_NAMED_SECTION
7924 default_named_section_asm_out_destructor (symbol, priority);
7926 default_stabs_asm_out_destructor (symbol, priority);
7932 /* This function places uninitialized global data in the bss section.
7933 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
7934 function on the SOM port to prevent uninitialized global data from
7935 being placed in the data section. */
7938 pa_asm_output_aligned_bss (FILE *stream,
7940 unsigned HOST_WIDE_INT size,
7944 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7946 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7947 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7950 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7951 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7954 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
7955 ASM_OUTPUT_LABEL (stream, name);
7956 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
7959 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
7960 that doesn't allow the alignment of global common storage to be directly
7961 specified. The SOM linker aligns common storage based on the rounded
7962 value of the NUM_BYTES parameter in the .comm directive. It's not
7963 possible to use the .align directive as it doesn't affect the alignment
7964 of the label associated with a .comm directive. */
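/* Added example (rough sketch of the output below, using a hypothetical
   symbol foo): a request for 12 bytes of common storage with 128-bit
   alignment is emitted as

       foo	.comm 16

   i.e. the size is rounded up to MAX (size, align / BITS_PER_UNIT) so the
   SOM linker's own rounding provides the required alignment.  */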
7967 pa_asm_output_aligned_common (FILE *stream,
7969 unsigned HOST_WIDE_INT size,
7972 unsigned int max_common_align;
7974 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
7975 if (align > max_common_align)
7977 warning (0, "alignment (%u) for %s exceeds maximum alignment "
7978 "for global common data. Using %u",
7979 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
7980 align = max_common_align;
7985 assemble_name (stream, name);
7986 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
7987 MAX (size, align / BITS_PER_UNIT));
7990 /* We can't use .comm for local common storage as the SOM linker effectively
7991 treats the symbol as universal and uses the same storage for local symbols
7992 with the same name in different object files. The .block directive
7993 reserves an uninitialized block of storage. However, it's not common
7994 storage. Fortunately, GCC never requests common storage with the same
7995 name in any given translation unit. */
7998 pa_asm_output_aligned_local (FILE *stream,
8000 unsigned HOST_WIDE_INT size,
8004 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8007 fprintf (stream, "%s", LOCAL_ASM_OP);
8008 assemble_name (stream, name);
8009 fprintf (stream, "\n");
8012 ASM_OUTPUT_LABEL (stream, name);
8013 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8016 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8017 use in fmpysub instructions. */
8019 fmpysuboperands (rtx *operands)
8021 enum machine_mode mode = GET_MODE (operands[0]);
8023 /* Must be a floating point mode. */
8024 if (mode != SFmode && mode != DFmode)
8027 /* All modes must be the same. */
8028 if (! (mode == GET_MODE (operands[1])
8029 && mode == GET_MODE (operands[2])
8030 && mode == GET_MODE (operands[3])
8031 && mode == GET_MODE (operands[4])
8032 && mode == GET_MODE (operands[5])))
8035 /* All operands must be registers. */
8036 if (! (GET_CODE (operands[1]) == REG
8037 && GET_CODE (operands[2]) == REG
8038 && GET_CODE (operands[3]) == REG
8039 && GET_CODE (operands[4]) == REG
8040 && GET_CODE (operands[5]) == REG))
8043 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8044 operation, so operands[4] must be the same as operands[3].
8045 if (! rtx_equal_p (operands[3], operands[4]))
8048 /* multiply cannot feed into subtraction. */
8049 if (rtx_equal_p (operands[5], operands[0]))
8052 /* Inout operand of sub cannot conflict with any operands from multiply. */
8053 if (rtx_equal_p (operands[3], operands[0])
8054 || rtx_equal_p (operands[3], operands[1])
8055 || rtx_equal_p (operands[3], operands[2]))
8058 /* SFmode limits the registers to the upper 32 of the 32bit FP regs. */
8060 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8061 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8062 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8063 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8064 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8065 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8068 /* Passed. Operands are suitable for fmpysub. */
8072 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8073 constants for shadd instructions. */
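/* Added note: these correspond to the sh1add, sh2add and sh3add forms,
   which scale one operand by 2, 4 or 8 before performing the add.  */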
8075 shadd_constant_p (int val)
8077 if (val == 2 || val == 4 || val == 8)
8083 /* Return 1 if OP is valid as a base or index register in a
8087 borx_reg_operand (rtx op, enum machine_mode mode)
8089 if (GET_CODE (op) != REG)
8092 /* We must reject virtual registers as the only expressions that
8093 can be instantiated are REG and REG+CONST. */
8094 if (op == virtual_incoming_args_rtx
8095 || op == virtual_stack_vars_rtx
8096 || op == virtual_stack_dynamic_rtx
8097 || op == virtual_outgoing_args_rtx
8098 || op == virtual_cfa_rtx)
8101 /* While it's always safe to index off the frame pointer, it's not
8102 profitable to do so when the frame pointer is being eliminated. */
8103 if (!reload_completed
8104 && flag_omit_frame_pointer
8105 && !current_function_calls_alloca
8106 && op == frame_pointer_rtx)
8109 return register_operand (op, mode);
8112 /* Return 1 if this operand is anything other than a hard register. */
8115 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8117 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8120 /* Return 1 if INSN branches forward. Should be using insn_addresses
8121 to avoid walking through all the insns... */
8123 forward_branch_p (rtx insn)
8125 rtx label = JUMP_LABEL (insn);
8132 insn = NEXT_INSN (insn);
8135 return (insn == label);
8138 /* Return 1 if OP is an equality comparison, else return 0. */
8140 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8142 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8145 /* Return 1 if INSN is in the delay slot of a call instruction. */
8147 jump_in_call_delay (rtx insn)
8150 if (GET_CODE (insn) != JUMP_INSN)
8153 if (PREV_INSN (insn)
8154 && PREV_INSN (PREV_INSN (insn))
8155 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8157 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8159 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8160 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8167 /* Output an unconditional move and branch insn. */
8170 output_parallel_movb (rtx *operands, int length)
8172 /* These are the cases in which we win. */
8174 return "mov%I1b,tr %1,%0,%2";
8176 /* None of these cases wins, but they don't lose either. */
8177 if (dbr_sequence_length () == 0)
8179 /* Nothing in the delay slot, fake it by putting the combined
8180 insn (the copy or add) in the delay slot of a bl. */
8181 if (GET_CODE (operands[1]) == CONST_INT)
8182 return "b %2\n\tldi %1,%0";
8184 return "b %2\n\tcopy %1,%0";
8188 /* Something in the delay slot, but we've got a long branch. */
8189 if (GET_CODE (operands[1]) == CONST_INT)
8190 return "ldi %1,%0\n\tb %2";
8192 return "copy %1,%0\n\tb %2";
8196 /* Output an unconditional add and branch insn. */
8199 output_parallel_addb (rtx *operands, int length)
8201 /* To make life easy we want operand0 to be the shared input/output
8202 operand and operand1 to be the readonly operand. */
8203 if (operands[0] == operands[1])
8204 operands[1] = operands[2];
8206 /* These are the cases in which we win. */
8208 return "add%I1b,tr %1,%0,%3";
8210 /* None of these cases win, but they don't lose either. */
8211 if (dbr_sequence_length () == 0)
8213 /* Nothing in the delay slot, fake it by putting the combined
8214 insn (the copy or add) in the delay slot of a bl. */
8215 return "b %3\n\tadd%I1 %1,%0,%0";
8219 /* Something in the delay slot, but we've got a long branch. */
8220 return "add%I1 %1,%0,%0\n\tb %3";
8224 /* Return nonzero if INSN (a jump insn) immediately follows a call
8225 to a named function. This is used to avoid filling the delay slot
8226 of the jump since it can usually be eliminated by modifying RP in
8227 the delay slot of the call. */
8230 following_call (rtx insn)
8232 if (! TARGET_JUMP_IN_DELAY)
8235 /* Find the previous real insn, skipping NOTEs. */
8236 insn = PREV_INSN (insn);
8237 while (insn && GET_CODE (insn) == NOTE)
8238 insn = PREV_INSN (insn);
8240 /* Check for CALL_INSNs and millicode calls. */
8242 && ((GET_CODE (insn) == CALL_INSN
8243 && get_attr_type (insn) != TYPE_DYNCALL)
8244 || (GET_CODE (insn) == INSN
8245 && GET_CODE (PATTERN (insn)) != SEQUENCE
8246 && GET_CODE (PATTERN (insn)) != USE
8247 && GET_CODE (PATTERN (insn)) != CLOBBER
8248 && get_attr_type (insn) == TYPE_MILLI)))
8254 /* We use this hook to perform a PA specific optimization which is difficult
8255 to do in earlier passes.
8257 We want the delay slots of branches within jump tables to be filled.
8258 None of the compiler passes at the moment even has the notion that a
8259 PA jump table doesn't contain addresses, but instead contains actual instructions!
8262 Because we actually jump into the table, the addresses of each entry
8263 must stay constant in relation to the beginning of the table (which
8264 itself must stay constant relative to the instruction to jump into
8265 it). I don't believe we can guarantee earlier passes of the compiler
8266 will adhere to those rules.
8268 So, late in the compilation process we find all the jump tables, and
8269 expand them into real code -- e.g. each entry in the jump table vector
8270 will get an appropriate label followed by a jump to the final target.
8272 Reorg and the final jump pass can then optimize these branches and
8273 fill their delay slots. We end up with smaller, more efficient code.
8275 The jump instructions within the table are special; we must be able
8276 to identify them during assembly output (if the jumps don't get filled
8277 we need to emit a nop rather than nullifying the delay slot). We
8278 identify jumps in switch tables by using insns with the attribute
8279 type TYPE_BTABLE_BRANCH.
8281 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8282 insns. This serves two purposes: first, it prevents jump.c from
8283 noticing that the last N entries in the table jump to the instruction
8284 immediately after the table and deleting the jumps. Second, those
8285 insns mark where we should emit .begin_brtab and .end_brtab directives
8286 when using GAS (allows for better link time optimizations). */
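/* Added sketch (illustrative, hypothetical labels): after this pass a
   two-entry ADDR_VEC that originally held the addresses of L$10 and L$11
   is replaced by real branches such as

       L$100:	b L$10
		nop
       L$101:	b L$11
		nop

   bracketed by begin_brtab/end_brtab markers, so reorg and the final jump
   pass can try to fill the delay slots of the new jumps.  */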
8293 remove_useless_addtr_insns (1);
8295 if (pa_cpu < PROCESSOR_8000)
8296 pa_combine_instructions ();
8299 /* This is fairly cheap, so always run it if optimizing. */
8300 if (optimize > 0 && !TARGET_BIG_SWITCH)
8302 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8303 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8305 rtx pattern, tmp, location, label;
8306 unsigned int length, i;
8308 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8309 if (GET_CODE (insn) != JUMP_INSN
8310 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8311 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8314 /* Emit marker for the beginning of the branch table. */
8315 emit_insn_before (gen_begin_brtab (), insn);
8317 pattern = PATTERN (insn);
8318 location = PREV_INSN (insn);
8319 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8321 for (i = 0; i < length; i++)
8323 /* Emit a label before each jump to keep jump.c from
8324 removing this code. */
8325 tmp = gen_label_rtx ();
8326 LABEL_NUSES (tmp) = 1;
8327 emit_label_after (tmp, location);
8328 location = NEXT_INSN (location);
8330 if (GET_CODE (pattern) == ADDR_VEC)
8331 label = XEXP (XVECEXP (pattern, 0, i), 0);
8333 label = XEXP (XVECEXP (pattern, 1, i), 0);
8335 tmp = gen_short_jump (label);
8337 /* Emit the jump itself. */
8338 tmp = emit_jump_insn_after (tmp, location);
8339 JUMP_LABEL (tmp) = label;
8340 LABEL_NUSES (label)++;
8341 location = NEXT_INSN (location);
8343 /* Emit a BARRIER after the jump. */
8344 emit_barrier_after (location);
8345 location = NEXT_INSN (location);
8348 /* Emit marker for the end of the branch table. */
8349 emit_insn_before (gen_end_brtab (), location);
8350 location = NEXT_INSN (location);
8351 emit_barrier_after (location);
8353 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8359 /* Still need brtab marker insns. FIXME: the presence of these
8360 markers disables output of the branch table to readonly memory,
8361 and any alignment directives that might be needed. Possibly,
8362 the begin_brtab insn should be output before the label for the
8363 table. This doesn't matter at the moment since the tables are
8364 always output in the text section. */
8365 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8367 /* Find an ADDR_VEC insn. */
8368 if (GET_CODE (insn) != JUMP_INSN
8369 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8370 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8373 /* Now generate markers for the beginning and end of the branch table. */
8375 emit_insn_before (gen_begin_brtab (), insn);
8376 emit_insn_after (gen_end_brtab (), insn);
8381 /* The PA has a number of odd instructions which can perform multiple
8382 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8383 it may be profitable to combine two instructions into one instruction
8384 with two outputs. It's not profitable on PA2.0 machines because the
8385 two outputs would take two slots in the reorder buffers.
8387 This routine finds instructions which can be combined and combines
8388 them. We only support some of the potential combinations, and we
8389 only try common ways to find suitable instructions.
8391 * addb can add two registers or a register and a small integer
8392 and jump to a nearby (+-8k) location. Normally the jump to the
8393 nearby location is conditional on the result of the add, but by
8394 using the "true" condition we can make the jump unconditional.
8395 Thus addb can perform two independent operations in one insn.
8397 * movb is similar to addb in that it can perform a reg->reg
8398 or small immediate->reg copy and jump to a nearby (+-8k) location.
8400 * fmpyadd and fmpysub can perform a FP multiply and either an
8401 FP add or FP sub if the operands of the multiply and add/sub are
8402 independent (there are other minor restrictions). Note both
8403 the fmpy and fadd/fsub can in theory move to better spots according
8404 to data dependencies, but for now we require the fmpy stay at a fixed location.
8407 * Many of the memory operations can perform pre & post updates
8408 of index registers. GCC's pre/post increment/decrement addressing
8409 is far too simple to take advantage of all the possibilities. This
8410 pass may not be suitable since those insns may not be independent.
8412 * comclr can compare two ints or an int and a register, nullify
8413 the following instruction and zero some other register. This
8414 is more difficult to use as it's harder to find an insn which
8415 will generate a comclr than finding something like an unconditional
8416 branch. (conditional moves & long branches create comclr insns).
8418 * Most arithmetic operations can conditionally skip the next
8419 instruction. They can be viewed as "perform this operation
8420 and conditionally jump to this nearby location" (where nearby
8421 is an insn away). These are difficult to use due to the
8422 branch length restrictions. */
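/* Added example (illustrative): a register copy followed by an
   unconditional branch, e.g.

       copy %r4,%r5
       b L$2

   can be combined into a single "movb,tr %r4,%r5,L$2" (see
   output_parallel_movb above); addb plays the same role for the
   add-and-branch case.  */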
8425 pa_combine_instructions (void)
8429 /* This can get expensive since the basic algorithm is on the
8430 order of O(n^2) (or worse). Only do it for -O2 or higher
8431 levels of optimization. */
8435 /* Walk down the list of insns looking for "anchor" insns which
8436 may be combined with "floating" insns. As the name implies,
8437 "anchor" instructions don't move, while "floating" insns may
8439 new = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8440 new = make_insn_raw (new);
8442 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8444 enum attr_pa_combine_type anchor_attr;
8445 enum attr_pa_combine_type floater_attr;
8447 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8448 Also ignore any special USE insns. */
8449 if ((GET_CODE (anchor) != INSN
8450 && GET_CODE (anchor) != JUMP_INSN
8451 && GET_CODE (anchor) != CALL_INSN)
8452 || GET_CODE (PATTERN (anchor)) == USE
8453 || GET_CODE (PATTERN (anchor)) == CLOBBER
8454 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8455 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8458 anchor_attr = get_attr_pa_combine_type (anchor);
8459 /* See if anchor is an insn suitable for combination. */
8460 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8461 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8462 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8463 && ! forward_branch_p (anchor)))
8467 for (floater = PREV_INSN (anchor);
8469 floater = PREV_INSN (floater))
8471 if (GET_CODE (floater) == NOTE
8472 || (GET_CODE (floater) == INSN
8473 && (GET_CODE (PATTERN (floater)) == USE
8474 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8477 /* Anything except a regular INSN will stop our search. */
8478 if (GET_CODE (floater) != INSN
8479 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8480 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8486 /* See if FLOATER is suitable for combination with the anchor. */
8488 floater_attr = get_attr_pa_combine_type (floater);
8489 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8490 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8491 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8492 && floater_attr == PA_COMBINE_TYPE_FMPY))
8494 /* If ANCHOR and FLOATER can be combined, then we're
8495 done with this pass. */
8496 if (pa_can_combine_p (new, anchor, floater, 0,
8497 SET_DEST (PATTERN (floater)),
8498 XEXP (SET_SRC (PATTERN (floater)), 0),
8499 XEXP (SET_SRC (PATTERN (floater)), 1)))
8503 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8504 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8506 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8508 if (pa_can_combine_p (new, anchor, floater, 0,
8509 SET_DEST (PATTERN (floater)),
8510 XEXP (SET_SRC (PATTERN (floater)), 0),
8511 XEXP (SET_SRC (PATTERN (floater)), 1)))
8516 if (pa_can_combine_p (new, anchor, floater, 0,
8517 SET_DEST (PATTERN (floater)),
8518 SET_SRC (PATTERN (floater)),
8519 SET_SRC (PATTERN (floater))))
8525 /* If we didn't find anything on the backwards scan try forwards. */
8527 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8528 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8530 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8532 if (GET_CODE (floater) == NOTE
8533 || (GET_CODE (floater) == INSN
8534 && (GET_CODE (PATTERN (floater)) == USE
8535 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8539 /* Anything except a regular INSN will stop our search. */
8540 if (GET_CODE (floater) != INSN
8541 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8542 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8548 /* See if FLOATER is suitable for combination with the anchor. */
8550 floater_attr = get_attr_pa_combine_type (floater);
8551 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8552 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8553 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8554 && floater_attr == PA_COMBINE_TYPE_FMPY))
8556 /* If ANCHOR and FLOATER can be combined, then we're
8557 done with this pass. */
8558 if (pa_can_combine_p (new, anchor, floater, 1,
8559 SET_DEST (PATTERN (floater)),
8560 XEXP (SET_SRC (PATTERN (floater)),
8562 XEXP (SET_SRC (PATTERN (floater)),
8569 /* FLOATER will be nonzero if we found a suitable floating
8570 insn for combination with ANCHOR. */
8572 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8573 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8575 /* Emit the new instruction and delete the old anchor. */
8576 emit_insn_before (gen_rtx_PARALLEL
8578 gen_rtvec (2, PATTERN (anchor),
8579 PATTERN (floater))),
8582 PUT_CODE (anchor, NOTE);
8583 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8584 NOTE_SOURCE_FILE (anchor) = 0;
8586 /* Emit a special USE insn for FLOATER, then delete
8587 the floating insn. */
8588 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8589 delete_insn (floater);
8594 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
8597 /* Emit the new_jump instruction and delete the old anchor. */
8599 = emit_jump_insn_before (gen_rtx_PARALLEL
8601 gen_rtvec (2, PATTERN (anchor),
8602 PATTERN (floater))),
8605 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
8606 PUT_CODE (anchor, NOTE);
8607 NOTE_LINE_NUMBER (anchor) = NOTE_INSN_DELETED;
8608 NOTE_SOURCE_FILE (anchor) = 0;
8610 /* Emit a special USE insn for FLOATER, then delete
8611 the floating insn. */
8612 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
8613 delete_insn (floater);
8621 pa_can_combine_p (rtx new, rtx anchor, rtx floater, int reversed, rtx dest,
8624 int insn_code_number;
8627 /* Create a PARALLEL with the patterns of ANCHOR and
8628 FLOATER, try to recognize it, then test constraints
8629 for the resulting pattern.
8631 If the pattern doesn't match or the constraints
8632 aren't met, keep searching for a suitable floater insn. */
8634 XVECEXP (PATTERN (new), 0, 0) = PATTERN (anchor);
8635 XVECEXP (PATTERN (new), 0, 1) = PATTERN (floater);
8636 INSN_CODE (new) = -1;
8637 insn_code_number = recog_memoized (new);
8638 if (insn_code_number < 0
8639 || (extract_insn (new), ! constrain_operands (1)))
8653 /* There are up to three operands to consider: one
8654 output and two inputs.
8656 The output must not be used between FLOATER & ANCHOR
8657 exclusive. The inputs must not be set between
8658 FLOATER and ANCHOR exclusive. */
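/* Sketch of the intent, based on the callers above: for an fmpy/fadd
   combination DEST is the result register of the floater's arithmetic SET
   and SRC1/SRC2 are its two inputs, so the tests below guarantee the
   floater could equally well have executed at the anchor's position.  */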
8660 if (reg_used_between_p (dest, start, end))
8663 if (reg_set_between_p (src1, start, end))
8666 if (reg_set_between_p (src2, start, end))
8669 /* If we get here, then everything is good. */
8673 /* Return nonzero if references for INSN are delayed.
8675 Millicode insns are actually function calls with some special
8676 constraints on arguments and register usage.
8678 Millicode calls always expect their arguments in the integer argument
8679 registers, and always return their result in %r29 (ret1). They
8680 are expected to clobber their arguments, %r1, %r29, and the return
8681 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
8683 This function tells reorg that the references to arguments and
8684 millicode calls do not appear to happen until after the millicode call.
8685 This allows reorg to put insns which set the argument registers into the
8686 delay slot of the millicode call -- thus they act more like traditional CALL_INSNs.
8689 Note we cannot consider side effects of the insn to be delayed because
8690 the branch and link insn will clobber the return pointer. If we happened
8691 to use the return pointer in the delay slot of the call, then we lose.
8693 get_attr_type will try to recognize the given insn, so make sure to
8694 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns in this case. */
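/* Illustrative example (the mnemonics and register choices are
   assumptions, not taken from this file): reorg may transform

       copy %r4,%r25        ; set up a millicode argument
       bl $$divI,%r31       ; millicode call, return pointer in %r31
       nop                  ; unfilled delay slot

   into

       bl $$divI,%r31
       copy %r4,%r25        ; argument set up in the delay slot

   because this hook reports the argument references as delayed.  */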
8697 insn_refs_are_delayed (rtx insn)
8699 return ((GET_CODE (insn) == INSN
8700 && GET_CODE (PATTERN (insn)) != SEQUENCE
8701 && GET_CODE (PATTERN (insn)) != USE
8702 && GET_CODE (PATTERN (insn)) != CLOBBER
8703 && get_attr_type (insn) == TYPE_MILLI));
8706 /* On the HP-PA the value is found in register(s) 28(-29), unless
8707 the mode is SF or DF. Then the value is returned in fr4 (32).
8709 This must perform the same promotions as PROMOTE_MODE, else
8710 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
8712 Small structures must be returned in a PARALLEL on PA64 in order
8713 to match the HP Compiler ABI. */
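/* Illustrative summary of the cases handled below (an annotation, not new
   behavior): a plain int return is promoted to word_mode and lands in
   %r28; a float or double return uses the FP return register (hard reg 32,
   i.e. fr4) unless software floating point is in use; and on the 64-bit
   ABI a small aggregate is described by a PARALLEL over %r28 (and %r29
   when it is wider than one word).  */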
8716 function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
8718 enum machine_mode valmode;
8720 if (AGGREGATE_TYPE_P (valtype))
8724 /* Aggregates with a size less than or equal to 128 bits are
8725 returned in GR 28(-29). They are left justified. The pad
8726 bits are undefined. Larger aggregates are returned in memory. */
8730 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
8732 for (i = 0; i < ub; i++)
8734 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8735 gen_rtx_REG (DImode, 28 + i),
8740 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
8742 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
8744 /* Aggregates 5 to 8 bytes in size are returned in general
8745 registers r28-r29 in the same manner as other non
8746 floating-point objects. The data is right-justified and
8747 zero-extended to 64 bits. This is opposite to the normal
8748 justification used on big endian targets and requires
8749 special treatment. */
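/* For instance (illustration only): on this big-endian target a 6-byte
   aggregate returned here occupies the low-order 48 bits of the DImode
   %r28/%r29 pair, with the upper bits zeroed, instead of the
   left-justified placement BLKmode padding would normally give.  */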
8750 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8751 gen_rtx_REG (DImode, 28), const0_rtx);
8752 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8756 if ((INTEGRAL_TYPE_P (valtype)
8757 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
8758 || POINTER_TYPE_P (valtype))
8759 valmode = word_mode;
8761 valmode = TYPE_MODE (valtype);
8763 if (TREE_CODE (valtype) == REAL_TYPE
8764 && !AGGREGATE_TYPE_P (valtype)
8765 && TYPE_MODE (valtype) != TFmode
8766 && !TARGET_SOFT_FLOAT)
8767 return gen_rtx_REG (valmode, 32);
8769 return gen_rtx_REG (valmode, 28);
8772 /* Return the location of a parameter that is passed in a register or NULL
8773 if the parameter has any component that is passed in memory.
8775 This is new code and will be pushed into the net sources after further testing.
8778 ??? We might want to restructure this so that it looks more like other ports. */
8781 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
8782 int named ATTRIBUTE_UNUSED)
8784 int max_arg_words = (TARGET_64BIT ? 8 : 4);
8791 if (mode == VOIDmode)
8794 arg_size = FUNCTION_ARG_SIZE (mode, type);
8796 /* If this arg would be passed partially or totally on the stack, then
8797 this routine should return zero. pa_arg_partial_bytes will
8798 handle arguments which are split between regs and stack slots if
8799 the ABI mandates split arguments. */
8802 /* The 32-bit ABI does not split arguments. */
8803 if (cum->words + arg_size > max_arg_words)
8809 alignment = cum->words & 1;
8810 if (cum->words + alignment >= max_arg_words)
8814 /* The 32bit ABIs and the 64bit ABIs are rather different,
8815 particularly in their handling of FP registers. We might
8816 be able to cleverly share code between them, but I'm not
8817 going to bother in the hope that splitting them up results
8818 in code that is more easily understood. */
8822 /* Advance the base registers to their current locations.
8824 Remember, gprs grow towards smaller register numbers while
8825 fprs grow to higher register numbers. Also remember that
8826 although FP regs are 32-bit addressable, we pretend that
8827 the registers are 64-bits wide. */
8828 gpr_reg_base = 26 - cum->words;
8829 fpr_reg_base = 32 + cum->words;
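/* Worked example of the computation above (values are illustrative):
   with cum->words == 2, the next argument slot yields
   gpr_reg_base = 26 - 2 = 24 and fpr_reg_base = 32 + 2 = 34, matching the
   note that GPR numbers shrink while FPR numbers grow as argument words
   are consumed.  */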
8831 /* Arguments wider than one word and small aggregates need special treatment. */
8835 || (type && AGGREGATE_TYPE_P (type)))
8837 /* Double-extended precision (80-bit), quad-precision (128-bit)
8838 and aggregates including complex numbers are aligned on
8839 128-bit boundaries. The first eight 64-bit argument slots
8840 are associated one-to-one, with general registers r26
8841 through r19, and also with floating-point registers fr4
8842 through fr11. Arguments larger than one word are always
8843 passed in general registers.
8845 Using a PARALLEL with a word mode register results in left
8846 justified data on a big-endian target. */
8849 int i, offset = 0, ub = arg_size;
8851 /* Align the base register. */
8852 gpr_reg_base -= alignment;
8854 ub = MIN (ub, max_arg_words - cum->words - alignment);
8855 for (i = 0; i < ub; i++)
8857 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
8858 gen_rtx_REG (DImode, gpr_reg_base),
8864 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
8869 /* If the argument is larger than a word, then we know precisely
8870 which registers we must use. */
8884 /* Structures 5 to 8 bytes in size are passed in the general
8885 registers in the same manner as other non floating-point
8886 objects. The data is right-justified and zero-extended
8887 to 64 bits. This is opposite to the normal justification
8888 used on big endian targets and requires special treatment.
8889 We now define BLOCK_REG_PADDING to pad these objects. */
8890 if (mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
8892 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
8893 gen_rtx_REG (DImode, gpr_reg_base),
8895 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
8900 /* We have a single word (32 bits). A simple computation
8901 will get us the register #s we need. */
8902 gpr_reg_base = 26 - cum->words;
8903 fpr_reg_base = 32 + 2 * cum->words;
8907 /* Determine if the argument needs to be passed in both general and
8908 floating point registers. */
8909 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
8910 /* If we are doing soft-float with portable runtime, then there
8911 is no need to worry about FP regs. */
8912 && !TARGET_SOFT_FLOAT
8913 /* The parameter must be some kind of float, else we can just
8914 pass it in integer registers. */
8915 && FLOAT_MODE_P (mode)
8916 /* The target function must not have a prototype. */
8917 && cum->nargs_prototype <= 0
8918 /* libcalls do not need to pass items in both FP and general
8920 && type != NULL_TREE
8921 /* All this hair applies to "outgoing" args only. This includes
8922 sibcall arguments set up with FUNCTION_INCOMING_ARG. */
8924 /* Also pass outgoing floating arguments in both registers in indirect
8925 calls with the 32 bit ABI and the HP assembler since there is no
8926 way to specify argument locations in static functions. */
8931 && FLOAT_MODE_P (mode)))
8937 gen_rtx_EXPR_LIST (VOIDmode,
8938 gen_rtx_REG (mode, fpr_reg_base),
8940 gen_rtx_EXPR_LIST (VOIDmode,
8941 gen_rtx_REG (mode, gpr_reg_base),
8946 /* See if we should pass this parameter in a general register. */
8947 if (TARGET_SOFT_FLOAT
8948 /* Indirect calls in the normal 32bit ABI require all arguments
8949 to be passed in general registers. */
8950 || (!TARGET_PORTABLE_RUNTIME
8954 /* If the parameter is not a floating point parameter, then
8955 it belongs in GPRs. */
8956 || !FLOAT_MODE_P (mode)
8957 /* Structure with single SFmode field belongs in GPR. */
8958 || (type && AGGREGATE_TYPE_P (type)))
8959 retval = gen_rtx_REG (mode, gpr_reg_base);
8961 retval = gen_rtx_REG (mode, fpr_reg_base);
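/* Summary sketch of the decision above (annotation only): an outgoing FP
   argument to an unprototyped function may be described by a two-element
   PARALLEL naming both an FP and a GP register copy of the value, so the
   callee finds it whichever convention it expects; otherwise a single REG
   in either gpr_reg_base or fpr_reg_base is used.  */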
8967 /* If this arg would be passed totally in registers or totally on the stack,
8968 then this routine should return zero. */
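/* Worked example (illustrative numbers, 64-bit ABI): with max_arg_words
   of 8, cum->words == 6, no alignment padding and a 4-word argument,
   6 + 0 + 4 > 8 but 6 + 0 < 8, so (8 - 6 - 0) * UNITS_PER_WORD = 16 bytes
   are passed in registers and the remainder goes on the stack.  */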
8971 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
8972 tree type, bool named ATTRIBUTE_UNUSED)
8974 unsigned int max_arg_words = 8;
8975 unsigned int offset = 0;
8980 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
8983 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
8984 /* Arg fits fully into registers. */
8986 else if (cum->words + offset >= max_arg_words)
8987 /* Arg fully on the stack. */
8991 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
8995 /* Return a string to output before text in the current function.
8997 This function is only used with SOM. Because we don't support
8998 named subspaces, we can only create a new subspace or switch back
8999 to the default text subspace. */
9001 som_text_section_asm_op (void)
9008 if (cfun && !cfun->machine->in_nsubspa)
9010 /* We only want to emit a .nsubspa directive once at the
9011 start of the function. */
9012 cfun->machine->in_nsubspa = 1;
9014 /* Create a new subspace for the text. This provides
9015 better stub placement and one-only functions. */
9017 && DECL_ONE_ONLY (cfun->decl)
9018 && !DECL_WEAK (cfun->decl))
9020 "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,SORT=24,COMDAT";
9022 return "\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$";
9026 /* There isn't a current function or the body of the current
9027 function has been completed. So, we are changing to the
9028 text section to output debugging information. Do this in
9029 the default text section. We need to forget that we are
9030 in the text section so that the function text_section in
9031 varasm.c will call us the next time around. */
9036 return "\t.SPACE $TEXT$\n\t.SUBSPA $CODE$";
9039 /* On hpux10, the linker will give an error if we have a reference
9040 in the read-only data section to a symbol defined in a shared
9041 library. Therefore, expressions that might require a reloc can
9042 not be placed in the read-only data section. */
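/* Hypothetical example of the problem described above: a read-only
   variable such as "const int *const p = &foo;" needs a relocation when
   foo may live in a shared library, so RELOC is nonzero for it and it must
   be kept out of the read-only data section.  */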
9045 pa_select_section (tree exp, int reloc,
9046 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9048 if (TREE_CODE (exp) == VAR_DECL
9049 && TREE_READONLY (exp)
9050 && !TREE_THIS_VOLATILE (exp)
9051 && DECL_INITIAL (exp)
9052 && (DECL_INITIAL (exp) == error_mark_node
9053 || TREE_CONSTANT (DECL_INITIAL (exp)))
9057 && DECL_ONE_ONLY (exp)
9058 && !DECL_WEAK (exp))
9059 som_one_only_readonly_data_section ();
9061 readonly_data_section ();
9063 else if (CONSTANT_CLASS_P (exp) && !reloc)
9064 readonly_data_section ();
9066 && TREE_CODE (exp) == VAR_DECL
9067 && DECL_ONE_ONLY (exp)
9068 && !DECL_WEAK (exp))
9069 som_one_only_data_section ();
9075 pa_globalize_label (FILE *stream, const char *name)
9077 /* We only handle DATA objects here, functions are globalized in
9078 ASM_DECLARE_FUNCTION_NAME. */
9079 if (! FUNCTION_NAME_P (name))
9081 fputs ("\t.EXPORT ", stream);
9082 assemble_name (stream, name);
9083 fputs (",DATA\n", stream);
9087 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9090 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9091 int incoming ATTRIBUTE_UNUSED)
9093 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9096 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9099 pa_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
9101 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9102 PA64 ABI says that objects larger than 128 bits are returned in memory.
9103 Note, int_size_in_bytes can return -1 if the size of the object is
9104 variable or larger than the maximum value that can be expressed as
9105 a HOST_WIDE_INT. It can also return zero for an empty type. The
9106 simplest way to handle variable and empty types is to pass them in
9107 memory. This avoids problems in defining the boundaries of argument
9108 slots, allocating registers, etc. */
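/* Illustrative traces of the test below: a 12-byte struct is returned in
   memory with the 32-bit ABI (12 > 8) but in registers on PA64 (12 <= 16),
   while an empty or variable-sized type makes int_size_in_bytes return a
   value <= 0 and is therefore also forced into memory.  */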
9109 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9110 || int_size_in_bytes (type) <= 0);
9113 /* Structure to hold declaration and name of external symbols that are
9114 emitted by GCC. We generate a vector of these symbols and output them
9115 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9116 This avoids putting out names that are never really used. */
9118 typedef struct extern_symbol GTY(())
9124 /* Define gc'd vector type for extern_symbol. */
9125 DEF_VEC_O(extern_symbol);
9126 DEF_VEC_ALLOC_O(extern_symbol,gc);
9128 /* Vector of extern_symbol pointers. */
9129 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9131 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9132 /* Mark DECL (name NAME) as an external reference (assembler output
9133 file FILE). This saves the names to output at the end of the file
9134 if actually referenced. */
9137 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9139 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9141 gcc_assert (file == asm_out_file);
9146 /* Output text required at the end of an assembler file.
9147 This includes deferred plabels and .import directives for
9148 all external symbols that were actually referenced. */
9151 pa_hpux_file_end (void)
9156 output_deferred_plabels ();
9158 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9160 tree decl = p->decl;
9162 if (!TREE_ASM_WRITTEN (decl)
9163 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9164 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9167 VEC_free (extern_symbol, gc, extern_symbols);