1 /* Subroutines for insn-output.c for HPPA.
2 Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
3 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
5 Contributed by Tim Moore (moore@cs.utah.edu), based on sparc.c
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3, or (at your option)
14 GCC is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
25 #include "coretypes.h"
29 #include "hard-reg-set.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-attr.h"
41 #include "integrate.h"
49 #include "target-def.h"
52 /* Return nonzero if there is a bypass for the output of
53 OUT_INSN and the fp store IN_INSN. */
55 hppa_fpstore_bypass_p (rtx out_insn, rtx in_insn)
57 enum machine_mode store_mode;
58 enum machine_mode other_mode;
61 if (recog_memoized (in_insn) < 0
62 || (get_attr_type (in_insn) != TYPE_FPSTORE
63 && get_attr_type (in_insn) != TYPE_FPSTORE_LOAD)
64 || recog_memoized (out_insn) < 0)
67 store_mode = GET_MODE (SET_SRC (PATTERN (in_insn)));
69 set = single_set (out_insn);
73 other_mode = GET_MODE (SET_SRC (set));
75 return (GET_MODE_SIZE (store_mode) == GET_MODE_SIZE (other_mode));
79 #ifndef DO_FRAME_NOTES
80 #ifdef INCOMING_RETURN_ADDR_RTX
81 #define DO_FRAME_NOTES 1
82 #else
83 #define DO_FRAME_NOTES 0
84 #endif
85 #endif
87 static void copy_reg_pointer (rtx, rtx);
88 static void fix_range (const char *);
89 static bool pa_handle_option (size_t, const char *, int);
90 static int hppa_address_cost (rtx, bool);
91 static bool hppa_rtx_costs (rtx, int, int, int *, bool);
92 static inline rtx force_mode (enum machine_mode, rtx);
93 static void pa_reorg (void);
94 static void pa_combine_instructions (void);
95 static int pa_can_combine_p (rtx, rtx, rtx, int, rtx, rtx, rtx);
96 static int forward_branch_p (rtx);
97 static void compute_zdepwi_operands (unsigned HOST_WIDE_INT, unsigned *);
98 static int compute_movmem_length (rtx);
99 static int compute_clrmem_length (rtx);
100 static bool pa_assemble_integer (rtx, unsigned int, int);
101 static void remove_useless_addtr_insns (int);
102 static void store_reg (int, HOST_WIDE_INT, int);
103 static void store_reg_modify (int, int, HOST_WIDE_INT);
104 static void load_reg (int, HOST_WIDE_INT, int);
105 static void set_reg_plus_d (int, int, HOST_WIDE_INT, int);
106 static void pa_output_function_prologue (FILE *, HOST_WIDE_INT);
107 static void update_total_code_bytes (unsigned int);
108 static void pa_output_function_epilogue (FILE *, HOST_WIDE_INT);
109 static int pa_adjust_cost (rtx, rtx, rtx, int);
110 static int pa_adjust_priority (rtx, int);
111 static int pa_issue_rate (void);
112 static void pa_som_asm_init_sections (void) ATTRIBUTE_UNUSED;
113 static section *pa_select_section (tree, int, unsigned HOST_WIDE_INT)
115 static void pa_encode_section_info (tree, rtx, int);
116 static const char *pa_strip_name_encoding (const char *);
117 static bool pa_function_ok_for_sibcall (tree, tree);
118 static void pa_globalize_label (FILE *, const char *)
120 static void pa_asm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
121 HOST_WIDE_INT, tree);
122 #if !defined(USE_COLLECT2)
123 static void pa_asm_out_constructor (rtx, int);
124 static void pa_asm_out_destructor (rtx, int);
126 static void pa_init_builtins (void);
127 static rtx hppa_builtin_saveregs (void);
128 static void hppa_va_start (tree, rtx);
129 static tree hppa_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
130 static bool pa_scalar_mode_supported_p (enum machine_mode);
131 static bool pa_commutative_p (const_rtx x, int outer_code);
132 static void copy_fp_args (rtx) ATTRIBUTE_UNUSED;
133 static int length_fp_args (rtx) ATTRIBUTE_UNUSED;
134 static rtx hppa_legitimize_address (rtx, rtx, enum machine_mode);
135 static inline void pa_file_start_level (void) ATTRIBUTE_UNUSED;
136 static inline void pa_file_start_space (int) ATTRIBUTE_UNUSED;
137 static inline void pa_file_start_file (int) ATTRIBUTE_UNUSED;
138 static inline void pa_file_start_mcount (const char*) ATTRIBUTE_UNUSED;
139 static void pa_elf_file_start (void) ATTRIBUTE_UNUSED;
140 static void pa_som_file_start (void) ATTRIBUTE_UNUSED;
141 static void pa_linux_file_start (void) ATTRIBUTE_UNUSED;
142 static void pa_hpux64_gas_file_start (void) ATTRIBUTE_UNUSED;
143 static void pa_hpux64_hpas_file_start (void) ATTRIBUTE_UNUSED;
144 static void output_deferred_plabels (void);
145 static void output_deferred_profile_counters (void) ATTRIBUTE_UNUSED;
146 #ifdef ASM_OUTPUT_EXTERNAL_REAL
147 static void pa_hpux_file_end (void);
149 #ifdef HPUX_LONG_DOUBLE_LIBRARY
150 static void pa_hpux_init_libfuncs (void);
152 static rtx pa_struct_value_rtx (tree, int);
153 static bool pa_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
155 static int pa_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
157 static struct machine_function * pa_init_machine_status (void);
158 static enum reg_class pa_secondary_reload (bool, rtx, enum reg_class,
160 secondary_reload_info *);
161 static void pa_extra_live_on_entry (bitmap);
163 /* The following extra sections are only used for SOM. */
164 static GTY(()) section *som_readonly_data_section;
165 static GTY(()) section *som_one_only_readonly_data_section;
166 static GTY(()) section *som_one_only_data_section;
168 /* Save the operands last given to a compare for use when we
169 generate a scc or bcc insn. */
170 rtx hppa_compare_op0, hppa_compare_op1;
171 enum cmp_type hppa_branch_type;
173 /* Which cpu we are scheduling for. */
174 enum processor_type pa_cpu = TARGET_SCHED_DEFAULT;
176 /* The UNIX standard to use for predefines and linking. */
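/* Note added for clarity (assumption from HP-UX conventions): the values
   1993, 1995 and 1998 below are taken to correspond to the UNIX 93,
   UNIX 95 and UNIX 98 standards respectively.  */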
177 int flag_pa_unix = TARGET_HPUX_11_11 ? 1998 : TARGET_HPUX_10_10 ? 1995 : 1993;
179 /* Counts for the number of callee-saved general and floating point
180 registers which were saved by the current function's prologue. */
181 static int gr_saved, fr_saved;
183 /* Boolean indicating whether the return pointer was saved by the
184 current function's prologue. */
185 static bool rp_saved;
187 static rtx find_addr_reg (rtx);
189 /* Keep track of the number of bytes we have output in the CODE subspace
190 during this compilation so we'll know when to emit inline long-calls. */
191 unsigned long total_code_bytes;
193 /* The last address of the previous function plus the number of bytes in
194 associated thunks that have been output. This is used to determine if
195 a thunk can use an IA-relative branch to reach its target function. */
196 static unsigned int last_address;
198 /* Variables to handle plabels that we discover are necessary at assembly
199 output time. They are output after the current function. */
200 struct GTY(()) deferred_plabel
205 static GTY((length ("n_deferred_plabels"))) struct deferred_plabel *
207 static size_t n_deferred_plabels = 0;
210 /* Initialize the GCC target structure. */
212 #undef TARGET_ASM_ALIGNED_HI_OP
213 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
214 #undef TARGET_ASM_ALIGNED_SI_OP
215 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
216 #undef TARGET_ASM_ALIGNED_DI_OP
217 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
218 #undef TARGET_ASM_UNALIGNED_HI_OP
219 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
220 #undef TARGET_ASM_UNALIGNED_SI_OP
221 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
222 #undef TARGET_ASM_UNALIGNED_DI_OP
223 #define TARGET_ASM_UNALIGNED_DI_OP TARGET_ASM_ALIGNED_DI_OP
224 #undef TARGET_ASM_INTEGER
225 #define TARGET_ASM_INTEGER pa_assemble_integer
227 #undef TARGET_ASM_FUNCTION_PROLOGUE
228 #define TARGET_ASM_FUNCTION_PROLOGUE pa_output_function_prologue
229 #undef TARGET_ASM_FUNCTION_EPILOGUE
230 #define TARGET_ASM_FUNCTION_EPILOGUE pa_output_function_epilogue
232 #undef TARGET_LEGITIMIZE_ADDRESS
233 #define TARGET_LEGITIMIZE_ADDRESS hppa_legitimize_address
235 #undef TARGET_SCHED_ADJUST_COST
236 #define TARGET_SCHED_ADJUST_COST pa_adjust_cost
237 #undef TARGET_SCHED_ADJUST_PRIORITY
238 #define TARGET_SCHED_ADJUST_PRIORITY pa_adjust_priority
239 #undef TARGET_SCHED_ISSUE_RATE
240 #define TARGET_SCHED_ISSUE_RATE pa_issue_rate
242 #undef TARGET_ENCODE_SECTION_INFO
243 #define TARGET_ENCODE_SECTION_INFO pa_encode_section_info
244 #undef TARGET_STRIP_NAME_ENCODING
245 #define TARGET_STRIP_NAME_ENCODING pa_strip_name_encoding
247 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
248 #define TARGET_FUNCTION_OK_FOR_SIBCALL pa_function_ok_for_sibcall
250 #undef TARGET_COMMUTATIVE_P
251 #define TARGET_COMMUTATIVE_P pa_commutative_p
253 #undef TARGET_ASM_OUTPUT_MI_THUNK
254 #define TARGET_ASM_OUTPUT_MI_THUNK pa_asm_output_mi_thunk
255 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
256 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall
258 #undef TARGET_ASM_FILE_END
259 #ifdef ASM_OUTPUT_EXTERNAL_REAL
260 #define TARGET_ASM_FILE_END pa_hpux_file_end
261 #else
262 #define TARGET_ASM_FILE_END output_deferred_plabels
263 #endif
265 #if !defined(USE_COLLECT2)
266 #undef TARGET_ASM_CONSTRUCTOR
267 #define TARGET_ASM_CONSTRUCTOR pa_asm_out_constructor
268 #undef TARGET_ASM_DESTRUCTOR
269 #define TARGET_ASM_DESTRUCTOR pa_asm_out_destructor
272 #undef TARGET_DEFAULT_TARGET_FLAGS
273 #define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | TARGET_CPU_DEFAULT)
274 #undef TARGET_HANDLE_OPTION
275 #define TARGET_HANDLE_OPTION pa_handle_option
277 #undef TARGET_INIT_BUILTINS
278 #define TARGET_INIT_BUILTINS pa_init_builtins
280 #undef TARGET_RTX_COSTS
281 #define TARGET_RTX_COSTS hppa_rtx_costs
282 #undef TARGET_ADDRESS_COST
283 #define TARGET_ADDRESS_COST hppa_address_cost
285 #undef TARGET_MACHINE_DEPENDENT_REORG
286 #define TARGET_MACHINE_DEPENDENT_REORG pa_reorg
288 #ifdef HPUX_LONG_DOUBLE_LIBRARY
289 #undef TARGET_INIT_LIBFUNCS
290 #define TARGET_INIT_LIBFUNCS pa_hpux_init_libfuncs
293 #undef TARGET_PROMOTE_FUNCTION_RETURN
294 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
295 #undef TARGET_PROMOTE_PROTOTYPES
296 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
298 #undef TARGET_STRUCT_VALUE_RTX
299 #define TARGET_STRUCT_VALUE_RTX pa_struct_value_rtx
300 #undef TARGET_RETURN_IN_MEMORY
301 #define TARGET_RETURN_IN_MEMORY pa_return_in_memory
302 #undef TARGET_MUST_PASS_IN_STACK
303 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
304 #undef TARGET_PASS_BY_REFERENCE
305 #define TARGET_PASS_BY_REFERENCE pa_pass_by_reference
306 #undef TARGET_CALLEE_COPIES
307 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
308 #undef TARGET_ARG_PARTIAL_BYTES
309 #define TARGET_ARG_PARTIAL_BYTES pa_arg_partial_bytes
311 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
312 #define TARGET_EXPAND_BUILTIN_SAVEREGS hppa_builtin_saveregs
313 #undef TARGET_EXPAND_BUILTIN_VA_START
314 #define TARGET_EXPAND_BUILTIN_VA_START hppa_va_start
315 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
316 #define TARGET_GIMPLIFY_VA_ARG_EXPR hppa_gimplify_va_arg_expr
318 #undef TARGET_SCALAR_MODE_SUPPORTED_P
319 #define TARGET_SCALAR_MODE_SUPPORTED_P pa_scalar_mode_supported_p
321 #undef TARGET_CANNOT_FORCE_CONST_MEM
322 #define TARGET_CANNOT_FORCE_CONST_MEM pa_tls_referenced_p
324 #undef TARGET_SECONDARY_RELOAD
325 #define TARGET_SECONDARY_RELOAD pa_secondary_reload
327 #undef TARGET_EXTRA_LIVE_ON_ENTRY
328 #define TARGET_EXTRA_LIVE_ON_ENTRY pa_extra_live_on_entry
330 struct gcc_target targetm = TARGET_INITIALIZER;
332 /* Parse the -mfixed-range= option string. */
335 fix_range (const char *const_str)
338 char *str, *dash, *comma;
340 /* str must be of the form REG1'-'REG2{,REG1'-'REG} where REG1 and
341 REG2 are either register names or register numbers. The effect
342 of this option is to mark the registers in the range from REG1 to
343 REG2 as ``fixed'' so they won't be used by the compiler. This is
344 used, e.g., to ensure that kernel mode code doesn't use fr4-fr31. */
346 i = strlen (const_str);
347 str = (char *) alloca (i + 1);
348 memcpy (str, const_str, i + 1);
352 dash = strchr (str, '-');
355 warning (0, "value of -mfixed-range must have form REG1-REG2");
360 comma = strchr (dash + 1, ',');
364 first = decode_reg_name (str);
367 warning (0, "unknown register name: %s", str);
371 last = decode_reg_name (dash + 1);
374 warning (0, "unknown register name: %s", dash + 1);
382 warning (0, "%s-%s is an empty range", str, dash + 1);
386 for (i = first; i <= last; ++i)
387 fixed_regs[i] = call_used_regs[i] = 1;
396 /* Check if all floating point registers have been fixed. */
397 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
402 target_flags |= MASK_DISABLE_FPREGS;
405 /* Implement TARGET_HANDLE_OPTION. */
408 pa_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
413 case OPT_mpa_risc_1_0:
415 target_flags &= ~(MASK_PA_11 | MASK_PA_20);
419 case OPT_mpa_risc_1_1:
421 target_flags &= ~MASK_PA_20;
422 target_flags |= MASK_PA_11;
425 case OPT_mpa_risc_2_0:
427 target_flags |= MASK_PA_11 | MASK_PA_20;
431 if (strcmp (arg, "8000") == 0)
432 pa_cpu = PROCESSOR_8000;
433 else if (strcmp (arg, "7100") == 0)
434 pa_cpu = PROCESSOR_7100;
435 else if (strcmp (arg, "700") == 0)
436 pa_cpu = PROCESSOR_700;
437 else if (strcmp (arg, "7100LC") == 0)
438 pa_cpu = PROCESSOR_7100LC;
439 else if (strcmp (arg, "7200") == 0)
440 pa_cpu = PROCESSOR_7200;
441 else if (strcmp (arg, "7300") == 0)
442 pa_cpu = PROCESSOR_7300;
447 case OPT_mfixed_range_:
457 #if TARGET_HPUX_10_10
463 #if TARGET_HPUX_11_11
475 override_options (void)
477 /* Unconditional branches in the delay slot are not compatible with dwarf2
478 call frame information. There is no benefit in using this optimization
479 on PA8000 and later processors. */
480 if (pa_cpu >= PROCESSOR_8000
481 || (! USING_SJLJ_EXCEPTIONS && flag_exceptions)
482 || flag_unwind_tables)
483 target_flags &= ~MASK_JUMP_IN_DELAY;
485 if (flag_pic && TARGET_PORTABLE_RUNTIME)
487 warning (0, "PIC code generation is not supported in the portable runtime model");
490 if (flag_pic && TARGET_FAST_INDIRECT_CALLS)
492 warning (0, "PIC code generation is not compatible with fast indirect calls");
495 if (! TARGET_GAS && write_symbols != NO_DEBUG)
497 warning (0, "-g is only supported when using GAS on this processor,");
498 warning (0, "-g option disabled");
499 write_symbols = NO_DEBUG;
502 /* We only support the "big PIC" model now. And we always generate PIC
503 code when in 64bit mode. */
504 if (flag_pic == 1 || TARGET_64BIT)
507 /* We can't guarantee that .dword is available for 32-bit targets. */
508 if (UNITS_PER_WORD == 4)
509 targetm.asm_out.aligned_op.di = NULL;
511 /* The unaligned ops are only available when using GAS. */
514 targetm.asm_out.unaligned_op.hi = NULL;
515 targetm.asm_out.unaligned_op.si = NULL;
516 targetm.asm_out.unaligned_op.di = NULL;
519 init_machine_status = pa_init_machine_status;
523 pa_init_builtins (void)
525 #ifdef DONT_HAVE_FPUTC_UNLOCKED
526 built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED] =
527 built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
528 implicit_built_in_decls[(int) BUILT_IN_FPUTC_UNLOCKED]
529 = implicit_built_in_decls[(int) BUILT_IN_PUTC_UNLOCKED];
532 if (built_in_decls [BUILT_IN_FINITE])
533 set_user_assembler_name (built_in_decls [BUILT_IN_FINITE], "_Isfinite");
534 if (built_in_decls [BUILT_IN_FINITEF])
535 set_user_assembler_name (built_in_decls [BUILT_IN_FINITEF], "_Isfinitef");
539 /* Function to init struct machine_function.
540 This will be called, via a pointer variable,
541 from push_function_context. */
543 static struct machine_function *
544 pa_init_machine_status (void)
546 return GGC_CNEW (machine_function);
549 /* If FROM is a probable pointer register, mark TO as a probable
550 pointer register with the same pointer alignment as FROM. */
553 copy_reg_pointer (rtx to, rtx from)
555 if (REG_POINTER (from))
556 mark_reg_pointer (to, REGNO_POINTER_ALIGN (REGNO (from)));
559 /* Return 1 if X contains a symbolic expression. We know these
560 expressions will have one of a few well defined forms, so
561 we need only check those forms. */
563 symbolic_expression_p (rtx x)
566 /* Strip off any HIGH. */
567 if (GET_CODE (x) == HIGH)
570 return (symbolic_operand (x, VOIDmode));
573 /* Accept any constant that can be moved in one instruction into a general register.  */
576 cint_ok_for_move (HOST_WIDE_INT ival)
578 /* OK if ldo, ldil, or zdepi, can be used. */
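  /* Illustrative examples (values chosen here, not from the original
     source): 0x1fff fits in a signed 14-bit immediate (ldo);
     0x12345800 has its low 11 bits clear and keeps its sign when
     widened (ldil); 0x00ffe000 is -1 in a 5-bit field sign-extended to
     an 11-bit field and deposited into zeros (zdepi).  */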
579 return (VAL_14_BITS_P (ival)
580 || ldil_cint_p (ival)
581 || zdepi_cint_p (ival));
584 /* Return truth value of whether OP can be used as an operand in a
587 adddi3_operand (rtx op, enum machine_mode mode)
589 return (register_operand (op, mode)
590 || (GET_CODE (op) == CONST_INT
591 && (TARGET_64BIT ? INT_14_BITS (op) : INT_11_BITS (op))));
594 /* True iff the operand OP can be used as the destination operand of
595 an integer store. This also implies the operand could be used as
596 the source operand of an integer load. Symbolic, lo_sum and indexed
597 memory operands are not allowed. We accept reloading pseudos and
598 other memory operands. */
600 integer_store_memory_operand (rtx op, enum machine_mode mode)
602 return ((reload_in_progress
604 && REGNO (op) >= FIRST_PSEUDO_REGISTER
605 && reg_renumber [REGNO (op)] < 0)
606 || (GET_CODE (op) == MEM
607 && (reload_in_progress || memory_address_p (mode, XEXP (op, 0)))
608 && !symbolic_memory_operand (op, VOIDmode)
609 && !IS_LO_SUM_DLT_ADDR_P (XEXP (op, 0))
610 && !IS_INDEX_ADDR_P (XEXP (op, 0))));
613 /* True iff ldil can be used to load this CONST_INT. The least
614 significant 11 bits of the value must be zero and the value must
615 not change sign when extended from 32 to 64 bits. */
617 ldil_cint_p (HOST_WIDE_INT ival)
619 HOST_WIDE_INT x = ival & (((HOST_WIDE_INT) -1 << 31) | 0x7ff);
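  /* Explanatory note: X keeps only bit 31 and above together with the low
     11 bits of IVAL.  The test below therefore accepts exactly two cases:
     all of those bits are zero (a non-negative 32-bit value with its low
     11 bits clear), or bit 31 and everything above it are ones with the
     low 11 bits clear (a negative 32-bit value that keeps its sign when
     widened to 64 bits).  */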
621 return x == 0 || x == ((HOST_WIDE_INT) -1 << 31);
624 /* True iff zdepi can be used to generate this CONST_INT.
625 zdepi first sign extends a 5-bit signed number to a given field
626 length, then places this field anywhere in a zero. */
628 zdepi_cint_p (unsigned HOST_WIDE_INT x)
630 unsigned HOST_WIDE_INT lsb_mask, t;
632 /* This might not be obvious, but it's at least fast.
633 This function is critical; we don't have the time loops would take. */
634 lsb_mask = x & -x;
635 t = ((x >> 4) + lsb_mask) & ~(lsb_mask - 1);
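/* Explanatory note on the computation above and the test below: LSB_MASK
   isolates the lowest set bit of X.  After the shift by 4 and the final
   AND, only the bits of X that lie at least four places above that lowest
   bit survive; adding LSB_MASK turns them into a single power of two
   exactly when they form a contiguous run of ones starting right above
   those four bits, or are absent.  In other words, X must be an arbitrary
   pattern in the four bits above its lowest set bit, topped by a run of
   ones -- precisely what depositing a sign-extended 5-bit immediate into
   zeros can produce.  */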
636 /* Return true iff t is a power of two. */
637 return ((t & (t - 1)) == 0);
640 /* True iff depi or extru can be used to compute (reg & mask).
641 Accept bit patterns like these:
646 and_mask_p (unsigned HOST_WIDE_INT mask)
649 mask += mask & -mask;
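  /* Explanatory note: MASK & -MASK isolates the lowest set bit.  When the
     ones in MASK form a single contiguous block, adding that bit carries
     all the way through the block and leaves at most one bit set, so the
     power-of-two test below succeeds; any other pattern leaves two or
     more bits set and is rejected.  ior_mask_p below uses the same
     trick.  */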
650 return (mask & (mask - 1)) == 0;
653 /* True iff depi can be used to compute (reg | MASK). */
655 ior_mask_p (unsigned HOST_WIDE_INT mask)
657 mask += mask & -mask;
658 return (mask & (mask - 1)) == 0;
661 /* Legitimize PIC addresses. If the address is already
662 position-independent, we return ORIG. Newly generated
663 position-independent addresses go to REG. If we need more
664 than one register, we lose. */
667 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
671 gcc_assert (!PA_SYMBOL_REF_TLS_P (orig));
673 /* Labels need special handling. */
674 if (pic_label_operand (orig, mode))
678 /* We do not want to go through the movXX expanders here since that
679 would create recursion.
681 Nor do we really want to call a generator for a named pattern
682 since that requires multiple patterns if we want to support multiple word sizes.
685 So instead we just emit the raw set, which avoids the movXX
686 expanders completely. */
687 mark_reg_pointer (reg, BITS_PER_UNIT);
688 insn = emit_insn (gen_rtx_SET (VOIDmode, reg, orig));
690 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
691 add_reg_note (insn, REG_EQUAL, orig);
693 /* During and after reload, we need to generate a REG_LABEL_OPERAND note
694 and update LABEL_NUSES because this is not done automatically. */
695 if (reload_in_progress || reload_completed)
697 /* Extract LABEL_REF. */
698 if (GET_CODE (orig) == CONST)
699 orig = XEXP (XEXP (orig, 0), 0);
700 /* Extract CODE_LABEL. */
701 orig = XEXP (orig, 0);
702 add_reg_note (insn, REG_LABEL_OPERAND, orig);
703 LABEL_NUSES (orig)++;
705 crtl->uses_pic_offset_table = 1;
708 if (GET_CODE (orig) == SYMBOL_REF)
714 /* Before reload, allocate a temporary register for the intermediate
715 result. This allows the sequence to be deleted when the final
716 result is unused and the insns are trivially dead. */
717 tmp_reg = ((reload_in_progress || reload_completed)
718 ? reg : gen_reg_rtx (Pmode));
720 if (function_label_operand (orig, mode))
722 /* Force function label into memory in word mode. */
723 orig = XEXP (force_const_mem (word_mode, orig), 0);
724 /* Load plabel address from DLT. */
725 emit_move_insn (tmp_reg,
726 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
727 gen_rtx_HIGH (word_mode, orig)));
729 = gen_const_mem (Pmode,
730 gen_rtx_LO_SUM (Pmode, tmp_reg,
731 gen_rtx_UNSPEC (Pmode,
734 emit_move_insn (reg, pic_ref);
735 /* Now load address of function descriptor. */
736 pic_ref = gen_rtx_MEM (Pmode, reg);
740 /* Load symbol reference from DLT. */
741 emit_move_insn (tmp_reg,
742 gen_rtx_PLUS (word_mode, pic_offset_table_rtx,
743 gen_rtx_HIGH (word_mode, orig)));
745 = gen_const_mem (Pmode,
746 gen_rtx_LO_SUM (Pmode, tmp_reg,
747 gen_rtx_UNSPEC (Pmode,
752 crtl->uses_pic_offset_table = 1;
753 mark_reg_pointer (reg, BITS_PER_UNIT);
754 insn = emit_move_insn (reg, pic_ref);
756 /* Put a REG_EQUAL note on this insn, so that it can be optimized. */
757 set_unique_reg_note (insn, REG_EQUAL, orig);
761 else if (GET_CODE (orig) == CONST)
765 if (GET_CODE (XEXP (orig, 0)) == PLUS
766 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
770 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
772 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
773 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
774 base == reg ? 0 : reg);
776 if (GET_CODE (orig) == CONST_INT)
778 if (INT_14_BITS (orig))
779 return plus_constant (base, INTVAL (orig));
780 orig = force_reg (Pmode, orig);
782 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
783 /* Likewise, should we set special REG_NOTEs here? */
789 static GTY(()) rtx gen_tls_tga;
792 gen_tls_get_addr (void)
795 gen_tls_tga = init_one_libfunc ("__tls_get_addr");
800 hppa_tls_call (rtx arg)
804 ret = gen_reg_rtx (Pmode);
805 emit_library_call_value (gen_tls_get_addr (), ret,
806 LCT_CONST, Pmode, 1, arg, Pmode);
812 legitimize_tls_address (rtx addr)
814 rtx ret, insn, tmp, t1, t2, tp;
815 enum tls_model model = SYMBOL_REF_TLS_MODEL (addr);
819 case TLS_MODEL_GLOBAL_DYNAMIC:
820 tmp = gen_reg_rtx (Pmode);
822 emit_insn (gen_tgd_load_pic (tmp, addr));
824 emit_insn (gen_tgd_load (tmp, addr));
825 ret = hppa_tls_call (tmp);
828 case TLS_MODEL_LOCAL_DYNAMIC:
829 ret = gen_reg_rtx (Pmode);
830 tmp = gen_reg_rtx (Pmode);
833 emit_insn (gen_tld_load_pic (tmp, addr));
835 emit_insn (gen_tld_load (tmp, addr));
836 t1 = hppa_tls_call (tmp);
839 t2 = gen_reg_rtx (Pmode);
840 emit_libcall_block (insn, t2, t1,
841 gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
843 emit_insn (gen_tld_offset_load (ret, addr, t2));
846 case TLS_MODEL_INITIAL_EXEC:
847 tp = gen_reg_rtx (Pmode);
848 tmp = gen_reg_rtx (Pmode);
849 ret = gen_reg_rtx (Pmode);
850 emit_insn (gen_tp_load (tp));
852 emit_insn (gen_tie_load_pic (tmp, addr));
854 emit_insn (gen_tie_load (tmp, addr));
855 emit_move_insn (ret, gen_rtx_PLUS (Pmode, tp, tmp));
858 case TLS_MODEL_LOCAL_EXEC:
859 tp = gen_reg_rtx (Pmode);
860 ret = gen_reg_rtx (Pmode);
861 emit_insn (gen_tp_load (tp));
862 emit_insn (gen_tle_load (ret, addr, tp));
872 /* Try machine-dependent ways of modifying an illegitimate address
873 to be legitimate. If we find one, return the new, valid address.
874 This macro is used in only one place: `memory_address' in explow.c.
876 OLDX is the address as it was before break_out_memory_refs was called.
877 In some cases it is useful to look at this to decide what needs to be done.
879 MODE and WIN are passed so that this macro can use
880 GO_IF_LEGITIMATE_ADDRESS.
882 It is always safe for this macro to do nothing. It exists to recognize
883 opportunities to optimize the output.
885 For the PA, transform:
887 memory(X + <large int>)
891 if (<large int> & mask) >= (mask + 1) / 2
892 Y = (<large int> & ~mask) + mask + 1 Round up.
894 Y = (<large int> & ~mask) Round down.
896 memory (Z + (<large int> - Y));
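   A worked example with illustrative numbers (not from the original
   text): with the MODE_INT mask 0x3fff and <large int> = 0x12345, the
   low bits 0x2345 are in the upper half of the range, so Y rounds up to
   0x14000; Z then holds X + 0x14000 and the reference becomes
   memory (Z + (0x12345 - 0x14000)), whose displacement -0x1cbb fits in
   14 bits.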
898 This is for CSE to find several similar references, and only use one Z.
900 X can either be a SYMBOL_REF or REG, but because combine cannot
901 perform a 4->2 combination we do nothing for SYMBOL_REF + D where
902 D will not fit in 14 bits.
904 MODE_FLOAT references allow displacements which fit in 5 bits, so use 0x1f as the mask.
907 MODE_INT references allow displacements which fit in 14 bits, so use 0x3fff as the mask.
910 This relies on the fact that most mode MODE_FLOAT references will use FP
911 registers and most mode MODE_INT references will use integer registers.
912 (In the rare case of an FP register used in an integer MODE, we depend
913 on secondary reloads to clean things up.)
916 It is also beneficial to handle (plus (mult (X) (Y)) (Z)) in a special
917 manner if Y is 2, 4, or 8. (allows more shadd insns and shifted indexed
918 addressing modes to be used).
920 Put X and Z into registers.  Then put the entire expression into a register.  */
924 hppa_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
925 enum machine_mode mode)
929 /* We need to canonicalize the order of operands in unscaled indexed
930 addresses since the code that checks if an address is valid doesn't
931 always try both orders. */
932 if (!TARGET_NO_SPACE_REGS
933 && GET_CODE (x) == PLUS
934 && GET_MODE (x) == Pmode
935 && REG_P (XEXP (x, 0))
936 && REG_P (XEXP (x, 1))
937 && REG_POINTER (XEXP (x, 0))
938 && !REG_POINTER (XEXP (x, 1)))
939 return gen_rtx_PLUS (Pmode, XEXP (x, 1), XEXP (x, 0));
941 if (PA_SYMBOL_REF_TLS_P (x))
942 return legitimize_tls_address (x);
944 return legitimize_pic_address (x, mode, gen_reg_rtx (Pmode));
946 /* Strip off CONST. */
947 if (GET_CODE (x) == CONST)
950 /* Special case. Get the SYMBOL_REF into a register and use indexing.
951 That should always be safe. */
952 if (GET_CODE (x) == PLUS
953 && GET_CODE (XEXP (x, 0)) == REG
954 && GET_CODE (XEXP (x, 1)) == SYMBOL_REF)
956 rtx reg = force_reg (Pmode, XEXP (x, 1));
957 return force_reg (Pmode, gen_rtx_PLUS (Pmode, reg, XEXP (x, 0)));
960 /* Note we must reject symbols which represent function addresses
961 since the assembler/linker can't handle arithmetic on plabels. */
962 if (GET_CODE (x) == PLUS
963 && GET_CODE (XEXP (x, 1)) == CONST_INT
964 && ((GET_CODE (XEXP (x, 0)) == SYMBOL_REF
965 && !FUNCTION_NAME_P (XSTR (XEXP (x, 0), 0)))
966 || GET_CODE (XEXP (x, 0)) == REG))
968 rtx int_part, ptr_reg;
970 int offset = INTVAL (XEXP (x, 1));
973 mask = (GET_MODE_CLASS (mode) == MODE_FLOAT
974 ? (INT14_OK_STRICT ? 0x3fff : 0x1f) : 0x3fff);
976 /* Choose which way to round the offset. Round up if we
977 are >= halfway to the next boundary. */
978 if ((offset & mask) >= ((mask + 1) / 2))
979 newoffset = (offset & ~ mask) + mask + 1;
981 newoffset = (offset & ~ mask);
983 /* If the newoffset will not fit in 14 bits (ldo), then
984 handling this would take 4 or 5 instructions (2 to load
985 the SYMBOL_REF + 1 or 2 to load the newoffset + 1 to
986 add the new offset and the SYMBOL_REF.) Combine can
987 not handle 4->2 or 5->2 combinations, so do not create anything new.  */
989 if (! VAL_14_BITS_P (newoffset)
990 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF)
992 rtx const_part = plus_constant (XEXP (x, 0), newoffset);
995 gen_rtx_HIGH (Pmode, const_part));
998 gen_rtx_LO_SUM (Pmode,
999 tmp_reg, const_part));
1003 if (! VAL_14_BITS_P (newoffset))
1004 int_part = force_reg (Pmode, GEN_INT (newoffset));
1006 int_part = GEN_INT (newoffset);
1008 ptr_reg = force_reg (Pmode,
1009 gen_rtx_PLUS (Pmode,
1010 force_reg (Pmode, XEXP (x, 0)),
1013 return plus_constant (ptr_reg, offset - newoffset);
1016 /* Handle (plus (mult (a) (shadd_constant)) (b)). */
1018 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == MULT
1019 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1020 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1)))
1021 && (OBJECT_P (XEXP (x, 1))
1022 || GET_CODE (XEXP (x, 1)) == SUBREG)
1023 && GET_CODE (XEXP (x, 1)) != CONST)
1025 int val = INTVAL (XEXP (XEXP (x, 0), 1));
1029 if (GET_CODE (reg1) != REG)
1030 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1032 reg2 = XEXP (XEXP (x, 0), 0);
1033 if (GET_CODE (reg2) != REG)
1034 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1036 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
1037 gen_rtx_MULT (Pmode,
1043 /* Similarly for (plus (plus (mult (a) (shadd_constant)) (b)) (c)).
1045 Only do so for floating point modes since this is more speculative
1046 and we lose if it's an integer store. */
1047 if (GET_CODE (x) == PLUS
1048 && GET_CODE (XEXP (x, 0)) == PLUS
1049 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
1050 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == CONST_INT
1051 && shadd_constant_p (INTVAL (XEXP (XEXP (XEXP (x, 0), 0), 1)))
1052 && (mode == SFmode || mode == DFmode))
1055 /* First, try and figure out what to use as a base register. */
1056 rtx reg1, reg2, base, idx, orig_base;
1058 reg1 = XEXP (XEXP (x, 0), 1);
1063 /* Make sure they're both regs. If one was a SYMBOL_REF [+ const],
1064 then emit_move_sequence will turn on REG_POINTER so we'll know
1065 it's a base register below. */
1066 if (GET_CODE (reg1) != REG)
1067 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1069 if (GET_CODE (reg2) != REG)
1070 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1072 /* Figure out what the base and index are. */
1074 if (GET_CODE (reg1) == REG
1075 && REG_POINTER (reg1))
1078 orig_base = XEXP (XEXP (x, 0), 1);
1079 idx = gen_rtx_PLUS (Pmode,
1080 gen_rtx_MULT (Pmode,
1081 XEXP (XEXP (XEXP (x, 0), 0), 0),
1082 XEXP (XEXP (XEXP (x, 0), 0), 1)),
1085 else if (GET_CODE (reg2) == REG
1086 && REG_POINTER (reg2))
1089 orig_base = XEXP (x, 1);
1096 /* If the index adds a large constant, try to scale the
1097 constant so that it can be loaded with only one insn. */
1098 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1099 && VAL_14_BITS_P (INTVAL (XEXP (idx, 1))
1100 / INTVAL (XEXP (XEXP (idx, 0), 1)))
1101 && INTVAL (XEXP (idx, 1)) % INTVAL (XEXP (XEXP (idx, 0), 1)) == 0)
1103 /* Divide the CONST_INT by the scale factor, then add it to A. */
1104 int val = INTVAL (XEXP (idx, 1));
1106 val /= INTVAL (XEXP (XEXP (idx, 0), 1));
1107 reg1 = XEXP (XEXP (idx, 0), 0);
1108 if (GET_CODE (reg1) != REG)
1109 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1111 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, reg1, GEN_INT (val)));
1113 /* We can now generate a simple scaled indexed address. */
1116 (Pmode, gen_rtx_PLUS (Pmode,
1117 gen_rtx_MULT (Pmode, reg1,
1118 XEXP (XEXP (idx, 0), 1)),
1122 /* If B + C is still a valid base register, then add them. */
1123 if (GET_CODE (XEXP (idx, 1)) == CONST_INT
1124 && INTVAL (XEXP (idx, 1)) <= 4096
1125 && INTVAL (XEXP (idx, 1)) >= -4096)
1127 int val = INTVAL (XEXP (XEXP (idx, 0), 1));
1130 reg1 = force_reg (Pmode, gen_rtx_PLUS (Pmode, base, XEXP (idx, 1)));
1132 reg2 = XEXP (XEXP (idx, 0), 0);
1133 if (GET_CODE (reg2) != CONST_INT)
1134 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1136 return force_reg (Pmode, gen_rtx_PLUS (Pmode,
1137 gen_rtx_MULT (Pmode,
1143 /* Get the index into a register, then add the base + index and
1144 return a register holding the result. */
1146 /* First get A into a register. */
1147 reg1 = XEXP (XEXP (idx, 0), 0);
1148 if (GET_CODE (reg1) != REG)
1149 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1151 /* And get B into a register. */
1152 reg2 = XEXP (idx, 1);
1153 if (GET_CODE (reg2) != REG)
1154 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1156 reg1 = force_reg (Pmode,
1157 gen_rtx_PLUS (Pmode,
1158 gen_rtx_MULT (Pmode, reg1,
1159 XEXP (XEXP (idx, 0), 1)),
1162 /* Add the result to our base register and return. */
1163 return force_reg (Pmode, gen_rtx_PLUS (Pmode, base, reg1));
1167 /* Uh-oh. We might have an address for x[n-100000]. This needs
1168 special handling to avoid creating an indexed memory address
1169 with x-100000 as the base.
1171 If the constant part is small enough, then it's still safe because
1172 there is a guard page at the beginning and end of the data segment.
1174 Scaled references are common enough that we want to try and rearrange the
1175 terms so that we can use indexing for these addresses too. Only
1176 do the optimization for floating point modes.  */
1178 if (GET_CODE (x) == PLUS
1179 && symbolic_expression_p (XEXP (x, 1)))
1181 /* Ugly. We modify things here so that the address offset specified
1182 by the index expression is computed first, then added to x to form
1183 the entire address. */
1185 rtx regx1, regx2, regy1, regy2, y;
1187 /* Strip off any CONST. */
1189 if (GET_CODE (y) == CONST)
1192 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1194 /* See if this looks like
1195 (plus (mult (reg) (shadd_const))
1196 (const (plus (symbol_ref) (const_int))))
1198 Where const_int is small. In that case the const
1199 expression is a valid pointer for indexing.
1201 If const_int is big but can be divided evenly by shadd_const,
1202 the quotient can be added to (reg).  This allows more scaled indexed addresses.  */
1203 if (GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1204 && GET_CODE (XEXP (x, 0)) == MULT
1205 && GET_CODE (XEXP (y, 1)) == CONST_INT
1206 && INTVAL (XEXP (y, 1)) >= -4096
1207 && INTVAL (XEXP (y, 1)) <= 4095
1208 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1209 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1211 int val = INTVAL (XEXP (XEXP (x, 0), 1));
1215 if (GET_CODE (reg1) != REG)
1216 reg1 = force_reg (Pmode, force_operand (reg1, 0));
1218 reg2 = XEXP (XEXP (x, 0), 0);
1219 if (GET_CODE (reg2) != REG)
1220 reg2 = force_reg (Pmode, force_operand (reg2, 0));
1222 return force_reg (Pmode,
1223 gen_rtx_PLUS (Pmode,
1224 gen_rtx_MULT (Pmode,
1229 else if ((mode == DFmode || mode == SFmode)
1230 && GET_CODE (XEXP (y, 0)) == SYMBOL_REF
1231 && GET_CODE (XEXP (x, 0)) == MULT
1232 && GET_CODE (XEXP (y, 1)) == CONST_INT
1233 && INTVAL (XEXP (y, 1)) % INTVAL (XEXP (XEXP (x, 0), 1)) == 0
1234 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
1235 && shadd_constant_p (INTVAL (XEXP (XEXP (x, 0), 1))))
1238 = force_reg (Pmode, GEN_INT (INTVAL (XEXP (y, 1))
1239 / INTVAL (XEXP (XEXP (x, 0), 1))));
1240 regx2 = XEXP (XEXP (x, 0), 0);
1241 if (GET_CODE (regx2) != REG)
1242 regx2 = force_reg (Pmode, force_operand (regx2, 0));
1243 regx2 = force_reg (Pmode, gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1247 gen_rtx_PLUS (Pmode,
1248 gen_rtx_MULT (Pmode, regx2,
1249 XEXP (XEXP (x, 0), 1)),
1250 force_reg (Pmode, XEXP (y, 0))));
1252 else if (GET_CODE (XEXP (y, 1)) == CONST_INT
1253 && INTVAL (XEXP (y, 1)) >= -4096
1254 && INTVAL (XEXP (y, 1)) <= 4095)
1256 /* This is safe because of the guard page at the
1257 beginning and end of the data space. Just
1258 return the original address. */
1263 /* Doesn't look like one we can optimize. */
1264 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1265 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1266 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1267 regx1 = force_reg (Pmode,
1268 gen_rtx_fmt_ee (GET_CODE (y), Pmode,
1270 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1278 /* For the HPPA, REG and REG+CONST is cost 0
1279 and addresses involving symbolic constants are cost 2.
1281 PIC addresses are very expensive.
1283 It is no coincidence that this has the same structure
1284 as GO_IF_LEGITIMATE_ADDRESS. */
1287 hppa_address_cost (rtx X,
1288 bool speed ATTRIBUTE_UNUSED)
1290 switch (GET_CODE (X))
1303 /* Compute a (partial) cost for rtx X. Return true if the complete
1304 cost has been computed, and false if subexpressions should be
1305 scanned. In either case, *TOTAL contains the cost result. */
1308 hppa_rtx_costs (rtx x, int code, int outer_code, int *total,
1309 bool speed ATTRIBUTE_UNUSED)
1314 if (INTVAL (x) == 0)
1316 else if (INT_14_BITS (x))
1333 if ((x == CONST0_RTX (DFmode) || x == CONST0_RTX (SFmode))
1334 && outer_code != SET)
1341 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1342 *total = COSTS_N_INSNS (3);
1343 else if (TARGET_PA_11 && !TARGET_DISABLE_FPREGS && !TARGET_SOFT_FLOAT)
1344 *total = COSTS_N_INSNS (8);
1346 *total = COSTS_N_INSNS (20);
1350 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1352 *total = COSTS_N_INSNS (14);
1360 *total = COSTS_N_INSNS (60);
1363 case PLUS: /* this includes shNadd insns */
1365 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
1366 *total = COSTS_N_INSNS (3);
1368 *total = COSTS_N_INSNS (1);
1374 *total = COSTS_N_INSNS (1);
1382 /* Ensure mode of ORIG, a REG rtx, is MODE. Returns either ORIG or a
1383 new rtx with the correct mode. */
1385 force_mode (enum machine_mode mode, rtx orig)
1387 if (mode == GET_MODE (orig))
1390 gcc_assert (REGNO (orig) < FIRST_PSEUDO_REGISTER);
1392 return gen_rtx_REG (mode, REGNO (orig));
1395 /* Return 1 if *X is a thread-local symbol. */
1398 pa_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1400 return PA_SYMBOL_REF_TLS_P (*x);
1403 /* Return 1 if X contains a thread-local symbol. */
1406 pa_tls_referenced_p (rtx x)
1408 if (!TARGET_HAVE_TLS)
1411 return for_each_rtx (&x, &pa_tls_symbol_ref_1, 0);
1414 /* Emit insns to move operands[1] into operands[0].
1416 Return 1 if we have written out everything that needs to be done to
1417 do the move.  Otherwise, return 0 and the caller will emit the move normally.
1420 Note SCRATCH_REG may not be in the proper mode depending on how it
1421 will be used. This routine is responsible for creating a new copy
1422 of SCRATCH_REG in the proper mode. */
1425 emit_move_sequence (rtx *operands, enum machine_mode mode, rtx scratch_reg)
1427 register rtx operand0 = operands[0];
1428 register rtx operand1 = operands[1];
1431 /* We can only handle indexed addresses in the destination operand
1432 of floating point stores. Thus, we need to break out indexed
1433 addresses from the destination operand. */
1434 if (GET_CODE (operand0) == MEM && IS_INDEX_ADDR_P (XEXP (operand0, 0)))
1436 gcc_assert (can_create_pseudo_p ());
1438 tem = copy_to_mode_reg (Pmode, XEXP (operand0, 0));
1439 operand0 = replace_equiv_address (operand0, tem);
1442 /* On targets with non-equivalent space registers, break out unscaled
1443 indexed addresses from the source operand before the final CSE.
1444 We have to do this because the REG_POINTER flag is not correctly
1445 carried through various optimization passes and CSE may substitute
1446 a pseudo without the pointer set for one with the pointer set. As
1447 a result, we lose various opportunities to create insns with
1448 unscaled indexed addresses. */
1449 if (!TARGET_NO_SPACE_REGS
1450 && !cse_not_expected
1451 && GET_CODE (operand1) == MEM
1452 && GET_CODE (XEXP (operand1, 0)) == PLUS
1453 && REG_P (XEXP (XEXP (operand1, 0), 0))
1454 && REG_P (XEXP (XEXP (operand1, 0), 1)))
1456 = replace_equiv_address (operand1,
1457 copy_to_mode_reg (Pmode, XEXP (operand1, 0)));
1460 && reload_in_progress && GET_CODE (operand0) == REG
1461 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1462 operand0 = reg_equiv_mem[REGNO (operand0)];
1463 else if (scratch_reg
1464 && reload_in_progress && GET_CODE (operand0) == SUBREG
1465 && GET_CODE (SUBREG_REG (operand0)) == REG
1466 && REGNO (SUBREG_REG (operand0)) >= FIRST_PSEUDO_REGISTER)
1468 /* We must not alter SUBREG_BYTE (operand0) since that would confuse
1469 the code which tracks sets/uses for delete_output_reload. */
1470 rtx temp = gen_rtx_SUBREG (GET_MODE (operand0),
1471 reg_equiv_mem [REGNO (SUBREG_REG (operand0))],
1472 SUBREG_BYTE (operand0));
1473 operand0 = alter_subreg (&temp);
1477 && reload_in_progress && GET_CODE (operand1) == REG
1478 && REGNO (operand1) >= FIRST_PSEUDO_REGISTER)
1479 operand1 = reg_equiv_mem[REGNO (operand1)];
1480 else if (scratch_reg
1481 && reload_in_progress && GET_CODE (operand1) == SUBREG
1482 && GET_CODE (SUBREG_REG (operand1)) == REG
1483 && REGNO (SUBREG_REG (operand1)) >= FIRST_PSEUDO_REGISTER)
1485 /* We must not alter SUBREG_BYTE (operand1) since that would confuse
1486 the code which tracks sets/uses for delete_output_reload. */
1487 rtx temp = gen_rtx_SUBREG (GET_MODE (operand1),
1488 reg_equiv_mem [REGNO (SUBREG_REG (operand1))],
1489 SUBREG_BYTE (operand1));
1490 operand1 = alter_subreg (&temp);
1493 if (scratch_reg && reload_in_progress && GET_CODE (operand0) == MEM
1494 && ((tem = find_replacement (&XEXP (operand0, 0)))
1495 != XEXP (operand0, 0)))
1496 operand0 = replace_equiv_address (operand0, tem);
1498 if (scratch_reg && reload_in_progress && GET_CODE (operand1) == MEM
1499 && ((tem = find_replacement (&XEXP (operand1, 0)))
1500 != XEXP (operand1, 0)))
1501 operand1 = replace_equiv_address (operand1, tem);
1503 /* Handle secondary reloads for loads/stores of FP registers from
1504 REG+D addresses where D does not fit in 5 or 14 bits, including
1505 (subreg (mem (addr))) cases. */
1507 && fp_reg_operand (operand0, mode)
1508 && ((GET_CODE (operand1) == MEM
1509 && !memory_address_p ((GET_MODE_SIZE (mode) == 4 ? SFmode : DFmode),
1510 XEXP (operand1, 0)))
1511 || ((GET_CODE (operand1) == SUBREG
1512 && GET_CODE (XEXP (operand1, 0)) == MEM
1513 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1515 XEXP (XEXP (operand1, 0), 0))))))
1517 if (GET_CODE (operand1) == SUBREG)
1518 operand1 = XEXP (operand1, 0);
1520 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1521 it in WORD_MODE regardless of what mode it was originally given to us.  */
1523 scratch_reg = force_mode (word_mode, scratch_reg);
1525 /* D might not fit in 14 bits either; for such cases load D into scratch reg.  */
1527 if (!memory_address_p (Pmode, XEXP (operand1, 0)))
1529 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1530 emit_move_insn (scratch_reg,
1531 gen_rtx_fmt_ee (GET_CODE (XEXP (operand1, 0)),
1533 XEXP (XEXP (operand1, 0), 0),
1537 emit_move_insn (scratch_reg, XEXP (operand1, 0));
1538 emit_insn (gen_rtx_SET (VOIDmode, operand0,
1539 replace_equiv_address (operand1, scratch_reg)));
1542 else if (scratch_reg
1543 && fp_reg_operand (operand1, mode)
1544 && ((GET_CODE (operand0) == MEM
1545 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1547 XEXP (operand0, 0)))
1548 || ((GET_CODE (operand0) == SUBREG)
1549 && GET_CODE (XEXP (operand0, 0)) == MEM
1550 && !memory_address_p ((GET_MODE_SIZE (mode) == 4
1552 XEXP (XEXP (operand0, 0), 0)))))
1554 if (GET_CODE (operand0) == SUBREG)
1555 operand0 = XEXP (operand0, 0);
1557 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1558 it in WORD_MODE regardless of what mode it was originally given to us.  */
1560 scratch_reg = force_mode (word_mode, scratch_reg);
1562 /* D might not fit in 14 bits either; for such cases load D into scratch reg.  */
1564 if (!memory_address_p (Pmode, XEXP (operand0, 0)))
1566 emit_move_insn (scratch_reg, XEXP (XEXP (operand0, 0), 1));
1567 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand0,
1570 XEXP (XEXP (operand0, 0),
1575 emit_move_insn (scratch_reg, XEXP (operand0, 0));
1576 emit_insn (gen_rtx_SET (VOIDmode,
1577 replace_equiv_address (operand0, scratch_reg),
1581 /* Handle secondary reloads for loads of FP registers from constant
1582 expressions by forcing the constant into memory.
1584 Use scratch_reg to hold the address of the memory location.
1586 The proper fix is to change PREFERRED_RELOAD_CLASS to return
1587 NO_REGS when presented with a const_int and a register class
1588 containing only FP registers. Doing so unfortunately creates
1589 more problems than it solves. Fix this for 2.5. */
1590 else if (scratch_reg
1591 && CONSTANT_P (operand1)
1592 && fp_reg_operand (operand0, mode))
1594 rtx const_mem, xoperands[2];
1596 /* SCRATCH_REG will hold an address and maybe the actual data. We want
1597 it in WORD_MODE regardless of what mode it was originally given to us.  */
1599 scratch_reg = force_mode (word_mode, scratch_reg);
1601 /* Force the constant into memory and put the address of the
1602 memory location into scratch_reg. */
1603 const_mem = force_const_mem (mode, operand1);
1604 xoperands[0] = scratch_reg;
1605 xoperands[1] = XEXP (const_mem, 0);
1606 emit_move_sequence (xoperands, Pmode, 0);
1608 /* Now load the destination register. */
1609 emit_insn (gen_rtx_SET (mode, operand0,
1610 replace_equiv_address (const_mem, scratch_reg)));
1613 /* Handle secondary reloads for SAR. These occur when trying to load
1614 the SAR from memory, FP register, or with a constant. */
1615 else if (scratch_reg
1616 && GET_CODE (operand0) == REG
1617 && REGNO (operand0) < FIRST_PSEUDO_REGISTER
1618 && REGNO_REG_CLASS (REGNO (operand0)) == SHIFT_REGS
1619 && (GET_CODE (operand1) == MEM
1620 || GET_CODE (operand1) == CONST_INT
1621 || (GET_CODE (operand1) == REG
1622 && FP_REG_CLASS_P (REGNO_REG_CLASS (REGNO (operand1))))))
1624 /* D might not fit in 14 bits either; for such cases load D into scratch reg.  */
1626 if (GET_CODE (operand1) == MEM
1627 && !memory_address_p (Pmode, XEXP (operand1, 0)))
1629 /* We are reloading the address into the scratch register, so we
1630 want to make sure the scratch register is a full register. */
1631 scratch_reg = force_mode (word_mode, scratch_reg);
1633 emit_move_insn (scratch_reg, XEXP (XEXP (operand1, 0), 1));
1634 emit_move_insn (scratch_reg, gen_rtx_fmt_ee (GET_CODE (XEXP (operand1,
1637 XEXP (XEXP (operand1, 0),
1641 /* Now we are going to load the scratch register from memory,
1642 we want to load it in the same width as the original MEM,
1643 which must be the same as the width of the ultimate destination, operand0.  */
1645 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1647 emit_move_insn (scratch_reg,
1648 replace_equiv_address (operand1, scratch_reg));
1652 /* We want to load the scratch register using the same mode as
1653 the ultimate destination. */
1654 scratch_reg = force_mode (GET_MODE (operand0), scratch_reg);
1656 emit_move_insn (scratch_reg, operand1);
1659 /* And emit the insn to set the ultimate destination. We know that
1660 the scratch register has the same mode as the destination at this point.  */
1662 emit_move_insn (operand0, scratch_reg);
1665 /* Handle the most common case: storing into a register. */
1666 else if (register_operand (operand0, mode))
1668 if (register_operand (operand1, mode)
1669 || (GET_CODE (operand1) == CONST_INT
1670 && cint_ok_for_move (INTVAL (operand1)))
1671 || (operand1 == CONST0_RTX (mode))
1672 || (GET_CODE (operand1) == HIGH
1673 && !symbolic_operand (XEXP (operand1, 0), VOIDmode))
1674 /* Only `general_operands' can come here, so MEM is ok. */
1675 || GET_CODE (operand1) == MEM)
1677 /* Various sets are created during RTL generation which don't
1678 have the REG_POINTER flag correctly set. After the CSE pass,
1679 instruction recognition can fail if we don't consistently
1680 set this flag when performing register copies. This should
1681 also improve the opportunities for creating insns that use
1682 unscaled indexing. */
1683 if (REG_P (operand0) && REG_P (operand1))
1685 if (REG_POINTER (operand1)
1686 && !REG_POINTER (operand0)
1687 && !HARD_REGISTER_P (operand0))
1688 copy_reg_pointer (operand0, operand1);
1689 else if (REG_POINTER (operand0)
1690 && !REG_POINTER (operand1)
1691 && !HARD_REGISTER_P (operand1))
1692 copy_reg_pointer (operand1, operand0);
1695 /* When MEMs are broken out, the REG_POINTER flag doesn't
1696 get set. In some cases, we can set the REG_POINTER flag
1697 from the declaration for the MEM. */
1698 if (REG_P (operand0)
1699 && GET_CODE (operand1) == MEM
1700 && !REG_POINTER (operand0))
1702 tree decl = MEM_EXPR (operand1);
1704 /* Set the register pointer flag and register alignment
1705 if the declaration for this memory reference is a
1706 pointer type. Fortran indirect argument references
1709 && !(flag_argument_noalias > 1
1710 && TREE_CODE (decl) == INDIRECT_REF
1711 && TREE_CODE (TREE_OPERAND (decl, 0)) == PARM_DECL))
1715 /* If this is a COMPONENT_REF, use the FIELD_DECL from
1717 if (TREE_CODE (decl) == COMPONENT_REF)
1718 decl = TREE_OPERAND (decl, 1);
1720 type = TREE_TYPE (decl);
1721 type = strip_array_types (type);
1723 if (POINTER_TYPE_P (type))
1727 type = TREE_TYPE (type);
1728 /* Using TYPE_ALIGN_OK is rather conservative as
1729 only the ada frontend actually sets it. */
1730 align = (TYPE_ALIGN_OK (type) ? TYPE_ALIGN (type)
1732 mark_reg_pointer (operand0, align);
1737 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1741 else if (GET_CODE (operand0) == MEM)
1743 if (mode == DFmode && operand1 == CONST0_RTX (mode)
1744 && !(reload_in_progress || reload_completed))
1746 rtx temp = gen_reg_rtx (DFmode);
1748 emit_insn (gen_rtx_SET (VOIDmode, temp, operand1));
1749 emit_insn (gen_rtx_SET (VOIDmode, operand0, temp));
1752 if (register_operand (operand1, mode) || operand1 == CONST0_RTX (mode))
1754 /* Run this case quickly. */
1755 emit_insn (gen_rtx_SET (VOIDmode, operand0, operand1));
1758 if (! (reload_in_progress || reload_completed))
1760 operands[0] = validize_mem (operand0);
1761 operands[1] = operand1 = force_reg (mode, operand1);
1765 /* Simplify the source if we need to.
1766 Note we do have to handle function labels here, even though we do
1767 not consider them legitimate constants. Loop optimizations can
1768 call the emit_move_xxx with one as a source. */
1769 if ((GET_CODE (operand1) != HIGH && immediate_operand (operand1, mode))
1770 || function_label_operand (operand1, mode)
1771 || (GET_CODE (operand1) == HIGH
1772 && symbolic_operand (XEXP (operand1, 0), mode)))
1776 if (GET_CODE (operand1) == HIGH)
1779 operand1 = XEXP (operand1, 0);
1781 if (symbolic_operand (operand1, mode))
1783 /* Argh. The assembler and linker can't handle arithmetic
1786 So we force the plabel into memory, load operand0 from
1787 the memory location, then add in the constant part. */
1788 if ((GET_CODE (operand1) == CONST
1789 && GET_CODE (XEXP (operand1, 0)) == PLUS
1790 && function_label_operand (XEXP (XEXP (operand1, 0), 0), Pmode))
1791 || function_label_operand (operand1, mode))
1793 rtx temp, const_part;
1795 /* Figure out what (if any) scratch register to use. */
1796 if (reload_in_progress || reload_completed)
1798 scratch_reg = scratch_reg ? scratch_reg : operand0;
1799 /* SCRATCH_REG will hold an address and maybe the actual
1800 data. We want it in WORD_MODE regardless of what mode it
1801 was originally given to us. */
1802 scratch_reg = force_mode (word_mode, scratch_reg);
1805 scratch_reg = gen_reg_rtx (Pmode);
1807 if (GET_CODE (operand1) == CONST)
1809 /* Save away the constant part of the expression. */
1810 const_part = XEXP (XEXP (operand1, 0), 1);
1811 gcc_assert (GET_CODE (const_part) == CONST_INT);
1813 /* Force the function label into memory. */
1814 temp = force_const_mem (mode, XEXP (XEXP (operand1, 0), 0));
1818 /* No constant part. */
1819 const_part = NULL_RTX;
1821 /* Force the function label into memory. */
1822 temp = force_const_mem (mode, operand1);
1826 /* Get the address of the memory location.  PIC-ify it if necessary.  */
1828 temp = XEXP (temp, 0);
1830 temp = legitimize_pic_address (temp, mode, scratch_reg);
1832 /* Put the address of the memory location into our destination register.  */
1835 emit_move_sequence (operands, mode, scratch_reg);
1837 /* Now load from the memory location into our destination register.  */
1839 operands[1] = gen_rtx_MEM (Pmode, operands[0]);
1840 emit_move_sequence (operands, mode, scratch_reg);
1842 /* And add back in the constant part. */
1843 if (const_part != NULL_RTX)
1844 expand_inc (operand0, const_part);
1853 if (reload_in_progress || reload_completed)
1855 temp = scratch_reg ? scratch_reg : operand0;
1856 /* TEMP will hold an address and maybe the actual
1857 data. We want it in WORD_MODE regardless of what mode it
1858 was originally given to us. */
1859 temp = force_mode (word_mode, temp);
1862 temp = gen_reg_rtx (Pmode);
1864 /* (const (plus (symbol) (const_int))) must be forced to
1865 memory during/after reload if the const_int will not fit in 14 bits.  */
1867 if (GET_CODE (operand1) == CONST
1868 && GET_CODE (XEXP (operand1, 0)) == PLUS
1869 && GET_CODE (XEXP (XEXP (operand1, 0), 1)) == CONST_INT
1870 && !INT_14_BITS (XEXP (XEXP (operand1, 0), 1))
1871 && (reload_completed || reload_in_progress)
1874 rtx const_mem = force_const_mem (mode, operand1);
1875 operands[1] = legitimize_pic_address (XEXP (const_mem, 0),
1877 operands[1] = replace_equiv_address (const_mem, operands[1]);
1878 emit_move_sequence (operands, mode, temp);
1882 operands[1] = legitimize_pic_address (operand1, mode, temp);
1883 if (REG_P (operand0) && REG_P (operands[1]))
1884 copy_reg_pointer (operand0, operands[1]);
1885 emit_insn (gen_rtx_SET (VOIDmode, operand0, operands[1]));
1888 /* On the HPPA, references to data space are supposed to use dp,
1889 register 27, but showing it in the RTL inhibits various cse
1890 and loop optimizations. */
1895 if (reload_in_progress || reload_completed)
1897 temp = scratch_reg ? scratch_reg : operand0;
1898 /* TEMP will hold an address and maybe the actual
1899 data. We want it in WORD_MODE regardless of what mode it
1900 was originally given to us. */
1901 temp = force_mode (word_mode, temp);
1904 temp = gen_reg_rtx (mode);
1906 /* Loading a SYMBOL_REF into a register makes that register
1907 safe to be used as the base in an indexed address.
1909 Don't mark hard registers though. That loses. */
1910 if (GET_CODE (operand0) == REG
1911 && REGNO (operand0) >= FIRST_PSEUDO_REGISTER)
1912 mark_reg_pointer (operand0, BITS_PER_UNIT);
1913 if (REGNO (temp) >= FIRST_PSEUDO_REGISTER)
1914 mark_reg_pointer (temp, BITS_PER_UNIT);
1917 set = gen_rtx_SET (mode, operand0, temp);
1919 set = gen_rtx_SET (VOIDmode,
1921 gen_rtx_LO_SUM (mode, temp, operand1));
1923 emit_insn (gen_rtx_SET (VOIDmode,
1925 gen_rtx_HIGH (mode, operand1)));
1931 else if (pa_tls_referenced_p (operand1))
1936 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
1938 addend = XEXP (XEXP (tmp, 0), 1);
1939 tmp = XEXP (XEXP (tmp, 0), 0);
1942 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
1943 tmp = legitimize_tls_address (tmp);
1946 tmp = gen_rtx_PLUS (mode, tmp, addend);
1947 tmp = force_operand (tmp, operands[0]);
1951 else if (GET_CODE (operand1) != CONST_INT
1952 || !cint_ok_for_move (INTVAL (operand1)))
1956 HOST_WIDE_INT value = 0;
1957 HOST_WIDE_INT insv = 0;
1960 if (GET_CODE (operand1) == CONST_INT)
1961 value = INTVAL (operand1);
1964 && GET_CODE (operand1) == CONST_INT
1965 && HOST_BITS_PER_WIDE_INT > 32
1966 && GET_MODE_BITSIZE (GET_MODE (operand0)) > 32)
1970 /* Extract the low order 32 bits of the value and sign extend.
1971 If the new value is the same as the original value, we can
1972 use the original value as-is.  If the new value is
1973 different, we use it and insert the most-significant 32-bits
1974 of the original value into the final result. */
1975 nval = ((value & (((HOST_WIDE_INT) 2 << 31) - 1))
1976 ^ ((HOST_WIDE_INT) 1 << 31)) - ((HOST_WIDE_INT) 1 << 31);
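/* Explanatory note: the mask above keeps the low 32 bits of VALUE, and
   the XOR/subtract pair then sign-extends bit 31, yielding the value the
   low word represents as a signed 32-bit quantity without relying on
   implementation-defined right shifts of negative numbers.  */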
1979 #if HOST_BITS_PER_WIDE_INT > 32
1980 insv = value >= 0 ? value >> 32 : ~(~value >> 32);
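/* Explanatory note: this conditional is a portable arithmetic shift
   right by 32; the ~(~value >> 32) form keeps the upper bits set for
   negative VALUE without depending on how the host shifts negative
   integers.  */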
1984 operand1 = GEN_INT (nval);
1988 if (reload_in_progress || reload_completed)
1989 temp = scratch_reg ? scratch_reg : operand0;
1991 temp = gen_reg_rtx (mode);
1993 /* We don't directly split DImode constants on 32-bit targets
1994 because PLUS uses an 11-bit immediate and the insn sequence
1995 generated is not as efficient as the one using HIGH/LO_SUM. */
1996 if (GET_CODE (operand1) == CONST_INT
1997 && GET_MODE_BITSIZE (mode) <= BITS_PER_WORD
1998 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT
2001 /* Directly break constant into high and low parts. This
2002 provides better optimization opportunities because various
2003 passes recognize constants split with PLUS but not LO_SUM.
2004 We use a 14-bit signed low part except when the addition
2005 of 0x4000 to the high part might change the sign of the result.  */
2007 HOST_WIDE_INT low = value & 0x3fff;
2008 HOST_WIDE_INT high = value & ~ 0x3fff;
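/* An illustrative example (values chosen here, not from the original
   source): for VALUE = 0x12345678, LOW is 0x1678 and HIGH is
   0x12344000; HIGH can be loaded with a single ldil and LOW added with
   a 14-bit PLUS, so no LO_SUM is needed.  */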
2012 if (high == 0x7fffc000 || (mode == HImode && high == 0x4000))
2020 emit_insn (gen_rtx_SET (VOIDmode, temp, GEN_INT (high)));
2021 operands[1] = gen_rtx_PLUS (mode, temp, GEN_INT (low));
2025 emit_insn (gen_rtx_SET (VOIDmode, temp,
2026 gen_rtx_HIGH (mode, operand1)));
2027 operands[1] = gen_rtx_LO_SUM (mode, temp, operand1);
2030 insn = emit_move_insn (operands[0], operands[1]);
2032 /* Now insert the most significant 32 bits of the value
2033 into the register. When we don't have a second register
2034 available, it could take up to nine instructions to load
2035 a 64-bit integer constant. Prior to reload, we force
2036 constants that would take more than three instructions
2037 to load to the constant pool. During and after reload,
2038 we have to handle all possible values. */
2041 /* Use a HIGH/LO_SUM/INSV sequence if we have a second
2042 register and the value to be inserted is outside the
2043 range that can be loaded with three depdi instructions. */
2044 if (temp != operand0 && (insv >= 16384 || insv < -16384))
2046 operand1 = GEN_INT (insv);
2048 emit_insn (gen_rtx_SET (VOIDmode, temp,
2049 gen_rtx_HIGH (mode, operand1)));
2050 emit_move_insn (temp, gen_rtx_LO_SUM (mode, temp, operand1));
2051 emit_insn (gen_insv (operand0, GEN_INT (32),
2056 int len = 5, pos = 27;
2058 /* Insert the bits using the depdi instruction. */
2061 HOST_WIDE_INT v5 = ((insv & 31) ^ 16) - 16;
2062 HOST_WIDE_INT sign = v5 < 0;
2064 /* Left extend the insertion. */
2065 insv = (insv >= 0 ? insv >> len : ~(~insv >> len));
2066 while (pos > 0 && (insv & 1) == sign)
2068 insv = (insv >= 0 ? insv >> 1 : ~(~insv >> 1));
2073 emit_insn (gen_insv (operand0, GEN_INT (len),
2074 GEN_INT (pos), GEN_INT (v5)));
2076 len = pos > 0 && pos < 5 ? pos : 5;
2082 set_unique_reg_note (insn, REG_EQUAL, op1);
2087 /* Now have insn-emit do whatever it normally does. */
2091 /* Examine EXP and return nonzero if it contains an ADDR_EXPR (meaning
2092 it will need a link/runtime reloc). */
2095 reloc_needed (tree exp)
2099 switch (TREE_CODE (exp))
2104 case POINTER_PLUS_EXPR:
2107 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2108 reloc |= reloc_needed (TREE_OPERAND (exp, 1));
2112 case NON_LVALUE_EXPR:
2113 reloc = reloc_needed (TREE_OPERAND (exp, 0));
2119 unsigned HOST_WIDE_INT ix;
2121 FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), ix, value)
2123 reloc |= reloc_needed (value);
2136 /* Does operand (which is a symbolic_operand) live in text space?
2137 If so, SYMBOL_REF_FLAG, which is set by pa_encode_section_info, will be true. */
2141 read_only_operand (rtx operand, enum machine_mode mode ATTRIBUTE_UNUSED)
2143 if (GET_CODE (operand) == CONST)
2144 operand = XEXP (XEXP (operand, 0), 0);
2147 if (GET_CODE (operand) == SYMBOL_REF)
2148 return SYMBOL_REF_FLAG (operand) && !CONSTANT_POOL_ADDRESS_P (operand);
2152 if (GET_CODE (operand) == SYMBOL_REF)
2153 return SYMBOL_REF_FLAG (operand) || CONSTANT_POOL_ADDRESS_P (operand);
2159 /* Return the best assembler insn template
2160 for moving operands[1] into operands[0] as a fullword. */
2162 singlemove_string (rtx *operands)
2164 HOST_WIDE_INT intval;
2166 if (GET_CODE (operands[0]) == MEM)
2167 return "stw %r1,%0";
2168 if (GET_CODE (operands[1]) == MEM)
2170 if (GET_CODE (operands[1]) == CONST_DOUBLE)
2175 gcc_assert (GET_MODE (operands[1]) == SFmode);
2177 /* Translate the CONST_DOUBLE to a CONST_INT with the same target bit pattern. */
2179 REAL_VALUE_FROM_CONST_DOUBLE (d, operands[1]);
2180 REAL_VALUE_TO_TARGET_SINGLE (d, i);
2182 operands[1] = GEN_INT (i);
2183 /* Fall through to CONST_INT case. */
2185 if (GET_CODE (operands[1]) == CONST_INT)
2187 intval = INTVAL (operands[1]);
2189 if (VAL_14_BITS_P (intval))
2191 else if ((intval & 0x7ff) == 0)
2192 return "ldil L'%1,%0";
2193 else if (zdepi_cint_p (intval))
2194 return "{zdepi %Z1,%0|depwi,z %Z1,%0}";
2196 return "ldil L'%1,%0\n\tldo R'%1(%0),%0";
2198 return "copy %1,%0";
2202 /* Compute position (in OP[1]) and width (in OP[2])
2203 useful for copying IMM to a register using the zdepi
2204 instructions. Store the immediate value to insert in OP[0]. */
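/* For example (illustrative), IMM = 0x000003e0 is the contiguous
   bitstring occupying bits 5..9: the width is 5, the sign-extended
   5-bit immediate is -1 (all ones), and the position of the field,
   counted from the most significant end, is 31 - 5 = 26.  The
   resulting "{zdepi|depwi,z} -1,26,5,%r" deposits the string into an
   otherwise zeroed register.  */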
2206 compute_zdepwi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2210 /* Find the least significant set bit in IMM. */
2211 for (lsb = 0; lsb < 32; lsb++)
2218 /* Choose variants based on *sign* of the 5-bit field. */
2219 if ((imm & 0x10) == 0)
2220 len = (lsb <= 28) ? 4 : 32 - lsb;
2223 /* Find the width of the bitstring in IMM. */
2224 for (len = 5; len < 32; len++)
2226 if ((imm & (1 << len)) == 0)
2230 /* Sign extend IMM as a 5-bit value. */
2231 imm = (imm & 0xf) - 0x10;
2239 /* Compute position (in OP[1]) and width (in OP[2])
2240 useful for copying IMM to a register using the depdi,z
2241 instructions. Store the immediate value to insert in OP[0]. */
2243 compute_zdepdi_operands (unsigned HOST_WIDE_INT imm, unsigned *op)
2245 HOST_WIDE_INT lsb, len;
2247 /* Find the least significant set bit in IMM. */
2248 for (lsb = 0; lsb < HOST_BITS_PER_WIDE_INT; lsb++)
2255 /* Choose variants based on *sign* of the 5-bit field. */
2256 if ((imm & 0x10) == 0)
2257 len = ((lsb <= HOST_BITS_PER_WIDE_INT - 4)
2258 ? 4 : HOST_BITS_PER_WIDE_INT - lsb);
2261 /* Find the width of the bitstring in IMM. */
2262 for (len = 5; len < HOST_BITS_PER_WIDE_INT; len++)
2264 if ((imm & ((unsigned HOST_WIDE_INT) 1 << len)) == 0)
2268 /* Sign extend IMM as a 5-bit value. */
2269 imm = (imm & 0xf) - 0x10;
2277 /* Output assembler code to perform a doubleword move insn
2278 with operands OPERANDS. */
2281 output_move_double (rtx *operands)
2283 enum { REGOP, OFFSOP, MEMOP, CNSTOP, RNDOP } optype0, optype1;
2285 rtx addreg0 = 0, addreg1 = 0;
2287 /* First classify both operands. */
2289 if (REG_P (operands[0]))
2291 else if (offsettable_memref_p (operands[0]))
2293 else if (GET_CODE (operands[0]) == MEM)
2298 if (REG_P (operands[1]))
2300 else if (CONSTANT_P (operands[1]))
2302 else if (offsettable_memref_p (operands[1]))
2304 else if (GET_CODE (operands[1]) == MEM)
2309 /* Check for the cases that the operand constraints are not
2310 supposed to allow.
2311 gcc_assert (optype0 == REGOP || optype1 == REGOP);
2313 /* Handle copies between general and floating registers. */
2315 if (optype0 == REGOP && optype1 == REGOP
2316 && FP_REG_P (operands[0]) ^ FP_REG_P (operands[1]))
2318 if (FP_REG_P (operands[0]))
2320 output_asm_insn ("{stws|stw} %1,-16(%%sp)", operands);
2321 output_asm_insn ("{stws|stw} %R1,-12(%%sp)", operands);
2322 return "{fldds|fldd} -16(%%sp),%0";
2326 output_asm_insn ("{fstds|fstd} %1,-16(%%sp)", operands);
2327 output_asm_insn ("{ldws|ldw} -16(%%sp),%0", operands);
2328 return "{ldws|ldw} -12(%%sp),%R0";
2332 /* Handle auto decrementing and incrementing loads and stores
2333 specifically, since the structure of the function doesn't work
2334 for them without major modification. Do it better when we teach
2335 this port about the general inc/dec addressing of the PA.
2336 (This was written by tege. Chide him if it doesn't work.) */
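/* A reminder of the completers used below (register numbers are only
   illustrative): ",ma" (modify after) uses the unmodified base for the
   access and then adds the displacement to the base register, while
   ",mb" (modify before) updates the base first.  So the POST_INC store
   sequence

       stw,ma %r5,8(%r26)
       stw %r6,-4(%r26)

   writes the first word at the original address, advances %r26 past
   both words, and then writes the second word four bytes below the new
   base.  */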
2338 if (optype0 == MEMOP)
2340 /* We have to output the address syntax ourselves, since print_operand
2341 doesn't deal with the addresses we want to use. Fix this later. */
2343 rtx addr = XEXP (operands[0], 0);
2344 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2346 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2348 operands[0] = XEXP (addr, 0);
2349 gcc_assert (GET_CODE (operands[1]) == REG
2350 && GET_CODE (operands[0]) == REG);
2352 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2354 /* No overlap between high target register and address
2355 register. (We do this in a non-obvious way to
2356 save a register file writeback) */
2357 if (GET_CODE (addr) == POST_INC)
2358 return "{stws|stw},ma %1,8(%0)\n\tstw %R1,-4(%0)";
2359 return "{stws|stw},ma %1,-8(%0)\n\tstw %R1,12(%0)";
2361 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2363 rtx high_reg = gen_rtx_SUBREG (SImode, operands[1], 0);
2365 operands[0] = XEXP (addr, 0);
2366 gcc_assert (GET_CODE (operands[1]) == REG
2367 && GET_CODE (operands[0]) == REG);
2369 gcc_assert (!reg_overlap_mentioned_p (high_reg, addr));
2370 /* No overlap between high target register and address
2371 register. (We do this in a non-obvious way to save a
2372 register file writeback) */
2373 if (GET_CODE (addr) == PRE_INC)
2374 return "{stws|stw},mb %1,8(%0)\n\tstw %R1,4(%0)";
2375 return "{stws|stw},mb %1,-8(%0)\n\tstw %R1,4(%0)";
2378 if (optype1 == MEMOP)
2380 /* We have to output the address syntax ourselves, since print_operand
2381 doesn't deal with the addresses we want to use. Fix this later. */
2383 rtx addr = XEXP (operands[1], 0);
2384 if (GET_CODE (addr) == POST_INC || GET_CODE (addr) == POST_DEC)
2386 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2388 operands[1] = XEXP (addr, 0);
2389 gcc_assert (GET_CODE (operands[0]) == REG
2390 && GET_CODE (operands[1]) == REG);
2392 if (!reg_overlap_mentioned_p (high_reg, addr))
2394 /* No overlap between high target register and address
2395 register. (We do this in a non-obvious way to
2396 save a register file writeback) */
2397 if (GET_CODE (addr) == POST_INC)
2398 return "{ldws|ldw},ma 8(%1),%0\n\tldw -4(%1),%R0";
2399 return "{ldws|ldw},ma -8(%1),%0\n\tldw 12(%1),%R0";
2403 /* This is an undefined situation. We should load into the
2404 address register *and* update that register. Probably
2405 we don't need to handle this at all. */
2406 if (GET_CODE (addr) == POST_INC)
2407 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma 8(%1),%0";
2408 return "ldw 4(%1),%R0\n\t{ldws|ldw},ma -8(%1),%0";
2411 else if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
2413 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2415 operands[1] = XEXP (addr, 0);
2416 gcc_assert (GET_CODE (operands[0]) == REG
2417 && GET_CODE (operands[1]) == REG);
2419 if (!reg_overlap_mentioned_p (high_reg, addr))
2421 /* No overlap between high target register and address
2422 register. (We do this in a non-obvious way to
2423 save a register file writeback) */
2424 if (GET_CODE (addr) == PRE_INC)
2425 return "{ldws|ldw},mb 8(%1),%0\n\tldw 4(%1),%R0";
2426 return "{ldws|ldw},mb -8(%1),%0\n\tldw 4(%1),%R0";
2430 /* This is an undefined situation. We should load into the
2431 address register *and* update that register. Probably
2432 we don't need to handle this at all. */
2433 if (GET_CODE (addr) == PRE_INC)
2434 return "ldw 12(%1),%R0\n\t{ldws|ldw},mb 8(%1),%0";
2435 return "ldw -4(%1),%R0\n\t{ldws|ldw},mb -8(%1),%0";
2438 else if (GET_CODE (addr) == PLUS
2439 && GET_CODE (XEXP (addr, 0)) == MULT)
2442 rtx high_reg = gen_rtx_SUBREG (SImode, operands[0], 0);
2444 if (!reg_overlap_mentioned_p (high_reg, addr))
2446 xoperands[0] = high_reg;
2447 xoperands[1] = XEXP (addr, 1);
2448 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2449 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2450 output_asm_insn ("{sh%O3addl %2,%1,%0|shladd,l %2,%O3,%1,%0}",
2452 return "ldw 4(%0),%R0\n\tldw 0(%0),%0";
2456 xoperands[0] = high_reg;
2457 xoperands[1] = XEXP (addr, 1);
2458 xoperands[2] = XEXP (XEXP (addr, 0), 0);
2459 xoperands[3] = XEXP (XEXP (addr, 0), 1);
2460 output_asm_insn ("{sh%O3addl %2,%1,%R0|shladd,l %2,%O3,%1,%R0}",
2462 return "ldw 0(%R0),%0\n\tldw 4(%R0),%R0";
2467 /* If an operand is an unoffsettable memory ref, find a register
2468 we can increment temporarily to make it refer to the second word. */
2470 if (optype0 == MEMOP)
2471 addreg0 = find_addr_reg (XEXP (operands[0], 0));
2473 if (optype1 == MEMOP)
2474 addreg1 = find_addr_reg (XEXP (operands[1], 0));
2476 /* Ok, we can do one word at a time.
2477 Normally we do the low-numbered word first.
2479 In either case, set up in LATEHALF the operands to use
2480 for the high-numbered word and in some cases alter the
2481 operands in OPERANDS to be suitable for the low-numbered word. */
2483 if (optype0 == REGOP)
2484 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2485 else if (optype0 == OFFSOP)
2486 latehalf[0] = adjust_address (operands[0], SImode, 4);
2488 latehalf[0] = operands[0];
2490 if (optype1 == REGOP)
2491 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
2492 else if (optype1 == OFFSOP)
2493 latehalf[1] = adjust_address (operands[1], SImode, 4);
2494 else if (optype1 == CNSTOP)
2495 split_double (operands[1], &operands[1], &latehalf[1]);
2497 latehalf[1] = operands[1];
2499 /* If the first move would clobber the source of the second one,
2500 do them in the other order.
2502 This can happen in two cases:
2504 mem -> register where the first half of the destination register
2505 is the same register used in the memory's address. Reload
2506 can create such insns.
2508 mem in this case will be either register indirect or register
2509 indirect plus a valid offset.
2511 register -> register move where REGNO(dst) == REGNO(src + 1)
2512 someone (Tim/Tege?) claimed this can happen for parameter loads.
2514 Handle mem -> register case first. */
2515 if (optype0 == REGOP
2516 && (optype1 == MEMOP || optype1 == OFFSOP)
2517 && refers_to_regno_p (REGNO (operands[0]), REGNO (operands[0]) + 1,
2520 /* Do the late half first. */
2522 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2523 output_asm_insn (singlemove_string (latehalf), latehalf);
2527 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2528 return singlemove_string (operands);
2531 /* Now handle register -> register case. */
2532 if (optype0 == REGOP && optype1 == REGOP
2533 && REGNO (operands[0]) == REGNO (operands[1]) + 1)
2535 output_asm_insn (singlemove_string (latehalf), latehalf);
2536 return singlemove_string (operands);
2539 /* Normal case: do the two words, low-numbered first. */
2541 output_asm_insn (singlemove_string (operands), operands);
2543 /* Make any unoffsettable addresses point at high-numbered word. */
2545 output_asm_insn ("ldo 4(%0),%0", &addreg0);
2547 output_asm_insn ("ldo 4(%0),%0", &addreg1);
2550 output_asm_insn (singlemove_string (latehalf), latehalf);
2552 /* Undo the adds we just did. */
2554 output_asm_insn ("ldo -4(%0),%0", &addreg0);
2556 output_asm_insn ("ldo -4(%0),%0", &addreg1);
2562 output_fp_move_double (rtx *operands)
2564 if (FP_REG_P (operands[0]))
2566 if (FP_REG_P (operands[1])
2567 || operands[1] == CONST0_RTX (GET_MODE (operands[0])))
2568 output_asm_insn ("fcpy,dbl %f1,%0", operands);
2570 output_asm_insn ("fldd%F1 %1,%0", operands);
2572 else if (FP_REG_P (operands[1]))
2574 output_asm_insn ("fstd%F0 %1,%0", operands);
2580 gcc_assert (operands[1] == CONST0_RTX (GET_MODE (operands[0])));
2582 /* This is a pain. You have to be prepared to deal with an
2583 arbitrary address here including pre/post increment/decrement,
2585 so avoid this in the MD. */
2586 gcc_assert (GET_CODE (operands[0]) == REG);
2588 xoperands[1] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
2589 xoperands[0] = operands[0];
2590 output_asm_insn ("copy %%r0,%0\n\tcopy %%r0,%1", xoperands);
2595 /* Return a REG that occurs in ADDR with coefficient 1.
2596 ADDR can be effectively incremented by incrementing REG. */
2599 find_addr_reg (rtx addr)
2601 while (GET_CODE (addr) == PLUS)
2603 if (GET_CODE (XEXP (addr, 0)) == REG)
2604 addr = XEXP (addr, 0);
2605 else if (GET_CODE (XEXP (addr, 1)) == REG)
2606 addr = XEXP (addr, 1);
2607 else if (CONSTANT_P (XEXP (addr, 0)))
2608 addr = XEXP (addr, 1);
2609 else if (CONSTANT_P (XEXP (addr, 1)))
2610 addr = XEXP (addr, 0);
2614 gcc_assert (GET_CODE (addr) == REG);
2618 /* Emit code to perform a block move.
2620 OPERANDS[0] is the destination pointer as a REG, clobbered.
2621 OPERANDS[1] is the source pointer as a REG, clobbered.
2622 OPERANDS[2] is a register for temporary storage.
2623 OPERANDS[3] is a register for temporary storage.
2624 OPERANDS[4] is the size as a CONST_INT
2625 OPERANDS[5] is the alignment safe to use, as a CONST_INT.
2626 OPERANDS[6] is another temporary register. */
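/* Sketch of the code emitted below for the word-aligned case (register
   numbers are illustrative): with %r26/%r25 as the pointers, %r22 as
   the counter and %r20/%r19 as scratch, the copy loop is roughly

       ldi n_bytes-8,%r22
   L:  ldw,ma 4(%r25),%r20
       ldw,ma 4(%r25),%r19
       stw,ma %r20,4(%r26)
       addib,>= -8,%r22,L
       stw,ma %r19,4(%r26)

   The addib decrements the counter and branches back while the result
   is still non-negative; the second store sits in its delay slot, and
   the ".-12" used in the source encodes the branch target as three
   instructions (12 bytes) back.  */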
2629 output_block_move (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2631 int align = INTVAL (operands[5]);
2632 unsigned long n_bytes = INTVAL (operands[4]);
2634 /* We can't move more than a word at a time because the PA
2635 has no integer move insns longer than a word. (Could use fp mem ops?) */
2636 if (align > (TARGET_64BIT ? 8 : 4))
2637 align = (TARGET_64BIT ? 8 : 4);
2639 /* Note that we know each loop below will execute at least twice
2640 (else we would have open-coded the copy). */
2644 /* Pre-adjust the loop counter. */
2645 operands[4] = GEN_INT (n_bytes - 16);
2646 output_asm_insn ("ldi %4,%2", operands);
2649 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2650 output_asm_insn ("ldd,ma 8(%1),%6", operands);
2651 output_asm_insn ("std,ma %3,8(%0)", operands);
2652 output_asm_insn ("addib,>= -16,%2,.-12", operands);
2653 output_asm_insn ("std,ma %6,8(%0)", operands);
2655 /* Handle the residual. There could be up to 15 bytes of
2656 residual to copy! */
2657 if (n_bytes % 16 != 0)
2659 operands[4] = GEN_INT (n_bytes % 8);
2660 if (n_bytes % 16 >= 8)
2661 output_asm_insn ("ldd,ma 8(%1),%3", operands);
2662 if (n_bytes % 8 != 0)
2663 output_asm_insn ("ldd 0(%1),%6", operands);
2664 if (n_bytes % 16 >= 8)
2665 output_asm_insn ("std,ma %3,8(%0)", operands);
2666 if (n_bytes % 8 != 0)
2667 output_asm_insn ("stdby,e %6,%4(%0)", operands);
2672 /* Pre-adjust the loop counter. */
2673 operands[4] = GEN_INT (n_bytes - 8);
2674 output_asm_insn ("ldi %4,%2", operands);
2677 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2678 output_asm_insn ("{ldws|ldw},ma 4(%1),%6", operands);
2679 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2680 output_asm_insn ("addib,>= -8,%2,.-12", operands);
2681 output_asm_insn ("{stws|stw},ma %6,4(%0)", operands);
2683 /* Handle the residual. There could be up to 7 bytes of
2684 residual to copy! */
2685 if (n_bytes % 8 != 0)
2687 operands[4] = GEN_INT (n_bytes % 4);
2688 if (n_bytes % 8 >= 4)
2689 output_asm_insn ("{ldws|ldw},ma 4(%1),%3", operands);
2690 if (n_bytes % 4 != 0)
2691 output_asm_insn ("ldw 0(%1),%6", operands);
2692 if (n_bytes % 8 >= 4)
2693 output_asm_insn ("{stws|stw},ma %3,4(%0)", operands);
2694 if (n_bytes % 4 != 0)
2695 output_asm_insn ("{stbys|stby},e %6,%4(%0)", operands);
2700 /* Pre-adjust the loop counter. */
2701 operands[4] = GEN_INT (n_bytes - 4);
2702 output_asm_insn ("ldi %4,%2", operands);
2705 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2706 output_asm_insn ("{ldhs|ldh},ma 2(%1),%6", operands);
2707 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2708 output_asm_insn ("addib,>= -4,%2,.-12", operands);
2709 output_asm_insn ("{sths|sth},ma %6,2(%0)", operands);
2711 /* Handle the residual. */
2712 if (n_bytes % 4 != 0)
2714 if (n_bytes % 4 >= 2)
2715 output_asm_insn ("{ldhs|ldh},ma 2(%1),%3", operands);
2716 if (n_bytes % 2 != 0)
2717 output_asm_insn ("ldb 0(%1),%6", operands);
2718 if (n_bytes % 4 >= 2)
2719 output_asm_insn ("{sths|sth},ma %3,2(%0)", operands);
2720 if (n_bytes % 2 != 0)
2721 output_asm_insn ("stb %6,0(%0)", operands);
2726 /* Pre-adjust the loop counter. */
2727 operands[4] = GEN_INT (n_bytes - 2);
2728 output_asm_insn ("ldi %4,%2", operands);
2731 output_asm_insn ("{ldbs|ldb},ma 1(%1),%3", operands);
2732 output_asm_insn ("{ldbs|ldb},ma 1(%1),%6", operands);
2733 output_asm_insn ("{stbs|stb},ma %3,1(%0)", operands);
2734 output_asm_insn ("addib,>= -2,%2,.-12", operands);
2735 output_asm_insn ("{stbs|stb},ma %6,1(%0)", operands);
2737 /* Handle the residual. */
2738 if (n_bytes % 2 != 0)
2740 output_asm_insn ("ldb 0(%1),%3", operands);
2741 output_asm_insn ("stb %3,0(%0)", operands);
2750 /* Count the number of insns necessary to handle this block move.
2752 Basic structure is the same as output_block_move, except that we
2753 count insns rather than emit them. */
2756 compute_movmem_length (rtx insn)
2758 rtx pat = PATTERN (insn);
2759 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 7), 0));
2760 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 6), 0));
2761 unsigned int n_insns = 0;
2763 /* We can't move more than a word at a time because the PA
2764 has no integer move insns longer than a word. (Could use fp mem ops?) */
2765 if (align > (TARGET_64BIT ? 8 : 4))
2766 align = (TARGET_64BIT ? 8 : 4);
2768 /* The basic copying loop. */
2772 if (n_bytes % (2 * align) != 0)
2774 if ((n_bytes % (2 * align)) >= align)
2777 if ((n_bytes % align) != 0)
2781 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2785 /* Emit code to perform a block clear.
2787 OPERANDS[0] is the destination pointer as a REG, clobbered.
2788 OPERANDS[1] is a register for temporary storage.
2789 OPERANDS[2] is the size as a CONST_INT
2790 OPERANDS[3] is the alignment safe to use, as a CONST_INT. */
2793 output_block_clear (rtx *operands, int size_is_constant ATTRIBUTE_UNUSED)
2795 int align = INTVAL (operands[3]);
2796 unsigned long n_bytes = INTVAL (operands[2]);
2798 /* We can't clear more than a word at a time because the PA
2799 has no integer move insns longer than a word. */
2800 if (align > (TARGET_64BIT ? 8 : 4))
2801 align = (TARGET_64BIT ? 8 : 4);
2803 /* Note that we know each loop below will execute at least twice
2804 (else we would have open-coded the copy). */
2808 /* Pre-adjust the loop counter. */
2809 operands[2] = GEN_INT (n_bytes - 16);
2810 output_asm_insn ("ldi %2,%1", operands);
2813 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2814 output_asm_insn ("addib,>= -16,%1,.-4", operands);
2815 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2817 /* Handle the residual. There could be up to 15 bytes of
2818 residual to copy! */
2819 if (n_bytes % 16 != 0)
2821 operands[2] = GEN_INT (n_bytes % 8);
2822 if (n_bytes % 16 >= 8)
2823 output_asm_insn ("std,ma %%r0,8(%0)", operands);
2824 if (n_bytes % 8 != 0)
2825 output_asm_insn ("stdby,e %%r0,%2(%0)", operands);
2830 /* Pre-adjust the loop counter. */
2831 operands[2] = GEN_INT (n_bytes - 8);
2832 output_asm_insn ("ldi %2,%1", operands);
2835 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2836 output_asm_insn ("addib,>= -8,%1,.-4", operands);
2837 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2839 /* Handle the residual. There could be up to 7 bytes of
2840 residual to copy! */
2841 if (n_bytes % 8 != 0)
2843 operands[2] = GEN_INT (n_bytes % 4);
2844 if (n_bytes % 8 >= 4)
2845 output_asm_insn ("{stws|stw},ma %%r0,4(%0)", operands);
2846 if (n_bytes % 4 != 0)
2847 output_asm_insn ("{stbys|stby},e %%r0,%2(%0)", operands);
2852 /* Pre-adjust the loop counter. */
2853 operands[2] = GEN_INT (n_bytes - 4);
2854 output_asm_insn ("ldi %2,%1", operands);
2857 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2858 output_asm_insn ("addib,>= -4,%1,.-4", operands);
2859 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2861 /* Handle the residual. */
2862 if (n_bytes % 4 != 0)
2864 if (n_bytes % 4 >= 2)
2865 output_asm_insn ("{sths|sth},ma %%r0,2(%0)", operands);
2866 if (n_bytes % 2 != 0)
2867 output_asm_insn ("stb %%r0,0(%0)", operands);
2872 /* Pre-adjust the loop counter. */
2873 operands[2] = GEN_INT (n_bytes - 2);
2874 output_asm_insn ("ldi %2,%1", operands);
2877 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2878 output_asm_insn ("addib,>= -2,%1,.-4", operands);
2879 output_asm_insn ("{stbs|stb},ma %%r0,1(%0)", operands);
2881 /* Handle the residual. */
2882 if (n_bytes % 2 != 0)
2883 output_asm_insn ("stb %%r0,0(%0)", operands);
2892 /* Count the number of insns necessary to handle this block clear.
2894 Basic structure is the same as output_block_clear, except that we
2895 count insns rather than emit them. */
2898 compute_clrmem_length (rtx insn)
2900 rtx pat = PATTERN (insn);
2901 unsigned int align = INTVAL (XEXP (XVECEXP (pat, 0, 4), 0));
2902 unsigned long n_bytes = INTVAL (XEXP (XVECEXP (pat, 0, 3), 0));
2903 unsigned int n_insns = 0;
2905 /* We can't clear more than a word at a time because the PA
2906 has no integer move insns longer than a word. */
2907 if (align > (TARGET_64BIT ? 8 : 4))
2908 align = (TARGET_64BIT ? 8 : 4);
2910 /* The basic loop. */
2914 if (n_bytes % (2 * align) != 0)
2916 if ((n_bytes % (2 * align)) >= align)
2919 if ((n_bytes % align) != 0)
2923 /* Lengths are expressed in bytes now; each insn is 4 bytes. */
2929 output_and (rtx *operands)
2931 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2933 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2934 int ls0, ls1, ms0, p, len;
2936 for (ls0 = 0; ls0 < 32; ls0++)
2937 if ((mask & (1 << ls0)) == 0)
2940 for (ls1 = ls0; ls1 < 32; ls1++)
2941 if ((mask & (1 << ls1)) != 0)
2944 for (ms0 = ls1; ms0 < 32; ms0++)
2945 if ((mask & (1 << ms0)) == 0)
2948 gcc_assert (ms0 == 32);
2956 operands[2] = GEN_INT (len);
2957 return "{extru|extrw,u} %1,31,%2,%0";
2961 /* We could use this `depi' for the case above as well, but `depi'
2962 requires one more register file access than an `extru'. */
2967 operands[2] = GEN_INT (p);
2968 operands[3] = GEN_INT (len);
2969 return "{depi|depwi} 0,%2,%3,%0";
2973 return "and %1,%2,%0";
2976 /* Return a string to perform a bitwise-and of operands[1] with operands[2]
2977 storing the result in operands[0]. */
2979 output_64bit_and (rtx *operands)
2981 if (GET_CODE (operands[2]) == CONST_INT && INTVAL (operands[2]) != 0)
2983 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
2984 int ls0, ls1, ms0, p, len;
2986 for (ls0 = 0; ls0 < HOST_BITS_PER_WIDE_INT; ls0++)
2987 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls0)) == 0)
2990 for (ls1 = ls0; ls1 < HOST_BITS_PER_WIDE_INT; ls1++)
2991 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ls1)) != 0)
2994 for (ms0 = ls1; ms0 < HOST_BITS_PER_WIDE_INT; ms0++)
2995 if ((mask & ((unsigned HOST_WIDE_INT) 1 << ms0)) == 0)
2998 gcc_assert (ms0 == HOST_BITS_PER_WIDE_INT);
3000 if (ls1 == HOST_BITS_PER_WIDE_INT)
3006 operands[2] = GEN_INT (len);
3007 return "extrd,u %1,63,%2,%0";
3011 /* We could use this `depi' for the case above as well, but `depi'
3012 requires one more register file access than an `extru'. */
3017 operands[2] = GEN_INT (p);
3018 operands[3] = GEN_INT (len);
3019 return "depdi 0,%2,%3,%0";
3023 return "and %1,%2,%0";
3027 output_ior (rtx *operands)
3029 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3030 int bs0, bs1, p, len;
3032 if (INTVAL (operands[2]) == 0)
3033 return "copy %1,%0";
3035 for (bs0 = 0; bs0 < 32; bs0++)
3036 if ((mask & (1 << bs0)) != 0)
3039 for (bs1 = bs0; bs1 < 32; bs1++)
3040 if ((mask & (1 << bs1)) == 0)
3043 gcc_assert (bs1 == 32 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3048 operands[2] = GEN_INT (p);
3049 operands[3] = GEN_INT (len);
3050 return "{depi|depwi} -1,%2,%3,%0";
3053 /* Return a string to perform a bitwise-or of operands[1] with operands[2]
3054 storing the result in operands[0]. */
3056 output_64bit_ior (rtx *operands)
3058 unsigned HOST_WIDE_INT mask = INTVAL (operands[2]);
3059 int bs0, bs1, p, len;
3061 if (INTVAL (operands[2]) == 0)
3062 return "copy %1,%0";
3064 for (bs0 = 0; bs0 < HOST_BITS_PER_WIDE_INT; bs0++)
3065 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs0)) != 0)
3068 for (bs1 = bs0; bs1 < HOST_BITS_PER_WIDE_INT; bs1++)
3069 if ((mask & ((unsigned HOST_WIDE_INT) 1 << bs1)) == 0)
3072 gcc_assert (bs1 == HOST_BITS_PER_WIDE_INT
3073 || ((unsigned HOST_WIDE_INT) 1 << bs1) > mask);
3078 operands[2] = GEN_INT (p);
3079 operands[3] = GEN_INT (len);
3080 return "depdi -1,%2,%3,%0";
3083 /* Target hook for assembling integer objects. This code handles
3084 aligned SI and DI integers specially since function references
3085 must be preceded by P%. */
3088 pa_assemble_integer (rtx x, unsigned int size, int aligned_p)
3090 if (size == UNITS_PER_WORD
3092 && function_label_operand (x, VOIDmode))
3094 fputs (size == 8 ? "\t.dword\tP%" : "\t.word\tP%", asm_out_file);
3095 output_addr_const (asm_out_file, x);
3096 fputc ('\n', asm_out_file);
3099 return default_assemble_integer (x, size, aligned_p);
3102 /* Output an ascii string. */
3104 output_ascii (FILE *file, const char *p, int size)
3108 unsigned char partial_output[16]; /* Max space 4 chars can occupy. */
3110 /* The HP assembler can only take strings of 256 characters at one
3111 time. This is a limitation on input line length, *not* the
3112 length of the string. Sigh. Even worse, it seems that the
3113 restriction is in number of input characters (see \xnn &
3114 \whatever). So we have to do this very carefully. */
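/* For example (illustrative), a string containing a quote, a newline
   and the byte 0x1b comes out roughly as

       .STRING "say \"hi\"\x0a\x1b"

   and once about 243 output characters have accumulated the current
   string is closed and a fresh .STRING directive is started, keeping
   each assembler input line safely below the 256-character limit.  */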
3116 fputs ("\t.STRING \"", file);
3119 for (i = 0; i < size; i += 4)
3123 for (io = 0, co = 0; io < MIN (4, size - i); io++)
3125 register unsigned int c = (unsigned char) p[i + io];
3127 if (c == '\"' || c == '\\')
3128 partial_output[co++] = '\\';
3129 if (c >= ' ' && c < 0177)
3130 partial_output[co++] = c;
3134 partial_output[co++] = '\\';
3135 partial_output[co++] = 'x';
3136 hexd = c / 16 - 0 + '0';
3138 hexd -= '9' - 'a' + 1;
3139 partial_output[co++] = hexd;
3140 hexd = c % 16 - 0 + '0';
3142 hexd -= '9' - 'a' + 1;
3143 partial_output[co++] = hexd;
3146 if (chars_output + co > 243)
3148 fputs ("\"\n\t.STRING \"", file);
3151 fwrite (partial_output, 1, (size_t) co, file);
3155 fputs ("\"\n", file);
3158 /* Try to rewrite floating point comparisons & branches to avoid
3159 useless add,tr insns.
3161 CHECK_NOTES is nonzero if we should examine REG_DEAD notes
3162 to see if FPCC is dead. CHECK_NOTES is nonzero for the
3163 first attempt to remove useless add,tr insns. It is zero
3164 for the second pass as reorg sometimes leaves bogus REG_DEAD notes.
3167 When CHECK_NOTES is zero we can only eliminate add,tr insns
3168 when there's a 1:1 correspondence between fcmp and ftest/fbranch insns. */
3171 remove_useless_addtr_insns (int check_notes)
3174 static int pass = 0;
3176 /* This is fairly cheap, so always run it when optimizing. */
3180 int fbranch_count = 0;
3182 /* Walk all the insns in this function looking for fcmp & fbranch
3183 instructions. Keep track of how many of each we find. */
3184 for (insn = get_insns (); insn; insn = next_insn (insn))
3188 /* Ignore anything that isn't an INSN or a JUMP_INSN. */
3189 if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
3192 tmp = PATTERN (insn);
3194 /* It must be a set. */
3195 if (GET_CODE (tmp) != SET)
3198 /* If the destination is CCFP, then we've found an fcmp insn. */
3199 tmp = SET_DEST (tmp);
3200 if (GET_CODE (tmp) == REG && REGNO (tmp) == 0)
3206 tmp = PATTERN (insn);
3207 /* If this is an fbranch instruction, bump the fbranch counter. */
3208 if (GET_CODE (tmp) == SET
3209 && SET_DEST (tmp) == pc_rtx
3210 && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3211 && GET_CODE (XEXP (SET_SRC (tmp), 0)) == NE
3212 && GET_CODE (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == REG
3213 && REGNO (XEXP (XEXP (SET_SRC (tmp), 0), 0)) == 0)
3221 /* Find all floating point compare + branch insns. If possible,
3222 reverse the comparison & the branch to avoid add,tr insns. */
3223 for (insn = get_insns (); insn; insn = next_insn (insn))
3227 /* Ignore anything that isn't an INSN. */
3228 if (GET_CODE (insn) != INSN)
3231 tmp = PATTERN (insn);
3233 /* It must be a set. */
3234 if (GET_CODE (tmp) != SET)
3237 /* The destination must be CCFP, which is register zero. */
3238 tmp = SET_DEST (tmp);
3239 if (GET_CODE (tmp) != REG || REGNO (tmp) != 0)
3242 /* INSN should be a set of CCFP.
3244 See if the result of this insn is used in a reversed FP
3245 conditional branch. If so, reverse our condition and
3246 the branch. Doing so avoids useless add,tr insns. */
3247 next = next_insn (insn);
3250 /* Jumps, calls and labels stop our search. */
3251 if (GET_CODE (next) == JUMP_INSN
3252 || GET_CODE (next) == CALL_INSN
3253 || GET_CODE (next) == CODE_LABEL)
3256 /* As does another fcmp insn. */
3257 if (GET_CODE (next) == INSN
3258 && GET_CODE (PATTERN (next)) == SET
3259 && GET_CODE (SET_DEST (PATTERN (next))) == REG
3260 && REGNO (SET_DEST (PATTERN (next))) == 0)
3263 next = next_insn (next);
3266 /* Is NEXT_INSN a branch? */
3268 && GET_CODE (next) == JUMP_INSN)
3270 rtx pattern = PATTERN (next);
3272 /* If it is a reversed fp conditional branch (e.g. uses add,tr)
3273 and CCFP dies, then reverse our conditional and the branch
3274 to avoid the add,tr. */
3275 if (GET_CODE (pattern) == SET
3276 && SET_DEST (pattern) == pc_rtx
3277 && GET_CODE (SET_SRC (pattern)) == IF_THEN_ELSE
3278 && GET_CODE (XEXP (SET_SRC (pattern), 0)) == NE
3279 && GET_CODE (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == REG
3280 && REGNO (XEXP (XEXP (SET_SRC (pattern), 0), 0)) == 0
3281 && GET_CODE (XEXP (SET_SRC (pattern), 1)) == PC
3282 && (fcmp_count == fbranch_count
3284 && find_regno_note (next, REG_DEAD, 0))))
3286 /* Reverse the branch. */
3287 tmp = XEXP (SET_SRC (pattern), 1);
3288 XEXP (SET_SRC (pattern), 1) = XEXP (SET_SRC (pattern), 2);
3289 XEXP (SET_SRC (pattern), 2) = tmp;
3290 INSN_CODE (next) = -1;
3292 /* Reverse our condition. */
3293 tmp = PATTERN (insn);
3294 PUT_CODE (XEXP (tmp, 1),
3295 (reverse_condition_maybe_unordered
3296 (GET_CODE (XEXP (tmp, 1)))));
3306 /* You may have trouble believing this, but this is the 32 bit HP-PA stack layout.
3311 Variable arguments (optional; any number may be allocated)
3313 SP-(4*(N+9)) arg word N
3318 Fixed arguments (must be allocated; may remain unused)
3327 SP-32 External Data Pointer (DP)
3329 SP-24 External/stub RP (RP')
3333 SP-8 Calling Stub RP (RP'')
3338 SP-0 Stack Pointer (points to next available address)
3342 /* This function saves registers as follows. Registers marked with ' are
3343 this function's registers (as opposed to the previous function's).
3344 If a frame_pointer isn't needed, r4 is saved as a general register;
3345 the space for the frame pointer is still allocated, though, to keep things simple.
3351 SP (FP') Previous FP
3352 SP + 4 Alignment filler (sigh)
3353 SP + 8 Space for locals reserved here.
3357 SP + n All call saved registers used.
3361 SP + o All call saved fp registers used.
3365 SP + p (SP') points to next available address.
3369 /* Global variables set by output_function_prologue(). */
3370 /* Size of frame. Need to know this to emit return insns from leaf procedures. */
3372 static HOST_WIDE_INT actual_fsize, local_fsize;
3373 static int save_fregs;
3375 /* Emit RTL to store REG at the memory location specified by BASE+DISP.
3376 Handle case where DISP > 8k by using the add_high_const patterns.
3378 Note in DISP > 8k case, we will leave the high part of the address
3379 in %r1. There is code in expand_hppa_{prologue,epilogue} that knows this. */
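/* For instance (illustrative), storing %r3 at an sp-relative offset of
   0x3000, which does not fit a signed 14-bit displacement, comes out
   roughly as

       addil L'0x3000,%r30
       stw %r3,R'0x3000(%r1)

   leaving the high part of the address in %r1 as noted above.  */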
3382 store_reg (int reg, HOST_WIDE_INT disp, int base)
3384 rtx insn, dest, src, basereg;
3386 src = gen_rtx_REG (word_mode, reg);
3387 basereg = gen_rtx_REG (Pmode, base);
3388 if (VAL_14_BITS_P (disp))
3390 dest = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3391 insn = emit_move_insn (dest, src);
3393 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3395 rtx delta = GEN_INT (disp);
3396 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3398 emit_move_insn (tmpreg, delta);
3399 insn = emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3402 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3403 gen_rtx_SET (VOIDmode, tmpreg,
3404 gen_rtx_PLUS (Pmode, basereg, delta)));
3405 RTX_FRAME_RELATED_P (insn) = 1;
3407 dest = gen_rtx_MEM (word_mode, tmpreg);
3408 insn = emit_move_insn (dest, src);
3412 rtx delta = GEN_INT (disp);
3413 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3414 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3416 emit_move_insn (tmpreg, high);
3417 dest = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3418 insn = emit_move_insn (dest, src);
3420 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3421 gen_rtx_SET (VOIDmode,
3422 gen_rtx_MEM (word_mode,
3423 gen_rtx_PLUS (word_mode,
3430 RTX_FRAME_RELATED_P (insn) = 1;
3433 /* Emit RTL to store REG at the memory location specified by BASE and then
3434 add MOD to BASE. MOD must be <= 8k. */
3437 store_reg_modify (int base, int reg, HOST_WIDE_INT mod)
3439 rtx insn, basereg, srcreg, delta;
3441 gcc_assert (VAL_14_BITS_P (mod));
3443 basereg = gen_rtx_REG (Pmode, base);
3444 srcreg = gen_rtx_REG (word_mode, reg);
3445 delta = GEN_INT (mod);
3447 insn = emit_insn (gen_post_store (basereg, srcreg, delta));
3450 RTX_FRAME_RELATED_P (insn) = 1;
3452 /* RTX_FRAME_RELATED_P must be set on each frame related set
3453 in a parallel with more than one element. */
3454 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 0)) = 1;
3455 RTX_FRAME_RELATED_P (XVECEXP (PATTERN (insn), 0, 1)) = 1;
3459 /* Emit RTL to set REG to the value specified by BASE+DISP. Handle case
3460 where DISP > 8k by using the add_high_const patterns. NOTE indicates
3461 whether to add a frame note or not.
3463 In the DISP > 8k case, we leave the high part of the address in %r1.
3464 There is code in expand_hppa_{prologue,epilogue} that knows about this. */
3467 set_reg_plus_d (int reg, int base, HOST_WIDE_INT disp, int note)
3471 if (VAL_14_BITS_P (disp))
3473 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3474 plus_constant (gen_rtx_REG (Pmode, base), disp));
3476 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3478 rtx basereg = gen_rtx_REG (Pmode, base);
3479 rtx delta = GEN_INT (disp);
3480 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3482 emit_move_insn (tmpreg, delta);
3483 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3484 gen_rtx_PLUS (Pmode, tmpreg, basereg));
3486 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3487 gen_rtx_SET (VOIDmode, tmpreg,
3488 gen_rtx_PLUS (Pmode, basereg, delta)));
3492 rtx basereg = gen_rtx_REG (Pmode, base);
3493 rtx delta = GEN_INT (disp);
3494 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3496 emit_move_insn (tmpreg,
3497 gen_rtx_PLUS (Pmode, basereg,
3498 gen_rtx_HIGH (Pmode, delta)));
3499 insn = emit_move_insn (gen_rtx_REG (Pmode, reg),
3500 gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3503 if (DO_FRAME_NOTES && note)
3504 RTX_FRAME_RELATED_P (insn) = 1;
3508 compute_frame_size (HOST_WIDE_INT size, int *fregs_live)
3513 /* The code in hppa_expand_prologue and hppa_expand_epilogue must
3514 be consistent with the rounding and size calculation done here.
3515 Change them at the same time. */
3517 /* We do our own stack alignment. First, round the size of the
3518 stack locals up to a word boundary. */
3519 size = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3521 /* Space for previous frame pointer + filler. If any frame is
3522 allocated, we need to add in the STARTING_FRAME_OFFSET. We
3523 waste some space here for the sake of HP compatibility. The
3524 first slot is only used when the frame pointer is needed. */
3525 if (size || frame_pointer_needed)
3526 size += STARTING_FRAME_OFFSET;
3528 /* If the current function calls __builtin_eh_return, then we need
3529 to allocate stack space for registers that will hold data for
3530 the exception handler. */
3531 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3535 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
3537 size += i * UNITS_PER_WORD;
3540 /* Account for space used by the callee general register saves. */
3541 for (i = 18, j = frame_pointer_needed ? 4 : 3; i >= j; i--)
3542 if (df_regs_ever_live_p (i))
3543 size += UNITS_PER_WORD;
3545 /* Account for space used by the callee floating point register saves. */
3546 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3547 if (df_regs_ever_live_p (i)
3548 || (!TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3552 /* We always save both halves of the FP register, so always
3553 increment the frame size by 8 bytes. */
3557 /* If any of the floating registers are saved, account for the
3558 alignment needed for the floating point register save block. */
3561 size = (size + 7) & ~7;
3566 /* The various ABIs include space for the outgoing parameters in the
3567 size of the current function's stack frame. We don't need to align
3568 for the outgoing arguments as their alignment is set by the final
3569 rounding for the frame as a whole. */
3570 size += crtl->outgoing_args_size;
3572 /* Allocate space for the fixed frame marker. This space must be
3573 allocated for any function that makes calls or allocates stack space. */
3575 if (!current_function_is_leaf || size)
3576 size += TARGET_64BIT ? 48 : 32;
3578 /* Finally, round to the preferred stack boundary. */
3579 return ((size + PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1)
3580 & ~(PREFERRED_STACK_BOUNDARY / BITS_PER_UNIT - 1));
3583 /* Generate the assembly code for function entry. FILE is a stdio
3584 stream to output the code to. SIZE is an int: how many units of
3585 temporary storage to allocate.
3587 Refer to the array `regs_ever_live' to determine which registers to
3588 save; `regs_ever_live[I]' is nonzero if register number I is ever
3589 used in the function. This function is responsible for knowing
3590 which registers should not be saved even if used. */
3592 /* On HP-PA, move-double insns between fpu and cpu need an 8-byte block
3593 of memory. If any fpu reg is used in the function, we allocate
3594 such a block here, at the bottom of the frame, just in case it's needed.
3596 If this function is a leaf procedure, then we may choose not
3597 to do a "save" insn. The decision about whether or not
3598 to do this is made in regclass.c. */
3601 pa_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
3603 /* The function's label and associated .PROC must never be
3604 separated and must be output *after* any profiling declarations
3605 to avoid changing spaces/subspaces within a procedure. */
3606 ASM_OUTPUT_LABEL (file, XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0));
3607 fputs ("\t.PROC\n", file);
3609 /* hppa_expand_prologue does the dirty work now. We just need
3610 to output the assembler directives which denote the start of a function. */
3612 fprintf (file, "\t.CALLINFO FRAME=" HOST_WIDE_INT_PRINT_DEC, actual_fsize);
3613 if (current_function_is_leaf)
3614 fputs (",NO_CALLS", file);
3616 fputs (",CALLS", file);
3618 fputs (",SAVE_RP", file);
3620 /* The SAVE_SP flag is used to indicate that register %r3 is stored
3621 at the beginning of the frame and that it is used as the frame
3622 pointer for the frame. We do this because our current frame
3623 layout doesn't conform to that specified in the HP runtime
3624 documentation and we need a way to indicate to programs such as
3625 GDB where %r3 is saved. The SAVE_SP flag was chosen because it
3626 isn't used by HP compilers but is supported by the assembler.
3627 However, SAVE_SP is supposed to indicate that the previous stack
3628 pointer has been saved in the frame marker. */
3629 if (frame_pointer_needed)
3630 fputs (",SAVE_SP", file);
3632 /* Pass on information about the number of callee register saves
3633 performed in the prologue.
3635 The compiler is supposed to pass the highest register number
3636 saved, the assembler then has to adjust that number before
3637 entering it into the unwind descriptor (to account for any
3638 caller saved registers with lower register numbers than the
3639 first callee saved register). */
3641 fprintf (file, ",ENTRY_GR=%d", gr_saved + 2);
3644 fprintf (file, ",ENTRY_FR=%d", fr_saved + 11);
3646 fputs ("\n\t.ENTRY\n", file);
3648 remove_useless_addtr_insns (0);
3652 hppa_expand_prologue (void)
3654 int merge_sp_adjust_with_store = 0;
3655 HOST_WIDE_INT size = get_frame_size ();
3656 HOST_WIDE_INT offset;
3664 /* Compute total size for frame pointer, filler, locals and rounding to
3665 the next word boundary. Similar code appears in compute_frame_size
3666 and must be changed in tandem with this code. */
3667 local_fsize = (size + UNITS_PER_WORD - 1) & ~(UNITS_PER_WORD - 1);
3668 if (local_fsize || frame_pointer_needed)
3669 local_fsize += STARTING_FRAME_OFFSET;
3671 actual_fsize = compute_frame_size (size, &save_fregs);
3673 /* Compute a few things we will use often. */
3674 tmpreg = gen_rtx_REG (word_mode, 1);
3676 /* Save RP first. The calling conventions manual states RP will
3677 always be stored into the caller's frame at sp - 20 or sp - 16
3678 depending on which ABI is in use. */
3679 if (df_regs_ever_live_p (2) || crtl->calls_eh_return)
3681 store_reg (2, TARGET_64BIT ? -16 : -20, STACK_POINTER_REGNUM);
3687 /* Allocate the local frame and set up the frame pointer if needed. */
3688 if (actual_fsize != 0)
3690 if (frame_pointer_needed)
3692 /* Copy the old frame pointer temporarily into %r1. Set up the
3693 new stack pointer, then store away the saved old frame pointer
3694 into the stack at sp and at the same time update the stack
3695 pointer by actual_fsize bytes. Two versions, first
3696 handles small (<8k) frames. The second handles large (>=8k) frames. */
3698 insn = emit_move_insn (tmpreg, frame_pointer_rtx);
3700 RTX_FRAME_RELATED_P (insn) = 1;
3702 insn = emit_move_insn (frame_pointer_rtx, stack_pointer_rtx);
3704 RTX_FRAME_RELATED_P (insn) = 1;
3706 if (VAL_14_BITS_P (actual_fsize))
3707 store_reg_modify (STACK_POINTER_REGNUM, 1, actual_fsize);
3710 /* It is incorrect to store the saved frame pointer at *sp,
3711 then increment sp (writes beyond the current stack boundary).
3713 So instead use stwm to store at *sp and post-increment the
3714 stack pointer as an atomic operation. Then increment sp to
3715 finish allocating the new frame. */
3716 HOST_WIDE_INT adjust1 = 8192 - 64;
3717 HOST_WIDE_INT adjust2 = actual_fsize - adjust1;
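/* Illustrative example: for actual_fsize == 20000 this stores the old
   frame pointer (already copied into %r1) with a single post-modifying
   store,

       stwm %r1,8128(%r30)

   since 8128 = 8192 - 64 fits the 14-bit displacement, and then adds
   the remaining 11872 bytes to %r30 with an addil/ldo pair.  */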
3719 store_reg_modify (STACK_POINTER_REGNUM, 1, adjust1);
3720 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3724 /* We set SAVE_SP in frames that need a frame pointer. Thus,
3725 we need to store the previous stack pointer (frame pointer)
3726 into the frame marker on targets that use the HP unwind
3727 library. This allows the HP unwind library to be used to
3728 unwind GCC frames. However, we are not fully compatible
3729 with the HP library because our frame layout differs from
3730 that specified in the HP runtime specification.
3732 We don't want a frame note on this instruction as the frame
3733 marker moves during dynamic stack allocation.
3735 This instruction also serves as a blockage to prevent
3736 register spills from being scheduled before the stack
3737 pointer is raised. This is necessary as we store
3738 registers using the frame pointer as a base register,
3739 and the frame pointer is set before sp is raised. */
3740 if (TARGET_HPUX_UNWIND_LIBRARY)
3742 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx,
3743 GEN_INT (TARGET_64BIT ? -8 : -4));
3745 emit_move_insn (gen_rtx_MEM (word_mode, addr),
3749 emit_insn (gen_blockage ());
3751 /* no frame pointer needed. */
3754 /* In some cases we can perform the first callee register save
3755 and allocating the stack frame at the same time. If so, just
3756 make a note of it and defer allocating the frame until saving
3757 the callee registers. */
3758 if (VAL_14_BITS_P (actual_fsize) && local_fsize == 0)
3759 merge_sp_adjust_with_store = 1;
3760 /* Cannot optimize. Adjust the stack frame by actual_fsize bytes. */
3763 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3768 /* Normal register save.
3770 Do not save the frame pointer in the frame_pointer_needed case. It
3771 was done earlier. */
3772 if (frame_pointer_needed)
3774 offset = local_fsize;
3776 /* Saving the EH return data registers in the frame is the simplest
3777 way to get the frame unwind information emitted. We put them
3778 just before the general registers. */
3779 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3781 unsigned int i, regno;
3785 regno = EH_RETURN_DATA_REGNO (i);
3786 if (regno == INVALID_REGNUM)
3789 store_reg (regno, offset, FRAME_POINTER_REGNUM);
3790 offset += UNITS_PER_WORD;
3794 for (i = 18; i >= 4; i--)
3795 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3797 store_reg (i, offset, FRAME_POINTER_REGNUM);
3798 offset += UNITS_PER_WORD;
3801 /* Account for %r3 which is saved in a special place. */
3804 /* No frame pointer needed. */
3807 offset = local_fsize - actual_fsize;
3809 /* Saving the EH return data registers in the frame is the simplest
3810 way to get the frame unwind information emitted. */
3811 if (DO_FRAME_NOTES && crtl->calls_eh_return)
3813 unsigned int i, regno;
3817 regno = EH_RETURN_DATA_REGNO (i);
3818 if (regno == INVALID_REGNUM)
3821 /* If merge_sp_adjust_with_store is nonzero, then we can
3822 optimize the first save. */
3823 if (merge_sp_adjust_with_store)
3825 store_reg_modify (STACK_POINTER_REGNUM, regno, -offset);
3826 merge_sp_adjust_with_store = 0;
3829 store_reg (regno, offset, STACK_POINTER_REGNUM);
3830 offset += UNITS_PER_WORD;
3834 for (i = 18; i >= 3; i--)
3835 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
3837 /* If merge_sp_adjust_with_store is nonzero, then we can
3838 optimize the first GR save. */
3839 if (merge_sp_adjust_with_store)
3841 store_reg_modify (STACK_POINTER_REGNUM, i, -offset);
3842 merge_sp_adjust_with_store = 0;
3845 store_reg (i, offset, STACK_POINTER_REGNUM);
3846 offset += UNITS_PER_WORD;
3850 /* If we wanted to merge the SP adjustment with a GR save, but we never
3851 did any GR saves, then just emit the adjustment here. */
3852 if (merge_sp_adjust_with_store)
3853 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
3857 /* The hppa calling conventions say that %r19, the pic offset
3858 register, is saved at sp - 32 (in this function's frame)
3859 when generating PIC code. FIXME: What is the correct thing
3860 to do for functions which make no calls and allocate no
3861 frame? Do we need to allocate a frame, or can we just omit
3862 the save? For now we'll just omit the save.
3864 We don't want a note on this insn as the frame marker can
3865 move if there is a dynamic stack allocation. */
3866 if (flag_pic && actual_fsize != 0 && !TARGET_64BIT)
3868 rtx addr = gen_rtx_PLUS (word_mode, stack_pointer_rtx, GEN_INT (-32));
3870 emit_move_insn (gen_rtx_MEM (word_mode, addr), pic_offset_table_rtx);
3874 /* Align pointer properly (doubleword boundary). */
3875 offset = (offset + 7) & ~7;
3877 /* Floating point register store. */
3882 /* First get the frame or stack pointer to the start of the FP register save area. */
3884 if (frame_pointer_needed)
3886 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
3887 base = frame_pointer_rtx;
3891 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
3892 base = stack_pointer_rtx;
3895 /* Now actually save the FP registers. */
3896 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
3898 if (df_regs_ever_live_p (i)
3899 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
3901 rtx addr, insn, reg;
3902 addr = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
3903 reg = gen_rtx_REG (DFmode, i);
3904 insn = emit_move_insn (addr, reg);
3907 RTX_FRAME_RELATED_P (insn) = 1;
3910 rtx mem = gen_rtx_MEM (DFmode,
3911 plus_constant (base, offset));
3912 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3913 gen_rtx_SET (VOIDmode, mem, reg));
3917 rtx meml = gen_rtx_MEM (SFmode,
3918 plus_constant (base, offset));
3919 rtx memr = gen_rtx_MEM (SFmode,
3920 plus_constant (base, offset + 4));
3921 rtx regl = gen_rtx_REG (SFmode, i);
3922 rtx regr = gen_rtx_REG (SFmode, i + 1);
3923 rtx setl = gen_rtx_SET (VOIDmode, meml, regl);
3924 rtx setr = gen_rtx_SET (VOIDmode, memr, regr);
3927 RTX_FRAME_RELATED_P (setl) = 1;
3928 RTX_FRAME_RELATED_P (setr) = 1;
3929 vec = gen_rtvec (2, setl, setr);
3930 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
3931 gen_rtx_SEQUENCE (VOIDmode, vec));
3934 offset += GET_MODE_SIZE (DFmode);
3941 /* Emit RTL to load REG from the memory location specified by BASE+DISP.
3942 Handle case where DISP > 8k by using the add_high_const patterns. */
3945 load_reg (int reg, HOST_WIDE_INT disp, int base)
3947 rtx dest = gen_rtx_REG (word_mode, reg);
3948 rtx basereg = gen_rtx_REG (Pmode, base);
3951 if (VAL_14_BITS_P (disp))
3952 src = gen_rtx_MEM (word_mode, plus_constant (basereg, disp));
3953 else if (TARGET_64BIT && !VAL_32_BITS_P (disp))
3955 rtx delta = GEN_INT (disp);
3956 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3958 emit_move_insn (tmpreg, delta);
3959 if (TARGET_DISABLE_INDEXING)
3961 emit_move_insn (tmpreg, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3962 src = gen_rtx_MEM (word_mode, tmpreg);
3965 src = gen_rtx_MEM (word_mode, gen_rtx_PLUS (Pmode, tmpreg, basereg));
3969 rtx delta = GEN_INT (disp);
3970 rtx high = gen_rtx_PLUS (Pmode, basereg, gen_rtx_HIGH (Pmode, delta));
3971 rtx tmpreg = gen_rtx_REG (Pmode, 1);
3973 emit_move_insn (tmpreg, high);
3974 src = gen_rtx_MEM (word_mode, gen_rtx_LO_SUM (Pmode, tmpreg, delta));
3977 emit_move_insn (dest, src);
3980 /* Update the total code bytes output to the text section. */
3983 update_total_code_bytes (unsigned int nbytes)
3985 if ((TARGET_PORTABLE_RUNTIME || !TARGET_GAS || !TARGET_SOM)
3986 && !IN_NAMED_SECTION_P (cfun->decl))
3988 unsigned int old_total = total_code_bytes;
3990 total_code_bytes += nbytes;
3992 /* Be prepared to handle overflows. */
3993 if (old_total > total_code_bytes)
3994 total_code_bytes = UINT_MAX;
3998 /* This function generates the assembly code for function exit.
3999 Args are as for output_function_prologue ().
4001 The function epilogue should not depend on the current stack
4002 pointer! It should use the frame pointer only. This is mandatory
4003 because of alloca; we also take advantage of it to omit stack
4004 adjustments before returning. */
4007 pa_output_function_epilogue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
4009 rtx insn = get_last_insn ();
4013 /* hppa_expand_epilogue does the dirty work now. We just need
4014 to output the assembler directives which denote the end of a function.
4017 To make debuggers happy, emit a nop if the epilogue was completely
4018 eliminated due to a volatile call as the last insn in the
4019 current function. That way the return address (in %r2) will
4020 always point to a valid instruction in the current function. */
4022 /* Get the last real insn. */
4023 if (GET_CODE (insn) == NOTE)
4024 insn = prev_real_insn (insn);
4026 /* If it is a sequence, then look inside. */
4027 if (insn && GET_CODE (insn) == INSN && GET_CODE (PATTERN (insn)) == SEQUENCE)
4028 insn = XVECEXP (PATTERN (insn), 0, 0);
4030 /* If insn is a CALL_INSN, then it must be a call to a volatile
4031 function (otherwise there would be epilogue insns). */
4032 if (insn && GET_CODE (insn) == CALL_INSN)
4034 fputs ("\tnop\n", file);
4038 fputs ("\t.EXIT\n\t.PROCEND\n", file);
4040 if (TARGET_SOM && TARGET_GAS)
4042 /* We're done with this subspace except possibly for some additional
4043 debug information. Forget that we are in this subspace to ensure
4044 that the next function is output in its own subspace. */
4046 cfun->machine->in_nsubspa = 2;
4049 if (INSN_ADDRESSES_SET_P ())
4051 insn = get_last_nonnote_insn ();
4052 last_address += INSN_ADDRESSES (INSN_UID (insn));
4054 last_address += insn_default_length (insn);
4055 last_address = ((last_address + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
4056 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
4059 last_address = UINT_MAX;
4061 /* Finally, update the total number of code bytes output so far. */
4062 update_total_code_bytes (last_address);
4066 hppa_expand_epilogue (void)
4069 HOST_WIDE_INT offset;
4070 HOST_WIDE_INT ret_off = 0;
4072 int merge_sp_adjust_with_load = 0;
4074 /* We will use this often. */
4075 tmpreg = gen_rtx_REG (word_mode, 1);
4077 /* Try to restore RP early to avoid load/use interlocks when
4078 RP gets used in the return (bv) instruction. This appears to still
4079 be necessary even when we schedule the prologue and epilogue. */
4082 ret_off = TARGET_64BIT ? -16 : -20;
4083 if (frame_pointer_needed)
4085 load_reg (2, ret_off, FRAME_POINTER_REGNUM);
4090 /* No frame pointer, and stack is smaller than 8k. */
4091 if (VAL_14_BITS_P (ret_off - actual_fsize))
4093 load_reg (2, ret_off - actual_fsize, STACK_POINTER_REGNUM);
4099 /* General register restores. */
4100 if (frame_pointer_needed)
4102 offset = local_fsize;
4104 /* If the current function calls __builtin_eh_return, then we need
4105 to restore the saved EH data registers. */
4106 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4108 unsigned int i, regno;
4112 regno = EH_RETURN_DATA_REGNO (i);
4113 if (regno == INVALID_REGNUM)
4116 load_reg (regno, offset, FRAME_POINTER_REGNUM);
4117 offset += UNITS_PER_WORD;
4121 for (i = 18; i >= 4; i--)
4122 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4124 load_reg (i, offset, FRAME_POINTER_REGNUM);
4125 offset += UNITS_PER_WORD;
4130 offset = local_fsize - actual_fsize;
4132 /* If the current function calls __builtin_eh_return, then we need
4133 to restore the saved EH data registers. */
4134 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4136 unsigned int i, regno;
4140 regno = EH_RETURN_DATA_REGNO (i);
4141 if (regno == INVALID_REGNUM)
4144 /* Only for the first load.
4145 merge_sp_adjust_with_load holds the register load
4146 with which we will merge the sp adjustment. */
4147 if (merge_sp_adjust_with_load == 0
4149 && VAL_14_BITS_P (-actual_fsize))
4150 merge_sp_adjust_with_load = regno;
4152 load_reg (regno, offset, STACK_POINTER_REGNUM);
4153 offset += UNITS_PER_WORD;
4157 for (i = 18; i >= 3; i--)
4159 if (df_regs_ever_live_p (i) && ! call_used_regs[i])
4161 /* Only for the first load.
4162 merge_sp_adjust_with_load holds the register load
4163 with which we will merge the sp adjustment. */
4164 if (merge_sp_adjust_with_load == 0
4166 && VAL_14_BITS_P (-actual_fsize))
4167 merge_sp_adjust_with_load = i;
4169 load_reg (i, offset, STACK_POINTER_REGNUM);
4170 offset += UNITS_PER_WORD;
4175 /* Align pointer properly (doubleword boundary). */
4176 offset = (offset + 7) & ~7;
4178 /* FP register restores. */
4181 /* Adjust the register to index off of. */
4182 if (frame_pointer_needed)
4183 set_reg_plus_d (1, FRAME_POINTER_REGNUM, offset, 0);
4185 set_reg_plus_d (1, STACK_POINTER_REGNUM, offset, 0);
4187 /* Actually do the restores now. */
4188 for (i = FP_SAVED_REG_LAST; i >= FP_SAVED_REG_FIRST; i -= FP_REG_STEP)
4189 if (df_regs_ever_live_p (i)
4190 || (! TARGET_64BIT && df_regs_ever_live_p (i + 1)))
4192 rtx src = gen_rtx_MEM (DFmode, gen_rtx_POST_INC (DFmode, tmpreg));
4193 rtx dest = gen_rtx_REG (DFmode, i);
4194 emit_move_insn (dest, src);
4198 /* Emit a blockage insn here to keep these insns from being moved to
4199 an earlier spot in the epilogue, or into the main instruction stream.
4201 This is necessary as we must not cut the stack back before all the
4202 restores are finished. */
4203 emit_insn (gen_blockage ());
4205 /* Reset stack pointer (and possibly frame pointer). The stack
4206 pointer is initially set to fp + 64 to avoid a race condition. */
4207 if (frame_pointer_needed)
4209 rtx delta = GEN_INT (-64);
4211 set_reg_plus_d (STACK_POINTER_REGNUM, FRAME_POINTER_REGNUM, 64, 0);
4212 emit_insn (gen_pre_load (frame_pointer_rtx, stack_pointer_rtx, delta));
4214 /* If we were deferring a callee register restore, do it now. */
4215 else if (merge_sp_adjust_with_load)
4217 rtx delta = GEN_INT (-actual_fsize);
4218 rtx dest = gen_rtx_REG (word_mode, merge_sp_adjust_with_load);
4220 emit_insn (gen_pre_load (dest, stack_pointer_rtx, delta));
4222 else if (actual_fsize != 0)
4223 set_reg_plus_d (STACK_POINTER_REGNUM, STACK_POINTER_REGNUM,
4226 /* If we haven't restored %r2 yet (no frame pointer, and a stack
4227 frame greater than 8k), do so now. */
4229 load_reg (2, ret_off, STACK_POINTER_REGNUM);
4231 if (DO_FRAME_NOTES && crtl->calls_eh_return)
4233 rtx sa = EH_RETURN_STACKADJ_RTX;
4235 emit_insn (gen_blockage ());
4236 emit_insn (TARGET_64BIT
4237 ? gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, sa)
4238 : gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, sa));
4243 hppa_pic_save_rtx (void)
4245 return get_hard_reg_initial_val (word_mode, PIC_OFFSET_TABLE_REGNUM);
4248 #ifndef NO_DEFERRED_PROFILE_COUNTERS
4249 #define NO_DEFERRED_PROFILE_COUNTERS 0
4253 /* Vector of funcdef numbers. */
4254 static VEC(int,heap) *funcdef_nos;
4256 /* Output deferred profile counters. */
4258 output_deferred_profile_counters (void)
4263 if (VEC_empty (int, funcdef_nos))
4266 switch_to_section (data_section);
4267 align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE);
4268 ASM_OUTPUT_ALIGN (asm_out_file, floor_log2 (align / BITS_PER_UNIT));
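/* Illustrative note (added annotation, not from the original source):
   assuming LONG_TYPE_SIZE is 32 bits and BIGGEST_ALIGNMENT is at least
   that, align is 32, so the directive requests floor_log2 (32 / 8) == 2,
   i.e. 4-byte alignment, and each deferred counter below occupies
   LONG_TYPE_SIZE / BITS_PER_UNIT == 4 bytes.  */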
4270 for (i = 0; VEC_iterate (int, funcdef_nos, i, n); i++)
4272 targetm.asm_out.internal_label (asm_out_file, "LP", n);
4273 assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1);
4276 VEC_free (int, heap, funcdef_nos);
4280 hppa_profile_hook (int label_no)
4282 /* We use SImode for the address of the function in both 32 and
4283 64-bit code to avoid having to provide DImode versions of the
4284 lcla2 and load_offset_label_address insn patterns. */
4285 rtx reg = gen_reg_rtx (SImode);
4286 rtx label_rtx = gen_label_rtx ();
4287 rtx begin_label_rtx, call_insn;
4288 char begin_label_name[16];
4290 ASM_GENERATE_INTERNAL_LABEL (begin_label_name, FUNC_BEGIN_PROLOG_LABEL,
4292 begin_label_rtx = gen_rtx_SYMBOL_REF (SImode, ggc_strdup (begin_label_name));
4295 emit_move_insn (arg_pointer_rtx,
4296 gen_rtx_PLUS (word_mode, virtual_outgoing_args_rtx,
4299 emit_move_insn (gen_rtx_REG (word_mode, 26), gen_rtx_REG (word_mode, 2));
4301 /* The address of the function is loaded into %r25 with an instruction-
4302 relative sequence that avoids the use of relocations. The sequence
4303 is split so that the load_offset_label_address instruction can
4304 occupy the delay slot of the call to _mcount. */
4306 emit_insn (gen_lcla2 (reg, label_rtx));
4308 emit_insn (gen_lcla1 (reg, label_rtx));
4310 emit_insn (gen_load_offset_label_address (gen_rtx_REG (SImode, 25),
4311 reg, begin_label_rtx, label_rtx));
4313 #if !NO_DEFERRED_PROFILE_COUNTERS
4315 rtx count_label_rtx, addr, r24;
4316 char count_label_name[16];
4318 VEC_safe_push (int, heap, funcdef_nos, label_no);
4319 ASM_GENERATE_INTERNAL_LABEL (count_label_name, "LP", label_no);
4320 count_label_rtx = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (count_label_name));
4322 addr = force_reg (Pmode, count_label_rtx);
4323 r24 = gen_rtx_REG (Pmode, 24);
4324 emit_move_insn (r24, addr);
4327 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4328 gen_rtx_SYMBOL_REF (Pmode,
4330 GEN_INT (TARGET_64BIT ? 24 : 12)));
4332 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), r24);
4337 emit_call_insn (gen_call (gen_rtx_MEM (Pmode,
4338 gen_rtx_SYMBOL_REF (Pmode,
4340 GEN_INT (TARGET_64BIT ? 16 : 8)));
4344 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 25));
4345 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), gen_rtx_REG (SImode, 26));
4347 /* Indicate the _mcount call cannot throw, nor will it execute a
4349 add_reg_note (call_insn, REG_EH_REGION, constm1_rtx);
4352 /* Fetch the return address for the frame COUNT steps up from
4353 the current frame, after the prologue. FRAMEADDR is the
4354 frame pointer of the COUNT frame.
4356 We want to ignore any export stub remnants here. To handle this,
4357 we examine the code at the return address, and if it is an export
4358 stub, we return a memory rtx for the stub return address stored
4361 The value returned is used in two different ways:
4363 1. To find a function's caller.
4365 2. To change the return address for a function.
4367 This function handles most instances of case 1; however, it will
4368 fail if there are two levels of stubs to execute on the return
4369 path. The only way I believe that can happen is if the return value
4370 needs a parameter relocation, which never happens for C code.
4372 This function handles most instances of case 2; however, it will
4373 fail if we did not originally have stub code on the return path
4374 but will need stub code on the new return path. This can happen if
4375 the caller & callee are both in the main program, but the new
4376 return location is in a shared library. */
4379 return_addr_rtx (int count, rtx frameaddr)
4389 rp = get_hard_reg_initial_val (Pmode, 2);
4391 if (TARGET_64BIT || TARGET_NO_SPACE_REGS)
4394 saved_rp = gen_reg_rtx (Pmode);
4395 emit_move_insn (saved_rp, rp);
4397 /* Get pointer to the instruction stream. We have to mask out the
4398 privilege level from the two low order bits of the return address
4399 pointer here so that ins will point to the start of the first
4400 instruction that would have been executed if we returned. */
4401 ins = copy_to_reg (gen_rtx_AND (Pmode, rp, MASK_RETURN_ADDR));
4402 label = gen_label_rtx ();
4404 /* Check the instruction stream at the normal return address for the
4407 0x4bc23fd1 | stub+8: ldw -18(sr0,sp),rp
4408 0x004010a1 | stub+12: ldsid (sr0,rp),r1
4409 0x00011820 | stub+16: mtsp r1,sr0
4410 0xe0400002 | stub+20: be,n 0(sr0,rp)
4412 If it is an export stub, then our return address is really in
4415 emit_cmp_insn (gen_rtx_MEM (SImode, ins), GEN_INT (0x4bc23fd1), NE,
4416 NULL_RTX, SImode, 1);
4417 emit_jump_insn (gen_bne (label));
4419 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 4)),
4420 GEN_INT (0x004010a1), NE, NULL_RTX, SImode, 1);
4421 emit_jump_insn (gen_bne (label));
4423 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 8)),
4424 GEN_INT (0x00011820), NE, NULL_RTX, SImode, 1);
4425 emit_jump_insn (gen_bne (label));
4427 /* 0xe0400002 must be specified as -532676606 so that it won't be
4428 rejected as an invalid immediate operand on 64-bit hosts. */
4429 emit_cmp_insn (gen_rtx_MEM (SImode, plus_constant (ins, 12)),
4430 GEN_INT (-532676606), NE, NULL_RTX, SImode, 1);
4432 /* If there is no export stub then just use the value saved from
4433 the return pointer register. */
4435 emit_jump_insn (gen_bne (label));
4437 /* Here we know that our return address points to an export
4438 stub. We don't want to return the address of the export stub,
4439 but rather the return address of the export stub. That return
4440 address is stored at -24[frameaddr]. */
4442 emit_move_insn (saved_rp,
4444 memory_address (Pmode,
4445 plus_constant (frameaddr,
4453 emit_bcond_fp (enum rtx_code code, rtx operand0)
4455 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
4456 gen_rtx_IF_THEN_ELSE (VOIDmode,
4457 gen_rtx_fmt_ee (code,
4459 gen_rtx_REG (CCFPmode, 0),
4461 gen_rtx_LABEL_REF (VOIDmode, operand0),
4467 gen_cmp_fp (enum rtx_code code, rtx operand0, rtx operand1)
4469 return gen_rtx_SET (VOIDmode, gen_rtx_REG (CCFPmode, 0),
4470 gen_rtx_fmt_ee (code, CCFPmode, operand0, operand1));
4473 /* Adjust the cost of a scheduling dependency. Return the new cost of
4474 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
4477 pa_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
4479 enum attr_type attr_type;
4481 /* Don't adjust costs for a pa8000 chip; also, do not adjust any
4482 true dependencies as they are described with bypasses now. */
4483 if (pa_cpu >= PROCESSOR_8000 || REG_NOTE_KIND (link) == 0)
4486 if (! recog_memoized (insn))
4489 attr_type = get_attr_type (insn);
4491 switch (REG_NOTE_KIND (link))
4494 /* Anti dependency; DEP_INSN reads a register that INSN writes some
4497 if (attr_type == TYPE_FPLOAD)
4499 rtx pat = PATTERN (insn);
4500 rtx dep_pat = PATTERN (dep_insn);
4501 if (GET_CODE (pat) == PARALLEL)
4503 /* This happens for the fldXs,mb patterns. */
4504 pat = XVECEXP (pat, 0, 0);
4506 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4507 /* If this happens, we have to extend this to schedule
4508 optimally. Return 0 for now. */
4511 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4513 if (! recog_memoized (dep_insn))
4515 switch (get_attr_type (dep_insn))
4522 case TYPE_FPSQRTSGL:
4523 case TYPE_FPSQRTDBL:
4524 /* A fpload can't be issued until one cycle before a
4525 preceding arithmetic operation has finished if
4526 the target of the fpload is any of the sources
4527 (or destination) of the arithmetic operation. */
4528 return insn_default_latency (dep_insn) - 1;
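/* Worked example (added annotation, not from the original source):
   if the preceding arithmetic operation has a default latency of 8
   cycles, the anti-dependent fpload is given a cost of 7, so it can
   issue one cycle before that operation completes.  */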
4535 else if (attr_type == TYPE_FPALU)
4537 rtx pat = PATTERN (insn);
4538 rtx dep_pat = PATTERN (dep_insn);
4539 if (GET_CODE (pat) == PARALLEL)
4541 /* This happens for the fldXs,mb patterns. */
4542 pat = XVECEXP (pat, 0, 0);
4544 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4545 /* If this happens, we have to extend this to schedule
4546 optimally. Return 0 for now. */
4549 if (reg_mentioned_p (SET_DEST (pat), SET_SRC (dep_pat)))
4551 if (! recog_memoized (dep_insn))
4553 switch (get_attr_type (dep_insn))
4557 case TYPE_FPSQRTSGL:
4558 case TYPE_FPSQRTDBL:
4559 /* An ALU flop can't be issued until two cycles before a
4560 preceding divide or sqrt operation has finished if
4561 the target of the ALU flop is any of the sources
4562 (or destination) of the divide or sqrt operation. */
4563 return insn_default_latency (dep_insn) - 2;
4571 /* For other anti dependencies, the cost is 0. */
4574 case REG_DEP_OUTPUT:
4575 /* Output dependency; DEP_INSN writes a register that INSN writes some
4577 if (attr_type == TYPE_FPLOAD)
4579 rtx pat = PATTERN (insn);
4580 rtx dep_pat = PATTERN (dep_insn);
4581 if (GET_CODE (pat) == PARALLEL)
4583 /* This happens for the fldXs,mb patterns. */
4584 pat = XVECEXP (pat, 0, 0);
4586 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4587 /* If this happens, we have to extend this to schedule
4588 optimally. Return 0 for now. */
4591 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4593 if (! recog_memoized (dep_insn))
4595 switch (get_attr_type (dep_insn))
4602 case TYPE_FPSQRTSGL:
4603 case TYPE_FPSQRTDBL:
4604 /* A fpload can't be issued until one cycle before a
4605 preceding arithmetic operation has finished if
4606 the target of the fpload is the destination of the
4607 arithmetic operation.
4609 Exception: For PA7100LC, PA7200 and PA7300, the cost
4610 is 3 cycles, unless they bundle together. We also
4611 pay the penalty if the second insn is a fpload. */
4612 return insn_default_latency (dep_insn) - 1;
4619 else if (attr_type == TYPE_FPALU)
4621 rtx pat = PATTERN (insn);
4622 rtx dep_pat = PATTERN (dep_insn);
4623 if (GET_CODE (pat) == PARALLEL)
4625 /* This happens for the fldXs,mb patterns. */
4626 pat = XVECEXP (pat, 0, 0);
4628 if (GET_CODE (pat) != SET || GET_CODE (dep_pat) != SET)
4629 /* If this happens, we have to extend this to schedule
4630 optimally. Return 0 for now. */
4633 if (reg_mentioned_p (SET_DEST (pat), SET_DEST (dep_pat)))
4635 if (! recog_memoized (dep_insn))
4637 switch (get_attr_type (dep_insn))
4641 case TYPE_FPSQRTSGL:
4642 case TYPE_FPSQRTDBL:
4643 /* An ALU flop can't be issued until two cycles before a
4644 preceding divide or sqrt operation has finished if
4645 the target of the ALU flop is also the target of
4646 the divide or sqrt operation. */
4647 return insn_default_latency (dep_insn) - 2;
4655 /* For other output dependencies, the cost is 0. */
4663 /* Adjust scheduling priorities. We use this to try to keep addil
4664 and the next use of %r1 close together. */
4666 pa_adjust_priority (rtx insn, int priority)
4668 rtx set = single_set (insn);
4672 src = SET_SRC (set);
4673 dest = SET_DEST (set);
4674 if (GET_CODE (src) == LO_SUM
4675 && symbolic_operand (XEXP (src, 1), VOIDmode)
4676 && ! read_only_operand (XEXP (src, 1), VOIDmode))
4679 else if (GET_CODE (src) == MEM
4680 && GET_CODE (XEXP (src, 0)) == LO_SUM
4681 && symbolic_operand (XEXP (XEXP (src, 0), 1), VOIDmode)
4682 && ! read_only_operand (XEXP (XEXP (src, 0), 1), VOIDmode))
4685 else if (GET_CODE (dest) == MEM
4686 && GET_CODE (XEXP (dest, 0)) == LO_SUM
4687 && symbolic_operand (XEXP (XEXP (dest, 0), 1), VOIDmode)
4688 && ! read_only_operand (XEXP (XEXP (dest, 0), 1), VOIDmode))
4694 /* The 700 can only issue a single insn at a time.
4695 The 7XXX processors can issue two insns at a time.
4696 The 8000 can issue 4 insns at a time. */
4698 pa_issue_rate (void)
4702 case PROCESSOR_700: return 1;
4703 case PROCESSOR_7100: return 2;
4704 case PROCESSOR_7100LC: return 2;
4705 case PROCESSOR_7200: return 2;
4706 case PROCESSOR_7300: return 2;
4707 case PROCESSOR_8000: return 4;
4716 /* Return any length adjustment needed by INSN which already has its length
4717 computed as LENGTH. Return zero if no adjustment is necessary.
4719 For the PA: function calls, millicode calls, and backwards short
4720 conditional branches with unfilled delay slots need an adjustment by +1
4721 (to account for the NOP which will be inserted into the instruction stream).
4723 Also compute the length of an inline block move here as it is too
4724 complicated to express as a length attribute in pa.md. */
4726 pa_adjust_insn_length (rtx insn, int length)
4728 rtx pat = PATTERN (insn);
4730 /* Jumps inside switch tables which have unfilled delay slots need
4732 if (GET_CODE (insn) == JUMP_INSN
4733 && GET_CODE (pat) == PARALLEL
4734 && get_attr_type (insn) == TYPE_BTABLE_BRANCH)
4736 /* Millicode insn with an unfilled delay slot. */
4737 else if (GET_CODE (insn) == INSN
4738 && GET_CODE (pat) != SEQUENCE
4739 && GET_CODE (pat) != USE
4740 && GET_CODE (pat) != CLOBBER
4741 && get_attr_type (insn) == TYPE_MILLI)
4743 /* Block move pattern. */
4744 else if (GET_CODE (insn) == INSN
4745 && GET_CODE (pat) == PARALLEL
4746 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4747 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4748 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 1)) == MEM
4749 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode
4750 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 1)) == BLKmode)
4751 return compute_movmem_length (insn) - 4;
4752 /* Block clear pattern. */
4753 else if (GET_CODE (insn) == INSN
4754 && GET_CODE (pat) == PARALLEL
4755 && GET_CODE (XVECEXP (pat, 0, 0)) == SET
4756 && GET_CODE (XEXP (XVECEXP (pat, 0, 0), 0)) == MEM
4757 && XEXP (XVECEXP (pat, 0, 0), 1) == const0_rtx
4758 && GET_MODE (XEXP (XVECEXP (pat, 0, 0), 0)) == BLKmode)
4759 return compute_clrmem_length (insn) - 4;
4760 /* Conditional branch with an unfilled delay slot. */
4761 else if (GET_CODE (insn) == JUMP_INSN && ! simplejump_p (insn))
4763 /* Adjust a short backwards conditional with an unfilled delay slot. */
4764 if (GET_CODE (pat) == SET
4766 && ! forward_branch_p (insn))
4768 else if (GET_CODE (pat) == PARALLEL
4769 && get_attr_type (insn) == TYPE_PARALLEL_BRANCH
4772 /* Adjust dbra insn with short backwards conditional branch with
4773 unfilled delay slot -- only for case where counter is in a
4774 general register. */
4775 else if (GET_CODE (pat) == PARALLEL
4776 && GET_CODE (XVECEXP (pat, 0, 1)) == SET
4777 && GET_CODE (XEXP (XVECEXP (pat, 0, 1), 0)) == REG
4778 && ! FP_REG_P (XEXP (XVECEXP (pat, 0, 1), 0))
4780 && ! forward_branch_p (insn))
4788 /* Print operand X (an rtx) in assembler syntax to file FILE.
4789 CODE is a letter or dot (`z' in `%z0') or 0 if no letter was specified.
4790 For `%' followed by punctuation, CODE is the punctuation and X is null. */
4793 print_operand (FILE *file, rtx x, int code)
4798 /* Output a 'nop' if there's nothing for the delay slot. */
4799 if (dbr_sequence_length () == 0)
4800 fputs ("\n\tnop", file);
4803 /* Output a nullification completer if there's nothing for the
4804 delay slot or nullification is requested. */
4805 if (dbr_sequence_length () == 0 ||
4807 INSN_ANNULLED_BRANCH_P (XVECEXP (final_sequence, 0, 0))))
4811 /* Print out the second register name of a register pair.
4812 I.e., R (6) => 7. */
4813 fputs (reg_names[REGNO (x) + 1], file);
4816 /* A register or zero. */
4818 || (x == CONST0_RTX (DFmode))
4819 || (x == CONST0_RTX (SFmode)))
4821 fputs ("%r0", file);
4827 /* A register or zero (floating point). */
4829 || (x == CONST0_RTX (DFmode))
4830 || (x == CONST0_RTX (SFmode)))
4832 fputs ("%fr0", file);
4841 xoperands[0] = XEXP (XEXP (x, 0), 0);
4842 xoperands[1] = XVECEXP (XEXP (XEXP (x, 0), 1), 0, 0);
4843 output_global_address (file, xoperands[1], 0);
4844 fprintf (file, "(%s)", reg_names [REGNO (xoperands[0])]);
4848 case 'C': /* Plain (C)ondition */
4850 switch (GET_CODE (x))
4853 fputs ("=", file); break;
4855 fputs ("<>", file); break;
4857 fputs (">", file); break;
4859 fputs (">=", file); break;
4861 fputs (">>=", file); break;
4863 fputs (">>", file); break;
4865 fputs ("<", file); break;
4867 fputs ("<=", file); break;
4869 fputs ("<<=", file); break;
4871 fputs ("<<", file); break;
4876 case 'N': /* Condition, (N)egated */
4877 switch (GET_CODE (x))
4880 fputs ("<>", file); break;
4882 fputs ("=", file); break;
4884 fputs ("<=", file); break;
4886 fputs ("<", file); break;
4888 fputs ("<<", file); break;
4890 fputs ("<<=", file); break;
4892 fputs (">=", file); break;
4894 fputs (">", file); break;
4896 fputs (">>", file); break;
4898 fputs (">>=", file); break;
4903 /* For floating point comparisons. Note that the output
4904 predicates are the complement of the desired mode. The
4905 conditions for GT, GE, LT, LE and LTGT cause an invalid
4906 operation exception if the result is unordered and this
4907 exception is enabled in the floating-point status register. */
4909 switch (GET_CODE (x))
4912 fputs ("!=", file); break;
4914 fputs ("=", file); break;
4916 fputs ("!>", file); break;
4918 fputs ("!>=", file); break;
4920 fputs ("!<", file); break;
4922 fputs ("!<=", file); break;
4924 fputs ("!<>", file); break;
4926 fputs ("!?<=", file); break;
4928 fputs ("!?<", file); break;
4930 fputs ("!?>=", file); break;
4932 fputs ("!?>", file); break;
4934 fputs ("!?=", file); break;
4936 fputs ("!?", file); break;
4938 fputs ("?", file); break;
4943 case 'S': /* Condition, operands are (S)wapped. */
4944 switch (GET_CODE (x))
4947 fputs ("=", file); break;
4949 fputs ("<>", file); break;
4951 fputs ("<", file); break;
4953 fputs ("<=", file); break;
4955 fputs ("<<=", file); break;
4957 fputs ("<<", file); break;
4959 fputs (">", file); break;
4961 fputs (">=", file); break;
4963 fputs (">>=", file); break;
4965 fputs (">>", file); break;
4970 case 'B': /* Condition, (B)oth swapped and negate. */
4971 switch (GET_CODE (x))
4974 fputs ("<>", file); break;
4976 fputs ("=", file); break;
4978 fputs (">=", file); break;
4980 fputs (">", file); break;
4982 fputs (">>", file); break;
4984 fputs (">>=", file); break;
4986 fputs ("<=", file); break;
4988 fputs ("<", file); break;
4990 fputs ("<<", file); break;
4992 fputs ("<<=", file); break;
4998 gcc_assert (GET_CODE (x) == CONST_INT);
4999 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x));
5002 gcc_assert (GET_CODE (x) == CONST_INT);
5003 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 64 - (INTVAL (x) & 63));
5006 gcc_assert (GET_CODE (x) == CONST_INT);
5007 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 32 - (INTVAL (x) & 31));
5010 gcc_assert (GET_CODE (x) == CONST_INT && exact_log2 (INTVAL (x)) >= 0);
5011 fprintf (file, "%d", exact_log2 (INTVAL (x)));
5014 gcc_assert (GET_CODE (x) == CONST_INT);
5015 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 63 - (INTVAL (x) & 63));
5018 gcc_assert (GET_CODE (x) == CONST_INT);
5019 fprintf (file, HOST_WIDE_INT_PRINT_DEC, 31 - (INTVAL (x) & 31));
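/* Illustrative note (added annotation, not from the original source):
   the preceding operand codes print complements of a constant bit
   position or shift count, as used by the PA deposit/extract style
   instructions.  For example, 31 - (n & 31) maps a shift count n to
   the corresponding 32-bit bit-position operand, so n == 3 prints
   as 28.  */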
5022 if (GET_CODE (x) == CONST_INT)
5027 switch (GET_CODE (XEXP (x, 0)))
5031 if (ASSEMBLER_DIALECT == 0)
5032 fputs ("s,mb", file);
5034 fputs (",mb", file);
5038 if (ASSEMBLER_DIALECT == 0)
5039 fputs ("s,ma", file);
5041 fputs (",ma", file);
5044 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5045 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5047 if (ASSEMBLER_DIALECT == 0)
5050 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
5051 || GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5053 if (ASSEMBLER_DIALECT == 0)
5054 fputs ("x,s", file);
5058 else if (code == 'F' && ASSEMBLER_DIALECT == 0)
5062 if (code == 'F' && ASSEMBLER_DIALECT == 0)
5068 output_global_address (file, x, 0);
5071 output_global_address (file, x, 1);
5073 case 0: /* Don't do anything special */
5078 compute_zdepwi_operands (INTVAL (x), op);
5079 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5085 compute_zdepdi_operands (INTVAL (x), op);
5086 fprintf (file, "%d,%d,%d", op[0], op[1], op[2]);
5090 /* We can get here from a .vtable_inherit due to our
5091 CONSTANT_ADDRESS_P rejecting perfectly good constant
5097 if (GET_CODE (x) == REG)
5099 fputs (reg_names [REGNO (x)], file);
5100 if (TARGET_64BIT && FP_REG_P (x) && GET_MODE_SIZE (GET_MODE (x)) <= 4)
5106 && GET_MODE_SIZE (GET_MODE (x)) <= 4
5107 && (REGNO (x) & 1) == 0)
5110 else if (GET_CODE (x) == MEM)
5112 int size = GET_MODE_SIZE (GET_MODE (x));
5113 rtx base = NULL_RTX;
5114 switch (GET_CODE (XEXP (x, 0)))
5118 base = XEXP (XEXP (x, 0), 0);
5119 fprintf (file, "-%d(%s)", size, reg_names [REGNO (base)]);
5123 base = XEXP (XEXP (x, 0), 0);
5124 fprintf (file, "%d(%s)", size, reg_names [REGNO (base)]);
5127 if (GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT)
5128 fprintf (file, "%s(%s)",
5129 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 0), 0))],
5130 reg_names [REGNO (XEXP (XEXP (x, 0), 1))]);
5131 else if (GET_CODE (XEXP (XEXP (x, 0), 1)) == MULT)
5132 fprintf (file, "%s(%s)",
5133 reg_names [REGNO (XEXP (XEXP (XEXP (x, 0), 1), 0))],
5134 reg_names [REGNO (XEXP (XEXP (x, 0), 0))]);
5135 else if (GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
5136 && GET_CODE (XEXP (XEXP (x, 0), 1)) == REG)
5138 /* Because the REG_POINTER flag can get lost during reload,
5139 GO_IF_LEGITIMATE_ADDRESS canonicalizes the order of the
5140 index and base registers in the combined move patterns. */
5141 rtx base = XEXP (XEXP (x, 0), 1);
5142 rtx index = XEXP (XEXP (x, 0), 0);
5144 fprintf (file, "%s(%s)",
5145 reg_names [REGNO (index)], reg_names [REGNO (base)]);
5148 output_address (XEXP (x, 0));
5151 output_address (XEXP (x, 0));
5156 output_addr_const (file, x);
5159 /* output a SYMBOL_REF or a CONST expression involving a SYMBOL_REF. */
5162 output_global_address (FILE *file, rtx x, int round_constant)
5165 /* Imagine (high (const (plus ...))). */
5166 if (GET_CODE (x) == HIGH)
5169 if (GET_CODE (x) == SYMBOL_REF && read_only_operand (x, VOIDmode))
5170 output_addr_const (file, x);
5171 else if (GET_CODE (x) == SYMBOL_REF && !flag_pic)
5173 output_addr_const (file, x);
5174 fputs ("-$global$", file);
5176 else if (GET_CODE (x) == CONST)
5178 const char *sep = "";
5179 int offset = 0; /* assembler wants -$global$ at end */
5180 rtx base = NULL_RTX;
5182 switch (GET_CODE (XEXP (XEXP (x, 0), 0)))
5185 base = XEXP (XEXP (x, 0), 0);
5186 output_addr_const (file, base);
5189 offset = INTVAL (XEXP (XEXP (x, 0), 0));
5195 switch (GET_CODE (XEXP (XEXP (x, 0), 1)))
5198 base = XEXP (XEXP (x, 0), 1);
5199 output_addr_const (file, base);
5202 offset = INTVAL (XEXP (XEXP (x, 0), 1));
5208 /* How bogus. The compiler is apparently responsible for
5209 rounding the constant if it uses an LR field selector.
5211 The linker and/or assembler seem a better place since
5212 they have to do this kind of thing already.
5214 If we fail to do this, HP's optimizing linker may eliminate
5215 an addil, but not update the ldw/stw/ldo instruction that
5216 uses the result of the addil. */
5218 offset = ((offset + 0x1000) & ~0x1fff);
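/* Illustrative note (added annotation, not from the original source):
   the rounding above snaps the constant part of the address to the
   nearest multiple of 0x2000 (8K), rounding ties upward, which is what
   the LR field selector appears to require (see the comment above).
   For example, an offset of 0x17ff becomes 0x2000, while 0x0fff
   becomes 0.  */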
5220 switch (GET_CODE (XEXP (x, 0)))
5233 gcc_assert (GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF);
5241 if (!read_only_operand (base, VOIDmode) && !flag_pic)
5242 fputs ("-$global$", file);
5244 fprintf (file, "%s%d", sep, offset);
5247 output_addr_const (file, x);
5250 /* Output boilerplate text to appear at the beginning of the file.
5251 There are several possible versions. */
5252 #define aputs(x) fputs(x, asm_out_file)
5254 pa_file_start_level (void)
5257 aputs ("\t.LEVEL 2.0w\n");
5258 else if (TARGET_PA_20)
5259 aputs ("\t.LEVEL 2.0\n");
5260 else if (TARGET_PA_11)
5261 aputs ("\t.LEVEL 1.1\n");
5263 aputs ("\t.LEVEL 1.0\n");
5267 pa_file_start_space (int sortspace)
5269 aputs ("\t.SPACE $PRIVATE$");
5272 aputs ("\n\t.SUBSPA $DATA$,QUAD=1,ALIGN=8,ACCESS=31"
5273 "\n\t.SUBSPA $BSS$,QUAD=1,ALIGN=8,ACCESS=31,ZERO,SORT=82"
5274 "\n\t.SPACE $TEXT$");
5277 aputs ("\n\t.SUBSPA $LIT$,QUAD=0,ALIGN=8,ACCESS=44"
5278 "\n\t.SUBSPA $CODE$,QUAD=0,ALIGN=8,ACCESS=44,CODE_ONLY\n");
5282 pa_file_start_file (int want_version)
5284 if (write_symbols != NO_DEBUG)
5286 output_file_directive (asm_out_file, main_input_filename);
5288 aputs ("\t.version\t\"01.01\"\n");
5293 pa_file_start_mcount (const char *aswhat)
5296 fprintf (asm_out_file, "\t.IMPORT _mcount,%s\n", aswhat);
5300 pa_elf_file_start (void)
5302 pa_file_start_level ();
5303 pa_file_start_mcount ("ENTRY");
5304 pa_file_start_file (0);
5308 pa_som_file_start (void)
5310 pa_file_start_level ();
5311 pa_file_start_space (0);
5312 aputs ("\t.IMPORT $global$,DATA\n"
5313 "\t.IMPORT $$dyncall,MILLICODE\n");
5314 pa_file_start_mcount ("CODE");
5315 pa_file_start_file (0);
5319 pa_linux_file_start (void)
5321 pa_file_start_file (1);
5322 pa_file_start_level ();
5323 pa_file_start_mcount ("CODE");
5327 pa_hpux64_gas_file_start (void)
5329 pa_file_start_level ();
5330 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5332 ASM_OUTPUT_TYPE_DIRECTIVE (asm_out_file, "_mcount", "function");
5334 pa_file_start_file (1);
5338 pa_hpux64_hpas_file_start (void)
5340 pa_file_start_level ();
5341 pa_file_start_space (1);
5342 pa_file_start_mcount ("CODE");
5343 pa_file_start_file (0);
5347 /* Search the deferred plabel list for SYMBOL and return its internal
5348 label. If an entry for SYMBOL is not found, a new entry is created. */
5351 get_deferred_plabel (rtx symbol)
5353 const char *fname = XSTR (symbol, 0);
5356 /* See if we have already put this function on the list of deferred
5357 plabels. This list is generally small, so a linear search is not
5358 too ugly. If it proves too slow, replace it with something faster. */
5359 for (i = 0; i < n_deferred_plabels; i++)
5360 if (strcmp (fname, XSTR (deferred_plabels[i].symbol, 0)) == 0)
5363 /* If the deferred plabel list is empty, or this entry was not found
5364 on the list, create a new entry on the list. */
5365 if (deferred_plabels == NULL || i == n_deferred_plabels)
5369 if (deferred_plabels == 0)
5370 deferred_plabels = (struct deferred_plabel *)
5371 ggc_alloc (sizeof (struct deferred_plabel));
5373 deferred_plabels = (struct deferred_plabel *)
5374 ggc_realloc (deferred_plabels,
5375 ((n_deferred_plabels + 1)
5376 * sizeof (struct deferred_plabel)));
5378 i = n_deferred_plabels++;
5379 deferred_plabels[i].internal_label = gen_label_rtx ();
5380 deferred_plabels[i].symbol = symbol;
5382 /* Gross. We have just implicitly taken the address of this
5383 function. Mark it in the same manner as assemble_name. */
5384 id = maybe_get_identifier (targetm.strip_name_encoding (fname));
5386 mark_referenced (id);
5389 return deferred_plabels[i].internal_label;
5393 output_deferred_plabels (void)
5397 /* If we have some deferred plabels, then we need to switch into the
5398 data or readonly data section, and align it to a 4 byte boundary
5399 before outputting the deferred plabels. */
5400 if (n_deferred_plabels)
5402 switch_to_section (flag_pic ? data_section : readonly_data_section);
5403 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
5406 /* Now output the deferred plabels. */
5407 for (i = 0; i < n_deferred_plabels; i++)
5409 targetm.asm_out.internal_label (asm_out_file, "L",
5410 CODE_LABEL_NUMBER (deferred_plabels[i].internal_label));
5411 assemble_integer (deferred_plabels[i].symbol,
5412 TARGET_64BIT ? 8 : 4, TARGET_64BIT ? 64 : 32, 1);
5416 #ifdef HPUX_LONG_DOUBLE_LIBRARY
5417 /* Initialize optabs to point to HPUX long double emulation routines. */
5419 pa_hpux_init_libfuncs (void)
5421 set_optab_libfunc (add_optab, TFmode, "_U_Qfadd");
5422 set_optab_libfunc (sub_optab, TFmode, "_U_Qfsub");
5423 set_optab_libfunc (smul_optab, TFmode, "_U_Qfmpy");
5424 set_optab_libfunc (sdiv_optab, TFmode, "_U_Qfdiv");
5425 set_optab_libfunc (smin_optab, TFmode, "_U_Qmin");
5426 set_optab_libfunc (smax_optab, TFmode, "_U_Qfmax");
5427 set_optab_libfunc (sqrt_optab, TFmode, "_U_Qfsqrt");
5428 set_optab_libfunc (abs_optab, TFmode, "_U_Qfabs");
5429 set_optab_libfunc (neg_optab, TFmode, "_U_Qfneg");
5431 set_optab_libfunc (eq_optab, TFmode, "_U_Qfeq");
5432 set_optab_libfunc (ne_optab, TFmode, "_U_Qfne");
5433 set_optab_libfunc (gt_optab, TFmode, "_U_Qfgt");
5434 set_optab_libfunc (ge_optab, TFmode, "_U_Qfge");
5435 set_optab_libfunc (lt_optab, TFmode, "_U_Qflt");
5436 set_optab_libfunc (le_optab, TFmode, "_U_Qfle");
5437 set_optab_libfunc (unord_optab, TFmode, "_U_Qfunord");
5439 set_conv_libfunc (sext_optab, TFmode, SFmode, "_U_Qfcnvff_sgl_to_quad");
5440 set_conv_libfunc (sext_optab, TFmode, DFmode, "_U_Qfcnvff_dbl_to_quad");
5441 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_U_Qfcnvff_quad_to_sgl");
5442 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_U_Qfcnvff_quad_to_dbl");
5444 set_conv_libfunc (sfix_optab, SImode, TFmode, TARGET_64BIT
5445 ? "__U_Qfcnvfxt_quad_to_sgl"
5446 : "_U_Qfcnvfxt_quad_to_sgl");
5447 set_conv_libfunc (sfix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_dbl");
5448 set_conv_libfunc (ufix_optab, SImode, TFmode, "_U_Qfcnvfxt_quad_to_usgl");
5449 set_conv_libfunc (ufix_optab, DImode, TFmode, "_U_Qfcnvfxt_quad_to_udbl");
5451 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_U_Qfcnvxf_sgl_to_quad");
5452 set_conv_libfunc (sfloat_optab, TFmode, DImode, "_U_Qfcnvxf_dbl_to_quad");
5453 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_U_Qfcnvxf_usgl_to_quad");
5454 set_conv_libfunc (ufloat_optab, TFmode, DImode, "_U_Qfcnvxf_udbl_to_quad");
5458 /* HP's millicode routines mean something special to the assembler.
5459 Keep track of which ones we have used. */
5461 enum millicodes { remI, remU, divI, divU, mulI, end1000 };
5462 static void import_milli (enum millicodes);
5463 static char imported[(int) end1000];
5464 static const char * const milli_names[] = {"remI", "remU", "divI", "divU", "mulI"};
5465 static const char import_string[] = ".IMPORT $$....,MILLICODE";
5466 #define MILLI_START 10
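/* Illustrative note (added annotation, not from the original source):
   MILLI_START is the offset of the "...." placeholder within
   import_string, so import_milli below overwrites it in place.  For
   example, importing mulI produces ".IMPORT $$mulI,MILLICODE".  */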
5469 import_milli (enum millicodes code)
5471 char str[sizeof (import_string)];
5473 if (!imported[(int) code])
5475 imported[(int) code] = 1;
5476 strcpy (str, import_string);
5477 strncpy (str + MILLI_START, milli_names[(int) code], 4);
5478 output_asm_insn (str, 0);
5482 /* The register constraints have put the operands and return value in
5483 the proper registers. */
5486 output_mul_insn (int unsignedp ATTRIBUTE_UNUSED, rtx insn)
5488 import_milli (mulI);
5489 return output_millicode_call (insn, gen_rtx_SYMBOL_REF (Pmode, "$$mulI"));
5492 /* Emit the rtl for doing a division by a constant. */
5494 /* Do magic division millicodes exist for this value? */
5495 const int magic_milli[]= {0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1};
5497 /* We'll use an array to keep track of the magic millicodes and
5498 whether or not we've used them already. [n][0] is signed, [n][1] is
5501 static int div_milli[16][2];
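/* Illustrative note (added annotation, not from the original source):
   with the magic_milli table above, divisors 3, 5, 6, 7, 9, 10, 12, 14
   and 15 are handled by dedicated millicode routines (e.g. $$divI_3 or
   $$divU_3, as emitted by output_div_insn below); all other divisors
   fall back to the generic $$divI/$$divU calls.  */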
5504 emit_hpdiv_const (rtx *operands, int unsignedp)
5506 if (GET_CODE (operands[2]) == CONST_INT
5507 && INTVAL (operands[2]) > 0
5508 && INTVAL (operands[2]) < 16
5509 && magic_milli[INTVAL (operands[2])])
5511 rtx ret = gen_rtx_REG (SImode, TARGET_64BIT ? 2 : 31);
5513 emit_move_insn (gen_rtx_REG (SImode, 26), operands[1]);
5517 gen_rtvec (6, gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, 29),
5518 gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5520 gen_rtx_REG (SImode, 26),
5522 gen_rtx_CLOBBER (VOIDmode, operands[4]),
5523 gen_rtx_CLOBBER (VOIDmode, operands[3]),
5524 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 26)),
5525 gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 25)),
5526 gen_rtx_CLOBBER (VOIDmode, ret))));
5527 emit_move_insn (operands[0], gen_rtx_REG (SImode, 29));
5534 output_div_insn (rtx *operands, int unsignedp, rtx insn)
5538 /* If the divisor is a constant, try to use one of the special
5540 if (GET_CODE (operands[0]) == CONST_INT)
5542 static char buf[100];
5543 divisor = INTVAL (operands[0]);
5544 if (!div_milli[divisor][unsignedp])
5546 div_milli[divisor][unsignedp] = 1;
5548 output_asm_insn (".IMPORT $$divU_%0,MILLICODE", operands);
5550 output_asm_insn (".IMPORT $$divI_%0,MILLICODE", operands);
5554 sprintf (buf, "$$divU_" HOST_WIDE_INT_PRINT_DEC,
5555 INTVAL (operands[0]));
5556 return output_millicode_call (insn,
5557 gen_rtx_SYMBOL_REF (SImode, buf));
5561 sprintf (buf, "$$divI_" HOST_WIDE_INT_PRINT_DEC,
5562 INTVAL (operands[0]));
5563 return output_millicode_call (insn,
5564 gen_rtx_SYMBOL_REF (SImode, buf));
5567 /* Divisor isn't a special constant. */
5572 import_milli (divU);
5573 return output_millicode_call (insn,
5574 gen_rtx_SYMBOL_REF (SImode, "$$divU"));
5578 import_milli (divI);
5579 return output_millicode_call (insn,
5580 gen_rtx_SYMBOL_REF (SImode, "$$divI"));
5585 /* Output a $$rem millicode to do mod. */
5588 output_mod_insn (int unsignedp, rtx insn)
5592 import_milli (remU);
5593 return output_millicode_call (insn,
5594 gen_rtx_SYMBOL_REF (SImode, "$$remU"));
5598 import_milli (remI);
5599 return output_millicode_call (insn,
5600 gen_rtx_SYMBOL_REF (SImode, "$$remI"));
5605 output_arg_descriptor (rtx call_insn)
5607 const char *arg_regs[4];
5608 enum machine_mode arg_mode;
5610 int i, output_flag = 0;
5613 /* We neither need nor want argument location descriptors for the
5614 64-bit runtime environment or the ELF32 environment. */
5615 if (TARGET_64BIT || TARGET_ELF32)
5618 for (i = 0; i < 4; i++)
5621 /* Specify explicitly that no argument relocations should take place
5622 if using the portable runtime calling conventions. */
5623 if (TARGET_PORTABLE_RUNTIME)
5625 fputs ("\t.CALL ARGW0=NO,ARGW1=NO,ARGW2=NO,ARGW3=NO,RETVAL=NO\n",
5630 gcc_assert (GET_CODE (call_insn) == CALL_INSN);
5631 for (link = CALL_INSN_FUNCTION_USAGE (call_insn);
5632 link; link = XEXP (link, 1))
5634 rtx use = XEXP (link, 0);
5636 if (! (GET_CODE (use) == USE
5637 && GET_CODE (XEXP (use, 0)) == REG
5638 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
5641 arg_mode = GET_MODE (XEXP (use, 0));
5642 regno = REGNO (XEXP (use, 0));
5643 if (regno >= 23 && regno <= 26)
5645 arg_regs[26 - regno] = "GR";
5646 if (arg_mode == DImode)
5647 arg_regs[25 - regno] = "GR";
5649 else if (regno >= 32 && regno <= 39)
5651 if (arg_mode == SFmode)
5652 arg_regs[(regno - 32) / 2] = "FR";
5655 #ifndef HP_FP_ARG_DESCRIPTOR_REVERSED
5656 arg_regs[(regno - 34) / 2] = "FR";
5657 arg_regs[(regno - 34) / 2 + 1] = "FU";
5659 arg_regs[(regno - 34) / 2] = "FU";
5660 arg_regs[(regno - 34) / 2 + 1] = "FR";
5665 fputs ("\t.CALL ", asm_out_file);
5666 for (i = 0; i < 4; i++)
5671 fputc (',', asm_out_file);
5672 fprintf (asm_out_file, "ARGW%d=%s", i, arg_regs[i]);
5675 fputc ('\n', asm_out_file);
5678 static enum reg_class
5679 pa_secondary_reload (bool in_p, rtx x, enum reg_class rclass,
5680 enum machine_mode mode, secondary_reload_info *sri)
5682 int is_symbolic, regno;
5684 /* Handle the easy stuff first. */
5685 if (rclass == R1_REGS)
5691 if (rclass == BASE_REG_CLASS && regno < FIRST_PSEUDO_REGISTER)
5697 /* If we have something like (mem (mem (...))), we can safely assume the
5698 inner MEM will end up in a general register after reloading, so there's
5699 no need for a secondary reload. */
5700 if (GET_CODE (x) == MEM && GET_CODE (XEXP (x, 0)) == MEM)
5703 /* Trying to load a constant into a FP register during PIC code
5704 generation requires %r1 as a scratch register. */
5706 && (mode == SImode || mode == DImode)
5707 && FP_REG_CLASS_P (rclass)
5708 && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
5710 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5711 : CODE_FOR_reload_indi_r1);
5715 /* Profiling showed the PA port spends about 1.3% of its compilation
5716 time in true_regnum from calls inside pa_secondary_reload_class. */
5717 if (regno >= FIRST_PSEUDO_REGISTER || GET_CODE (x) == SUBREG)
5718 regno = true_regnum (x);
5720 /* In order to allow 14-bit displacements in integer loads and stores,
5721 we need to prevent reload from generating out of range integer mode
5722 loads and stores to the floating point registers. Previously, we
5723 used to call for a secondary reload and have emit_move_sequence()
5724 fix the instruction sequence. However, reload occasionally wouldn't
5725 generate the reload and we would end up with an invalid REG+D memory
5726 address. So, now we use an intermediate general register for most
5727 memory loads and stores. */
5728 if ((regno >= FIRST_PSEUDO_REGISTER || regno == -1)
5729 && GET_MODE_CLASS (mode) == MODE_INT
5730 && FP_REG_CLASS_P (rclass))
5732 /* Reload passes (mem:SI (reg/f:DI 30 %r30)) when it wants to check
5733 the secondary reload needed for a pseudo. It never passes a
5735 if (GET_CODE (x) == MEM)
5739 /* We don't need an intermediate for indexed and LO_SUM DLT
5740 memory addresses. When INT14_OK_STRICT is true, it might
5741 appear that we could directly allow register indirect
5742 memory addresses. However, this doesn't work because we
5743 don't support SUBREGs in floating-point register copies
5744 and reload doesn't tell us when it's going to use a SUBREG. */
5745 if (IS_INDEX_ADDR_P (x)
5746 || IS_LO_SUM_DLT_ADDR_P (x))
5749 /* Otherwise, we need an intermediate general register. */
5750 return GENERAL_REGS;
5753 /* Request a secondary reload with a general scratch register
5754 for everything else. ??? Could symbolic operands be handled
5755 directly when generating non-pic PA 2.0 code? */
5756 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5760 /* We need a secondary register (GPR) for copies between the SAR
5761 and anything other than a general register. */
5762 if (rclass == SHIFT_REGS && (regno <= 0 || regno >= 32))
5764 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5768 /* A SAR<->FP register copy requires a secondary register (GPR) as
5769 well as secondary memory. */
5770 if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER
5771 && (REGNO_REG_CLASS (regno) == SHIFT_REGS
5772 && FP_REG_CLASS_P (rclass)))
5774 sri->icode = in_p ? reload_in_optab[mode] : reload_out_optab[mode];
5778 /* Secondary reloads of symbolic operands require %r1 as a scratch
5779 register when we're generating PIC code and when the operand isn't
5781 if (GET_CODE (x) == HIGH)
5784 /* Profiling has shown GCC spends about 2.6% of its compilation
5785 time in symbolic_operand from calls inside pa_secondary_reload_class.
5786 So, we use an inline copy to avoid useless work. */
5787 switch (GET_CODE (x))
5792 is_symbolic = !SYMBOL_REF_TLS_MODEL (x);
5799 is_symbolic = (((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
5800 && !SYMBOL_REF_TLS_MODEL (XEXP (op, 0)))
5801 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
5802 && GET_CODE (XEXP (op, 1)) == CONST_INT);
5809 if (is_symbolic && (flag_pic || !read_only_operand (x, VOIDmode)))
5811 gcc_assert (mode == SImode || mode == DImode);
5812 sri->icode = (mode == SImode ? CODE_FOR_reload_insi_r1
5813 : CODE_FOR_reload_indi_r1);
5819 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. The argument pointer
5820 is only marked as live on entry by df-scan when it is a fixed
5821 register. It isn't a fixed register in the 64-bit runtime,
5822 so we need to mark it here. */
5825 pa_extra_live_on_entry (bitmap regs)
5828 bitmap_set_bit (regs, ARG_POINTER_REGNUM);
5831 /* Implement EH_RETURN_HANDLER_RTX. The MEM needs to be volatile
5832 to prevent it from being deleted. */
5835 pa_eh_return_handler_rtx (void)
5839 tmp = gen_rtx_PLUS (word_mode, frame_pointer_rtx,
5840 TARGET_64BIT ? GEN_INT (-16) : GEN_INT (-20));
5841 tmp = gen_rtx_MEM (word_mode, tmp);
5846 /* In the 32-bit runtime, arguments larger than eight bytes are passed
5847 by invisible reference. As a GCC extension, we also pass anything
5848 with a zero or variable size by reference.
5850 The 64-bit runtime does not describe passing any types by invisible
5851 reference. The internals of GCC can't currently handle passing
5852 empty structures, and zero or variable length arrays when they are
5853 not passed entirely on the stack or by reference. Thus, as a GCC
5854 extension, we pass these types by reference. The HP compiler doesn't
5855 support these types, so hopefully there shouldn't be any compatibility
5856 issues. This may have to be revisited when HP releases a C99 compiler
5857 or updates the ABI. */
5860 pa_pass_by_reference (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED,
5861 enum machine_mode mode, const_tree type,
5862 bool named ATTRIBUTE_UNUSED)
5867 size = int_size_in_bytes (type);
5869 size = GET_MODE_SIZE (mode);
5874 return size <= 0 || size > 8;
5878 function_arg_padding (enum machine_mode mode, const_tree type)
5883 && (AGGREGATE_TYPE_P (type)
5884 || TREE_CODE (type) == COMPLEX_TYPE
5885 || TREE_CODE (type) == VECTOR_TYPE)))
5887 /* Return none if justification is not required. */
5889 && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
5890 && (int_size_in_bytes (type) * BITS_PER_UNIT) % PARM_BOUNDARY == 0)
5893 /* The directions set here are ignored when a BLKmode argument larger
5894 than a word is placed in a register. Different code is used for
5895 the stack and registers. This makes it difficult to have a
5896 consistent data representation for both the stack and registers.
5897 For both runtimes, the justification and padding for arguments on
5898 the stack and in registers should be identical. */
5900 /* The 64-bit runtime specifies left justification for aggregates. */
5903 /* The 32-bit runtime architecture specifies right justification.
5904 When the argument is passed on the stack, the argument is padded
5905 with garbage on the left. The HP compiler pads with zeros. */
5909 if (GET_MODE_BITSIZE (mode) < PARM_BOUNDARY)
5916 /* Do what is necessary for `va_start'. We look at the current function
5917 to determine if stdargs or varargs is used and fill in an initial
5918 va_list. A pointer to this constructor is returned. */
5921 hppa_builtin_saveregs (void)
5924 tree fntype = TREE_TYPE (current_function_decl);
5925 int argadj = ((!(TYPE_ARG_TYPES (fntype) != 0
5926 && (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
5927 != void_type_node)))
5928 ? UNITS_PER_WORD : 0);
5931 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
5933 offset = crtl->args.arg_offset_rtx;
5939 /* Adjust for varargs/stdarg differences. */
5941 offset = plus_constant (crtl->args.arg_offset_rtx, -argadj);
5943 offset = crtl->args.arg_offset_rtx;
5945 /* We need to save %r26 .. %r19 inclusive starting at offset -64
5946 from the incoming arg pointer and growing to larger addresses. */
5947 for (i = 26, off = -64; i >= 19; i--, off += 8)
5948 emit_move_insn (gen_rtx_MEM (word_mode,
5949 plus_constant (arg_pointer_rtx, off)),
5950 gen_rtx_REG (word_mode, i));
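/* Illustrative note (added annotation, not from the original source):
   the loop above stores %r26 at -64, %r25 at -56, %r24 at -48,
   %r23 at -40, %r22 at -32, %r21 at -24, %r20 at -16 and %r19 at -8
   relative to the incoming argument pointer, one word_mode slot every
   8 bytes.  */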
5952 /* The incoming args pointer points just beyond the flushback area;
5953 normally this is not a serious concern. However, when we are doing
5954 varargs/stdargs we want to make the arg pointer point to the start
5955 of the incoming argument area. */
5956 emit_move_insn (virtual_incoming_args_rtx,
5957 plus_constant (arg_pointer_rtx, -64));
5959 /* Now return a pointer to the first anonymous argument. */
5960 return copy_to_reg (expand_binop (Pmode, add_optab,
5961 virtual_incoming_args_rtx,
5962 offset, 0, 0, OPTAB_LIB_WIDEN));
5965 /* Store general registers on the stack. */
5966 dest = gen_rtx_MEM (BLKmode,
5967 plus_constant (crtl->args.internal_arg_pointer,
5969 set_mem_alias_set (dest, get_varargs_alias_set ());
5970 set_mem_align (dest, BITS_PER_WORD);
5971 move_block_from_reg (23, dest, 4);
5973 /* move_block_from_reg will emit code to store the argument registers
5974 individually as scalar stores.
5976 However, other insns may later load from the same addresses for
5977 a structure load (passing a struct to a varargs routine).
5979 The alias code assumes that such aliasing can never happen, so we
5980 have to keep memory referencing insns from moving up beyond the
5981 last argument register store. So we emit a blockage insn here. */
5982 emit_insn (gen_blockage ());
5984 return copy_to_reg (expand_binop (Pmode, add_optab,
5985 crtl->args.internal_arg_pointer,
5986 offset, 0, 0, OPTAB_LIB_WIDEN));
5990 hppa_va_start (tree valist, rtx nextarg)
5992 nextarg = expand_builtin_saveregs ();
5993 std_expand_builtin_va_start (valist, nextarg);
5997 hppa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
6002 /* Args grow upward. We can use the generic routines. */
6003 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
6005 else /* !TARGET_64BIT */
6007 tree ptr = build_pointer_type (type);
6010 unsigned int size, ofs;
6013 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
6017 ptr = build_pointer_type (type);
6019 size = int_size_in_bytes (type);
6020 valist_type = TREE_TYPE (valist);
6022 /* Args grow down. Not handled by generic routines. */
6024 u = fold_convert (sizetype, size_in_bytes (type));
6025 u = fold_build1 (NEGATE_EXPR, sizetype, u);
6026 t = build2 (POINTER_PLUS_EXPR, valist_type, valist, u);
6028 /* Copied from va-pa.h, but we probably don't need to align to
6029 word size, since we generate and preserve that invariant. */
6030 u = size_int (size > 4 ? -8 : -4);
6031 t = fold_convert (sizetype, t);
6032 t = build2 (BIT_AND_EXPR, sizetype, t, u);
6033 t = fold_convert (valist_type, t);
6035 t = build2 (MODIFY_EXPR, valist_type, valist, t);
6037 ofs = (8 - size) % 4;
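/* Illustrative note (added annotation, not from the original source):
   the adjustment above right-justifies small arguments within their
   slot, matching the 32-bit runtime's padding rules.  For example, a
   1-byte argument gets ofs == 3, a 2-byte argument gets ofs == 2, and
   a 4-byte (or 8-byte) argument gets ofs == 0.  */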
6041 t = build2 (POINTER_PLUS_EXPR, valist_type, t, u);
6044 t = fold_convert (ptr, t);
6045 t = build_va_arg_indirect_ref (t);
6048 t = build_va_arg_indirect_ref (t);
6054 /* True if MODE is valid for the target. By "valid", we mean able to
6055 be manipulated in non-trivial ways. In particular, this means all
6056 the arithmetic is supported.
6058 Currently, TImode is not valid as the HP 64-bit runtime documentation
6059 doesn't document the alignment and calling conventions for this type.
6060 Thus, we return false when PRECISION is 2 * BITS_PER_WORD and
6061 2 * BITS_PER_WORD isn't equal to LONG_LONG_TYPE_SIZE. */
6064 pa_scalar_mode_supported_p (enum machine_mode mode)
6066 int precision = GET_MODE_PRECISION (mode);
6068 switch (GET_MODE_CLASS (mode))
6070 case MODE_PARTIAL_INT:
6072 if (precision == CHAR_TYPE_SIZE)
6074 if (precision == SHORT_TYPE_SIZE)
6076 if (precision == INT_TYPE_SIZE)
6078 if (precision == LONG_TYPE_SIZE)
6080 if (precision == LONG_LONG_TYPE_SIZE)
6085 if (precision == FLOAT_TYPE_SIZE)
6087 if (precision == DOUBLE_TYPE_SIZE)
6089 if (precision == LONG_DOUBLE_TYPE_SIZE)
6093 case MODE_DECIMAL_FLOAT:
6101 /* This routine handles all the normal conditional branch sequences we
6102 might need to generate. It handles compare immediate vs compare
6103 register, nullification of delay slots, varying length branches,
6104 negated branches, and all combinations of the above. It returns the
6105 output appropriate to emit the branch corresponding to all given
6109 output_cbranch (rtx *operands, int negated, rtx insn)
6111 static char buf[100];
6113 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6114 int length = get_attr_length (insn);
6117 /* A conditional branch to the following instruction (e.g. the delay slot)
6118 is asking for a disaster. This can happen when not optimizing and
6119 when jump optimization fails.
6121 While it is usually safe to emit nothing, this can fail if the
6122 preceding instruction is a nullified branch with an empty delay
6123 slot and the same branch target as this branch. We could check
6124 for this but jump optimization should eliminate nop jumps. It
6125 is always safe to emit a nop. */
6126 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6129 /* The doubleword form of the cmpib instruction doesn't have the LEU
6130 and GTU conditions while the cmpb instruction does. Since we accept
6131 zero for cmpb, we must ensure that we use cmpb for the comparison. */
6132 if (GET_MODE (operands[1]) == DImode && operands[2] == const0_rtx)
6133 operands[2] = gen_rtx_REG (DImode, 0);
6134 if (GET_MODE (operands[2]) == DImode && operands[1] == const0_rtx)
6135 operands[1] = gen_rtx_REG (DImode, 0);
6137 /* If this is a long branch with its delay slot unfilled, set `nullify'
6138 as it can nullify the delay slot and save a nop. */
6139 if (length == 8 && dbr_sequence_length () == 0)
6142 /* If this is a short forward conditional branch which did not get
6143 its delay slot filled, the delay slot can still be nullified. */
6144 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6145 nullify = forward_branch_p (insn);
6147 /* A forward branch over a single nullified insn can be done with a
6148 comclr instruction. This avoids a single cycle penalty due to
6149 mis-predicted branch if we fall through (branch not taken). */
6151 && next_real_insn (insn) != 0
6152 && get_attr_length (next_real_insn (insn)) == 4
6153 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6159 /* All short conditional branches except backwards with an unfilled
6163 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6165 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6166 if (GET_MODE (operands[1]) == DImode)
6169 strcat (buf, "%B3");
6171 strcat (buf, "%S3");
6173 strcat (buf, " %2,%r1,%%r0");
6175 strcat (buf, ",n %2,%r1,%0");
6177 strcat (buf, " %2,%r1,%0");
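/* Illustrative note (added annotation, not from the original source):
   for a plain 32-bit compare-and-branch (useskip and nullify both
   false, not negated), the template built above is
   "{com%I2b,|cmp%I2b,}%S3 %2,%r1,%0"; the assembler-dialect braces and
   the %S3/%I2 print_operand codes reduce this to something like
   "cmpb,<cond> %2,%r1,%0" in the final output.  */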
6180 /* All long conditionals. Note a short backward branch with an
6181 unfilled delay slot is treated just like a long backward branch
6182 with an unfilled delay slot. */
6184 /* Handle weird backwards branch with a filled delay slot
6185 which is nullified. */
6186 if (dbr_sequence_length () != 0
6187 && ! forward_branch_p (insn)
6190 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6191 if (GET_MODE (operands[1]) == DImode)
6194 strcat (buf, "%S3");
6196 strcat (buf, "%B3");
6197 strcat (buf, ",n %2,%r1,.+12\n\tb %0");
6199 /* Handle short backwards branch with an unfilled delay slot.
6200 Using a comb;nop rather than comiclr;bl saves 1 cycle for both
6201 taken and untaken branches. */
6202 else if (dbr_sequence_length () == 0
6203 && ! forward_branch_p (insn)
6204 && INSN_ADDRESSES_SET_P ()
6205 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6206 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6208 strcpy (buf, "{com%I2b,|cmp%I2b,}");
6209 if (GET_MODE (operands[1]) == DImode)
6212 strcat (buf, "%B3 %2,%r1,%0%#");
6214 strcat (buf, "%S3 %2,%r1,%0%#");
6218 strcpy (buf, "{com%I2clr,|cmp%I2clr,}");
6219 if (GET_MODE (operands[1]) == DImode)
6222 strcat (buf, "%S3");
6224 strcat (buf, "%B3");
6226 strcat (buf, " %2,%r1,%%r0\n\tb,n %0");
6228 strcat (buf, " %2,%r1,%%r0\n\tb %0");
6233 /* The reversed conditional branch must branch over one additional
6234 instruction if the delay slot is filled and needs to be extracted
6235 by output_lbranch. If the delay slot is empty or this is a
6236 nullified forward branch, the instruction after the reversed
6237 conditional branch must be nullified. */
6238 if (dbr_sequence_length () == 0
6239 || (nullify && forward_branch_p (insn)))
6243 operands[4] = GEN_INT (length);
6248 operands[4] = GEN_INT (length + 4);
6251 /* Create a reversed conditional branch which branches around
6252 the following insns. */
6253 if (GET_MODE (operands[1]) != DImode)
6259 "{com%I2b,%S3,n %2,%r1,.+%4|cmp%I2b,%S3,n %2,%r1,.+%4}");
6262 "{com%I2b,%B3,n %2,%r1,.+%4|cmp%I2b,%B3,n %2,%r1,.+%4}");
6268 "{com%I2b,%S3 %2,%r1,.+%4|cmp%I2b,%S3 %2,%r1,.+%4}");
6271 "{com%I2b,%B3 %2,%r1,.+%4|cmp%I2b,%B3 %2,%r1,.+%4}");
6280 "{com%I2b,*%S3,n %2,%r1,.+%4|cmp%I2b,*%S3,n %2,%r1,.+%4}");
6283 "{com%I2b,*%B3,n %2,%r1,.+%4|cmp%I2b,*%B3,n %2,%r1,.+%4}");
6289 "{com%I2b,*%S3 %2,%r1,.+%4|cmp%I2b,*%S3 %2,%r1,.+%4}");
6292 "{com%I2b,*%B3 %2,%r1,.+%4|cmp%I2b,*%B3 %2,%r1,.+%4}");
6296 output_asm_insn (buf, operands);
6297 return output_lbranch (operands[0], insn, xdelay);
6302 /* This routine handles output of long unconditional branches that
6303 exceed the maximum range of a simple branch instruction. Since
6304 we don't have a register available for the branch, we save register
6305 %r1 in the frame marker, load the branch destination DEST into %r1,
6306 execute the branch, and restore %r1 in the delay slot of the branch.
6308 Since long branches may have an insn in the delay slot and the
6309 delay slot is used to restore %r1, we in general need to extract
6310 this insn and execute it before the branch. However, to facilitate
6311 use of this function by conditional branches, we also provide an
6312 option to not extract the delay insn so that it will be emitted
6313 after the long branch. So, if there is an insn in the delay slot,
6314 it is extracted if XDELAY is nonzero.
6316 The lengths of the various long-branch sequences are 20, 16 and 24
6317 bytes for the portable runtime, non-PIC and PIC cases, respectively. */
6320 output_lbranch (rtx dest, rtx insn, int xdelay)
6324 xoperands[0] = dest;
6326 /* First, free up the delay slot. */
6327 if (xdelay && dbr_sequence_length () != 0)
6329 /* We can't handle a jump in the delay slot. */
6330 gcc_assert (GET_CODE (NEXT_INSN (insn)) != JUMP_INSN);
6332 final_scan_insn (NEXT_INSN (insn), asm_out_file,
6335 /* Now delete the delay insn. */
6336 SET_INSN_DELETED (NEXT_INSN (insn));
6339 /* Output an insn to save %r1. The runtime documentation doesn't
6341 specify whether the "Clean Up" slot in the caller's frame can
6341 be clobbered by the callee. It isn't copied by HP's builtin
6342 alloca, so this suggests that it can be clobbered if necessary.
6343 The "Static Link" location is copied by HP builtin alloca, so
6344 we avoid using it. Using the cleanup slot might be a problem
6345 if we have to interoperate with languages that pass cleanup
6346 information. However, it should be possible to handle these
6347 situations with GCC's asm feature.
6349 The "Current RP" slot is reserved for the called procedure, so
6350 we try to use it when we don't have a frame of our own. It's
6351 rather unlikely that we won't have a frame when we need to emit
6354 Really the way to go long term is a register scavenger; go to
6355 the target of the jump and find a register which we can use
6356 as a scratch to hold the value in %r1. Then, we wouldn't have
6357 to free up the delay slot or clobber a slot that may be needed
6358 for other purposes. */
6361 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6362 /* Use the return pointer slot in the frame marker. */
6363 output_asm_insn ("std %%r1,-16(%%r30)", xoperands);
6365 /* Use the slot at -40 in the frame marker since HP builtin
6366 alloca doesn't copy it. */
6367 output_asm_insn ("std %%r1,-40(%%r30)", xoperands);
6371 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6372 /* Use the return pointer slot in the frame marker. */
6373 output_asm_insn ("stw %%r1,-20(%%r30)", xoperands);
6375 /* Use the "Clean Up" slot in the frame marker. In GCC,
6376 the only other use of this location is for copying a
6377 floating point double argument from a floating-point
6378 register to two general registers. The copy is done
6379 as an "atomic" operation when outputting a call, so it
6380 won't interfere with our using the location here. */
6381 output_asm_insn ("stw %%r1,-12(%%r30)", xoperands);
6384 if (TARGET_PORTABLE_RUNTIME)
6386 output_asm_insn ("ldil L'%0,%%r1", xoperands);
6387 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
6388 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6392 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
6393 if (TARGET_SOM || !TARGET_GAS)
6395 xoperands[1] = gen_label_rtx ();
6396 output_asm_insn ("addil L'%l0-%l1,%%r1", xoperands);
6397 targetm.asm_out.internal_label (asm_out_file, "L",
6398 CODE_LABEL_NUMBER (xoperands[1]));
6399 output_asm_insn ("ldo R'%l0-%l1(%%r1),%%r1", xoperands);
6403 output_asm_insn ("addil L'%l0-$PIC_pcrel$0+4,%%r1", xoperands);
6404 output_asm_insn ("ldo R'%l0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
6406 output_asm_insn ("bv %%r0(%%r1)", xoperands);
6409 /* Now output a very long branch to the original target. */
6410 output_asm_insn ("ldil L'%l0,%%r1\n\tbe R'%l0(%%sr4,%%r1)", xoperands);
6412 /* Now restore the value of %r1 in the delay slot. */
6415 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6416 return "ldd -16(%%r30),%%r1";
6418 return "ldd -40(%%r30),%%r1";
6422 if (actual_fsize == 0 && !df_regs_ever_live_p (2))
6423 return "ldw -20(%%r30),%%r1";
6425 return "ldw -12(%%r30),%%r1";
6429 /* This routine handles all the branch-on-bit conditional branch sequences we
6430 might need to generate. It handles nullification of delay slots,
6431 varying length branches, negated branches and all combinations of the
6432 above. It returns the appropriate output template to emit the branch. */
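/* For illustration only: in the short in-range case the emitted template
   has the shape

	bb,cond[,n] %reg,%bit,target

   while the long forward case is turned into an extract that conditionally
   nullifies a following unconditional branch, roughly

	{extrs|extrw,s},cond %reg,%bit,1,%r0
	b,n target

   where "cond" stands for the completer selected from WHICH and NEGATED.  */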
6435 output_bb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6437 static char buf[100];
6439 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6440 int length = get_attr_length (insn);
6443 /* A conditional branch to the following instruction (e.g. the delay slot) is
6444 asking for a disaster. I do not think this can happen as this pattern
6445 is only used when optimizing; jump optimization should eliminate the
6446 jump. But be prepared just in case. */
6448 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6451 /* If this is a long branch with its delay slot unfilled, set `nullify'
6452 as it can nullify the delay slot and save a nop. */
6453 if (length == 8 && dbr_sequence_length () == 0)
6456 /* If this is a short forward conditional branch which did not get
6457 its delay slot filled, the delay slot can still be nullified. */
6458 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6459 nullify = forward_branch_p (insn);
6461 /* A forward branch over a single nullified insn can be done with an
6462 extrs instruction. This avoids a single cycle penalty due to
6463 mis-predicted branch if we fall through (branch not taken). */
6466 && next_real_insn (insn) != 0
6467 && get_attr_length (next_real_insn (insn)) == 4
6468 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6475 /* All short conditional branches except backwards with an unfilled
6479 strcpy (buf, "{extrs,|extrw,s,}");
6481 strcpy (buf, "bb,");
6482 if (useskip && GET_MODE (operands[0]) == DImode)
6483 strcpy (buf, "extrd,s,*");
6484 else if (GET_MODE (operands[0]) == DImode)
6485 strcpy (buf, "bb,*");
6486 if ((which == 0 && negated)
6487 || (which == 1 && ! negated))
6492 strcat (buf, " %0,%1,1,%%r0");
6493 else if (nullify && negated)
6494 strcat (buf, ",n %0,%1,%3");
6495 else if (nullify && ! negated)
6496 strcat (buf, ",n %0,%1,%2");
6497 else if (! nullify && negated)
6498 strcat (buf, "%0,%1,%3");
6499 else if (! nullify && ! negated)
6500 strcat (buf, " %0,%1,%2");
6503 /* All long conditionals. Note a short backward branch with an
6504 unfilled delay slot is treated just like a long backward branch
6505 with an unfilled delay slot. */
6507 /* Handle weird backwards branch with a filled delay slot
6508 which is nullified. */
6509 if (dbr_sequence_length () != 0
6510 && ! forward_branch_p (insn)
6513 strcpy (buf, "bb,");
6514 if (GET_MODE (operands[0]) == DImode)
6516 if ((which == 0 && negated)
6517 || (which == 1 && ! negated))
6522 strcat (buf, ",n %0,%1,.+12\n\tb %3");
6524 strcat (buf, ",n %0,%1,.+12\n\tb %2");
6526 /* Handle short backwards branch with an unfilled delay slot.
6527 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6528 taken and untaken branches. */
6529 else if (dbr_sequence_length () == 0
6530 && ! forward_branch_p (insn)
6531 && INSN_ADDRESSES_SET_P ()
6532 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6533 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6535 strcpy (buf, "bb,");
6536 if (GET_MODE (operands[0]) == DImode)
6538 if ((which == 0 && negated)
6539 || (which == 1 && ! negated))
6544 strcat (buf, " %0,%1,%3%#");
6546 strcat (buf, " %0,%1,%2%#");
6550 if (GET_MODE (operands[0]) == DImode)
6551 strcpy (buf, "extrd,s,*");
6553 strcpy (buf, "{extrs,|extrw,s,}");
6554 if ((which == 0 && negated)
6555 || (which == 1 && ! negated))
6559 if (nullify && negated)
6560 strcat (buf, " %0,%1,1,%%r0\n\tb,n %3");
6561 else if (nullify && ! negated)
6562 strcat (buf, " %0,%1,1,%%r0\n\tb,n %2");
6564 strcat (buf, " %0,%1,1,%%r0\n\tb %3");
6566 strcat (buf, " %0,%1,1,%%r0\n\tb %2");
6571 /* The reversed conditional branch must branch over one additional
6572 instruction if the delay slot is filled and needs to be extracted
6573 by output_lbranch. If the delay slot is empty or this is a
6574 nullified forward branch, the instruction after the reversed
6575 condition branch must be nullified. */
6576 if (dbr_sequence_length () == 0
6577 || (nullify && forward_branch_p (insn)))
6581 operands[4] = GEN_INT (length);
6586 operands[4] = GEN_INT (length + 4);
6589 if (GET_MODE (operands[0]) == DImode)
6590 strcpy (buf, "bb,*");
6592 strcpy (buf, "bb,");
6593 if ((which == 0 && negated)
6594 || (which == 1 && !negated))
6599 strcat (buf, ",n %0,%1,.+%4");
6601 strcat (buf, " %0,%1,.+%4");
6602 output_asm_insn (buf, operands);
6603 return output_lbranch (negated ? operands[3] : operands[2],
6609 /* This routine handles all the branch-on-variable-bit conditional branch
6610 sequences we might need to generate. It handles nullification of delay
6611 slots, varying length branches, negated branches and all combinations
6612 of the above. It returns the appropriate output template to emit the
6616 output_bvb (rtx *operands ATTRIBUTE_UNUSED, int negated, rtx insn, int which)
6618 static char buf[100];
6620 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6621 int length = get_attr_length (insn);
6624 /* A conditional branch to the following instruction (e.g. the delay slot) is
6625 asking for a disaster. I do not think this can happen as this pattern
6626 is only used when optimizing; jump optimization should eliminate the
6627 jump. But be prepared just in case. */
6629 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6632 /* If this is a long branch with its delay slot unfilled, set `nullify'
6633 as it can nullify the delay slot and save a nop. */
6634 if (length == 8 && dbr_sequence_length () == 0)
6637 /* If this is a short forward conditional branch which did not get
6638 its delay slot filled, the delay slot can still be nullified. */
6639 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6640 nullify = forward_branch_p (insn);
6642 /* A forward branch over a single nullified insn can be done with an
6643 extrs instruction. This avoids a single cycle penalty due to
6644 mis-predicted branch if we fall through (branch not taken). */
6647 && next_real_insn (insn) != 0
6648 && get_attr_length (next_real_insn (insn)) == 4
6649 && JUMP_LABEL (insn) == next_nonnote_insn (next_real_insn (insn))
6656 /* All short conditional branches except backwards with an unfilled
6660 strcpy (buf, "{vextrs,|extrw,s,}");
6662 strcpy (buf, "{bvb,|bb,}");
6663 if (useskip && GET_MODE (operands[0]) == DImode)
6664 strcpy (buf, "extrd,s,*");
6665 else if (GET_MODE (operands[0]) == DImode)
6666 strcpy (buf, "bb,*");
6667 if ((which == 0 && negated)
6668 || (which == 1 && ! negated))
6673 strcat (buf, "{ %0,1,%%r0| %0,%%sar,1,%%r0}");
6674 else if (nullify && negated)
6675 strcat (buf, "{,n %0,%3|,n %0,%%sar,%3}");
6676 else if (nullify && ! negated)
6677 strcat (buf, "{,n %0,%2|,n %0,%%sar,%2}");
6678 else if (! nullify && negated)
6679 strcat (buf, "{%0,%3|%0,%%sar,%3}");
6680 else if (! nullify && ! negated)
6681 strcat (buf, "{ %0,%2| %0,%%sar,%2}");
6684 /* All long conditionals. Note a short backward branch with an
6685 unfilled delay slot is treated just like a long backward branch
6686 with an unfilled delay slot. */
6688 /* Handle weird backwards branch with a filled delay slot
6689 which is nullified. */
6690 if (dbr_sequence_length () != 0
6691 && ! forward_branch_p (insn)
6694 strcpy (buf, "{bvb,|bb,}");
6695 if (GET_MODE (operands[0]) == DImode)
6697 if ((which == 0 && negated)
6698 || (which == 1 && ! negated))
6703 strcat (buf, "{,n %0,.+12\n\tb %3|,n %0,%%sar,.+12\n\tb %3}");
6705 strcat (buf, "{,n %0,.+12\n\tb %2|,n %0,%%sar,.+12\n\tb %2}");
6707 /* Handle short backwards branch with an unfilled delay slot.
6708 Using a bb;nop rather than extrs;bl saves 1 cycle for both
6709 taken and untaken branches. */
6710 else if (dbr_sequence_length () == 0
6711 && ! forward_branch_p (insn)
6712 && INSN_ADDRESSES_SET_P ()
6713 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6714 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6716 strcpy (buf, "{bvb,|bb,}");
6717 if (GET_MODE (operands[0]) == DImode)
6719 if ((which == 0 && negated)
6720 || (which == 1 && ! negated))
6725 strcat (buf, "{ %0,%3%#| %0,%%sar,%3%#}");
6727 strcat (buf, "{ %0,%2%#| %0,%%sar,%2%#}");
6731 strcpy (buf, "{vextrs,|extrw,s,}");
6732 if (GET_MODE (operands[0]) == DImode)
6733 strcpy (buf, "extrd,s,*");
6734 if ((which == 0 && negated)
6735 || (which == 1 && ! negated))
6739 if (nullify && negated)
6740 strcat (buf, "{ %0,1,%%r0\n\tb,n %3| %0,%%sar,1,%%r0\n\tb,n %3}");
6741 else if (nullify && ! negated)
6742 strcat (buf, "{ %0,1,%%r0\n\tb,n %2| %0,%%sar,1,%%r0\n\tb,n %2}");
6744 strcat (buf, "{ %0,1,%%r0\n\tb %3| %0,%%sar,1,%%r0\n\tb %3}");
6746 strcat (buf, "{ %0,1,%%r0\n\tb %2| %0,%%sar,1,%%r0\n\tb %2}");
6751 /* The reversed conditional branch must branch over one additional
6752 instruction if the delay slot is filled and needs to be extracted
6753 by output_lbranch. If the delay slot is empty or this is a
6754 nullified forward branch, the instruction after the reversed
6755 condition branch must be nullified. */
6756 if (dbr_sequence_length () == 0
6757 || (nullify && forward_branch_p (insn)))
6761 operands[4] = GEN_INT (length);
6766 operands[4] = GEN_INT (length + 4);
6769 if (GET_MODE (operands[0]) == DImode)
6770 strcpy (buf, "bb,*");
6772 strcpy (buf, "{bvb,|bb,}");
6773 if ((which == 0 && negated)
6774 || (which == 1 && !negated))
6779 strcat (buf, ",n {%0,.+%4|%0,%%sar,.+%4}");
6781 strcat (buf, " {%0,.+%4|%0,%%sar,.+%4}");
6782 output_asm_insn (buf, operands);
6783 return output_lbranch (negated ? operands[3] : operands[2],
6789 /* Return the output template for emitting a dbra type insn.
6791 Note it may perform some output operations on its own before
6792 returning the final output string. */
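/* A sketch of the common register case (operand numbering as in the
   templates below): when the target is in range, the loop update and the
   branch collapse into a single instruction of the form

	addib,cond step,%counter,target

   The FP-register and memory alternatives bounce the counter through
   memory first and then fall back to similar sequences.  */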
6794 output_dbra (rtx *operands, rtx insn, int which_alternative)
6796 int length = get_attr_length (insn);
6798 /* A conditional branch to the following instruction (e.g. the delay slot) is
6799 asking for a disaster. Be prepared! */
6801 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6803 if (which_alternative == 0)
6804 return "ldo %1(%0),%0";
6805 else if (which_alternative == 1)
6807 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)", operands);
6808 output_asm_insn ("ldw -16(%%r30),%4", operands);
6809 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6810 return "{fldws|fldw} -16(%%r30),%0";
6814 output_asm_insn ("ldw %0,%4", operands);
6815 return "ldo %1(%4),%4\n\tstw %4,%0";
6819 if (which_alternative == 0)
6821 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6824 /* If this is a long branch with its delay slot unfilled, set `nullify'
6825 as it can nullify the delay slot and save a nop. */
6826 if (length == 8 && dbr_sequence_length () == 0)
6829 /* If this is a short forward conditional branch which did not get
6830 its delay slot filled, the delay slot can still be nullified. */
6831 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6832 nullify = forward_branch_p (insn);
6838 return "addib,%C2,n %1,%0,%3";
6840 return "addib,%C2 %1,%0,%3";
6843 /* Handle weird backwards branch with a filled delay slot
6844 which is nullified. */
6845 if (dbr_sequence_length () != 0
6846 && ! forward_branch_p (insn)
6848 return "addib,%N2,n %1,%0,.+12\n\tb %3";
6849 /* Handle short backwards branch with an unfilled delay slot.
6850 Using an addb;nop rather than addi;bl saves 1 cycle for both
6851 taken and untaken branches. */
6852 else if (dbr_sequence_length () == 0
6853 && ! forward_branch_p (insn)
6854 && INSN_ADDRESSES_SET_P ()
6855 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
6856 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
6857 return "addib,%C2 %1,%0,%3%#";
6859 /* Handle normal cases. */
6861 return "addi,%N2 %1,%0,%0\n\tb,n %3";
6863 return "addi,%N2 %1,%0,%0\n\tb %3";
6866 /* The reversed conditional branch must branch over one additional
6867 instruction if the delay slot is filled and needs to be extracted
6868 by output_lbranch. If the delay slot is empty or this is a
6869 nullified forward branch, the instruction after the reversed
6870 condition branch must be nullified. */
6871 if (dbr_sequence_length () == 0
6872 || (nullify && forward_branch_p (insn)))
6876 operands[4] = GEN_INT (length);
6881 operands[4] = GEN_INT (length + 4);
6885 output_asm_insn ("addib,%N2,n %1,%0,.+%4", operands);
6887 output_asm_insn ("addib,%N2 %1,%0,.+%4", operands);
6889 return output_lbranch (operands[3], insn, xdelay);
6893 /* Deal with gross reload from FP register case. */
6894 else if (which_alternative == 1)
6896 /* Move loop counter from FP register to MEM then into a GR,
6897 increment the GR, store the GR into MEM, and finally reload
6898 the FP register from MEM from within the branch's delay slot. */
6899 output_asm_insn ("{fstws|fstw} %0,-16(%%r30)\n\tldw -16(%%r30),%4",
6901 output_asm_insn ("ldo %1(%4),%4\n\tstw %4,-16(%%r30)", operands);
6903 return "{comb|cmpb},%S2 %%r0,%4,%3\n\t{fldws|fldw} -16(%%r30),%0";
6904 else if (length == 28)
6905 return "{comclr|cmpclr},%B2 %%r0,%4,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
6908 operands[5] = GEN_INT (length - 16);
6909 output_asm_insn ("{comb|cmpb},%B2 %%r0,%4,.+%5", operands);
6910 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
6911 return output_lbranch (operands[3], insn, 0);
6914 /* Deal with gross reload from memory case. */
6917 /* Reload loop counter from memory, the store back to memory
6918 happens in the branch's delay slot. */
6919 output_asm_insn ("ldw %0,%4", operands);
6921 return "addib,%C2 %1,%4,%3\n\tstw %4,%0";
6922 else if (length == 16)
6923 return "addi,%N2 %1,%4,%4\n\tb %3\n\tstw %4,%0";
6926 operands[5] = GEN_INT (length - 4);
6927 output_asm_insn ("addib,%N2 %1,%4,.+%5\n\tstw %4,%0", operands);
6928 return output_lbranch (operands[3], insn, 0);
6933 /* Return the output template for emitting a movb type insn.
6935 Note it may perform some output operations on its own before
6936 returning the final output string. */
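/* A sketch of the common register-destination case: when the target is in
   range, the copy and the branch collapse into a single

	movb,cond %src,%dest,target

   The other alternatives move the value through memory (and, for the SAR
   destination, through mtsar) before branching with comb/comclr style
   sequences.  */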
6938 output_movb (rtx *operands, rtx insn, int which_alternative,
6939 int reverse_comparison)
6941 int length = get_attr_length (insn);
6943 /* A conditional branch to the following instruction (e.g. the delay slot) is
6944 asking for a disaster. Be prepared! */
6946 if (next_real_insn (JUMP_LABEL (insn)) == next_real_insn (insn))
6948 if (which_alternative == 0)
6949 return "copy %1,%0";
6950 else if (which_alternative == 1)
6952 output_asm_insn ("stw %1,-16(%%r30)", operands);
6953 return "{fldws|fldw} -16(%%r30),%0";
6955 else if (which_alternative == 2)
6961 /* Support the second variant. */
6962 if (reverse_comparison)
6963 PUT_CODE (operands[2], reverse_condition (GET_CODE (operands[2])));
6965 if (which_alternative == 0)
6967 int nullify = INSN_ANNULLED_BRANCH_P (insn);
6970 /* If this is a long branch with its delay slot unfilled, set `nullify'
6971 as it can nullify the delay slot and save a nop. */
6972 if (length == 8 && dbr_sequence_length () == 0)
6975 /* If this is a short forward conditional branch which did not get
6976 its delay slot filled, the delay slot can still be nullified. */
6977 if (! nullify && length == 4 && dbr_sequence_length () == 0)
6978 nullify = forward_branch_p (insn);
6984 return "movb,%C2,n %1,%0,%3";
6986 return "movb,%C2 %1,%0,%3";
6989 /* Handle weird backwards branch with a filled delay slot
6990 which is nullified. */
6991 if (dbr_sequence_length () != 0
6992 && ! forward_branch_p (insn)
6994 return "movb,%N2,n %1,%0,.+12\n\tb %3";
6996 /* Handle short backwards branch with an unfilled delay slot.
6997 Using a movb;nop rather than or;bl saves 1 cycle for both
6998 taken and untaken branches. */
6999 else if (dbr_sequence_length () == 0
7000 && ! forward_branch_p (insn)
7001 && INSN_ADDRESSES_SET_P ()
7002 && VAL_14_BITS_P (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (insn)))
7003 - INSN_ADDRESSES (INSN_UID (insn)) - 8))
7004 return "movb,%C2 %1,%0,%3%#";
7005 /* Handle normal cases. */
7007 return "or,%N2 %1,%%r0,%0\n\tb,n %3";
7009 return "or,%N2 %1,%%r0,%0\n\tb %3";
7012 /* The reversed conditional branch must branch over one additional
7013 instruction if the delay slot is filled and needs to be extracted
7014 by output_lbranch. If the delay slot is empty or this is a
7015 nullified forward branch, the instruction after the reversed
7016 condition branch must be nullified. */
7017 if (dbr_sequence_length () == 0
7018 || (nullify && forward_branch_p (insn)))
7022 operands[4] = GEN_INT (length);
7027 operands[4] = GEN_INT (length + 4);
7031 output_asm_insn ("movb,%N2,n %1,%0,.+%4", operands);
7033 output_asm_insn ("movb,%N2 %1,%0,.+%4", operands);
7035 return output_lbranch (operands[3], insn, xdelay);
7038 /* Deal with gross reload for FP destination register case. */
7039 else if (which_alternative == 1)
7041 /* Move source register to MEM, perform the branch test, then
7042 finally load the FP register from MEM from within the branch's
7044 output_asm_insn ("stw %1,-16(%%r30)", operands);
7046 return "{comb|cmpb},%S2 %%r0,%1,%3\n\t{fldws|fldw} -16(%%r30),%0";
7047 else if (length == 16)
7048 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\t{fldws|fldw} -16(%%r30),%0";
7051 operands[4] = GEN_INT (length - 4);
7052 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4", operands);
7053 output_asm_insn ("{fldws|fldw} -16(%%r30),%0", operands);
7054 return output_lbranch (operands[3], insn, 0);
7057 /* Deal with gross reload from memory case. */
7058 else if (which_alternative == 2)
7060 /* Reload loop counter from memory, the store back to memory
7061 happens in the branch's delay slot. */
7063 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tstw %1,%0";
7064 else if (length == 12)
7065 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tstw %1,%0";
7068 operands[4] = GEN_INT (length);
7069 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tstw %1,%0",
7071 return output_lbranch (operands[3], insn, 0);
7074 /* Handle SAR as a destination. */
7078 return "{comb|cmpb},%S2 %%r0,%1,%3\n\tmtsar %r1";
7079 else if (length == 12)
7080 return "{comclr|cmpclr},%B2 %%r0,%1,%%r0\n\tb %3\n\tmtsar %r1";
7083 operands[4] = GEN_INT (length);
7084 output_asm_insn ("{comb|cmpb},%B2 %%r0,%1,.+%4\n\tmtsar %r1",
7086 return output_lbranch (operands[3], insn, 0);
7091 /* Copy any FP arguments in INSN into integer registers. */
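/* For example (a sketch using the templates below), a single-precision
   argument living in an FP register is bounced through the frame marker
   with

	fstw %frN,-16(%sr0,%r30)
	ldw -16(%sr0,%r30),%rM

   and a double-precision argument uses fstd followed by two ldw's to move
   both halves into a general register pair.  */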
7093 copy_fp_args (rtx insn)
7098 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7100 int arg_mode, regno;
7101 rtx use = XEXP (link, 0);
7103 if (! (GET_CODE (use) == USE
7104 && GET_CODE (XEXP (use, 0)) == REG
7105 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7108 arg_mode = GET_MODE (XEXP (use, 0));
7109 regno = REGNO (XEXP (use, 0));
7111 /* Is it a floating point register? */
7112 if (regno >= 32 && regno <= 39)
7114 /* Copy the FP register into an integer register via memory. */
7115 if (arg_mode == SFmode)
7117 xoperands[0] = XEXP (use, 0);
7118 xoperands[1] = gen_rtx_REG (SImode, 26 - (regno - 32) / 2);
7119 output_asm_insn ("{fstws|fstw} %0,-16(%%sr0,%%r30)", xoperands);
7120 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7124 xoperands[0] = XEXP (use, 0);
7125 xoperands[1] = gen_rtx_REG (DImode, 25 - (regno - 34) / 2);
7126 output_asm_insn ("{fstds|fstd} %0,-16(%%sr0,%%r30)", xoperands);
7127 output_asm_insn ("ldw -12(%%sr0,%%r30),%R1", xoperands);
7128 output_asm_insn ("ldw -16(%%sr0,%%r30),%1", xoperands);
7134 /* Compute length of the FP argument copy sequence for INSN. */
7136 length_fp_args (rtx insn)
7141 for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
7143 int arg_mode, regno;
7144 rtx use = XEXP (link, 0);
7146 if (! (GET_CODE (use) == USE
7147 && GET_CODE (XEXP (use, 0)) == REG
7148 && FUNCTION_ARG_REGNO_P (REGNO (XEXP (use, 0)))))
7151 arg_mode = GET_MODE (XEXP (use, 0));
7152 regno = REGNO (XEXP (use, 0));
7154 /* Is it a floating point register? */
7155 if (regno >= 32 && regno <= 39)
7157 if (arg_mode == SFmode)
7167 /* Return the attribute length for the millicode call instruction INSN.
7168 The length must match the code generated by output_millicode_call.
7169 We include the delay slot in the returned length as it is better to
7170 overestimate the length than to underestimate it. */
7173 attr_length_millicode_call (rtx insn)
7175 unsigned long distance = -1;
7176 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7178 if (INSN_ADDRESSES_SET_P ())
7180 distance = (total + insn_current_reference_address (insn));
7181 if (distance < total)
7187 if (!TARGET_LONG_CALLS && distance < 7600000)
7192 else if (TARGET_PORTABLE_RUNTIME)
7196 if (!TARGET_LONG_CALLS && distance < 240000)
7199 if (TARGET_LONG_ABS_CALL && !flag_pic)
7206 /* INSN is a function call. It may have an unconditional jump
7209 CALL_DEST is the routine we are calling. */
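/* In the common in-range case this reduces to a single branch and link,
   roughly

	{bl|b,l} $$routine,%r31

   (%r2 is the link register for TARGET_64BIT), with a nop emitted below
   when the delay slot was not filled.  The longer sequences first
   materialize the target address in %r1.  */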
7212 output_millicode_call (rtx insn, rtx call_dest)
7214 int attr_length = get_attr_length (insn);
7215 int seq_length = dbr_sequence_length ();
7220 xoperands[0] = call_dest;
7221 xoperands[2] = gen_rtx_REG (Pmode, TARGET_64BIT ? 2 : 31);
7223 /* Handle the common case where we are sure that the branch will
7224 reach the beginning of the $CODE$ subspace. The within-reach
7225 form of the $$sh_func_adrs call has a length of 28. Because
7226 it has an attribute type of multi, it never has a nonzero
7227 sequence length. The length of the $$sh_func_adrs is the same
7228 as certain out of reach PIC calls to other routines. */
7229 if (!TARGET_LONG_CALLS
7230 && ((seq_length == 0
7231 && (attr_length == 12
7232 || (attr_length == 28 && get_attr_type (insn) == TYPE_MULTI)))
7233 || (seq_length != 0 && attr_length == 8)))
7235 output_asm_insn ("{bl|b,l} %0,%2", xoperands);
7241 /* It might seem that one insn could be saved by accessing
7242 the millicode function using the linkage table. However,
7243 this doesn't work in shared libraries and other dynamically
7244 loaded objects. Using a pc-relative sequence also avoids
7245 problems related to the implicit use of the gp register. */
7246 output_asm_insn ("b,l .+8,%%r1", xoperands);
7250 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
7251 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
7255 xoperands[1] = gen_label_rtx ();
7256 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7257 targetm.asm_out.internal_label (asm_out_file, "L",
7258 CODE_LABEL_NUMBER (xoperands[1]));
7259 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7262 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7264 else if (TARGET_PORTABLE_RUNTIME)
7266 /* Pure portable runtime doesn't allow be/ble; we also don't
7267 have PIC support in the assembler/linker, so this sequence
7270 /* Get the address of our target into %r1. */
7271 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7272 output_asm_insn ("ldo R'%0(%%r1),%%r1", xoperands);
7274 /* Get our return address into %r31. */
7275 output_asm_insn ("{bl|b,l} .+8,%%r31", xoperands);
7276 output_asm_insn ("addi 8,%%r31,%%r31", xoperands);
7278 /* Jump to our target address in %r1. */
7279 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7283 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7285 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31", xoperands);
7287 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7291 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7292 output_asm_insn ("addi 16,%%r1,%%r31", xoperands);
7294 if (TARGET_SOM || !TARGET_GAS)
7296 /* The HP assembler can generate relocations for the
7297 difference of two symbols. GAS can do this for a
7298 millicode symbol but not an arbitrary external
7299 symbol when generating SOM output. */
7300 xoperands[1] = gen_label_rtx ();
7301 targetm.asm_out.internal_label (asm_out_file, "L",
7302 CODE_LABEL_NUMBER (xoperands[1]));
7303 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7304 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7308 output_asm_insn ("addil L'%0-$PIC_pcrel$0+8,%%r1", xoperands);
7309 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+12(%%r1),%%r1",
7313 /* Jump to our target address in %r1. */
7314 output_asm_insn ("bv %%r0(%%r1)", xoperands);
7318 if (seq_length == 0)
7319 output_asm_insn ("nop", xoperands);
7321 /* We are done if there isn't a jump in the delay slot. */
7322 if (seq_length == 0 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7325 /* This call has an unconditional jump in its delay slot. */
7326 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7328 /* See if the return address can be adjusted. Use the containing
7329 sequence insn's address. */
7330 if (INSN_ADDRESSES_SET_P ())
7332 seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7333 distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7334 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7336 if (VAL_14_BITS_P (distance))
7338 xoperands[1] = gen_label_rtx ();
7339 output_asm_insn ("ldo %0-%1(%2),%2", xoperands);
7340 targetm.asm_out.internal_label (asm_out_file, "L",
7341 CODE_LABEL_NUMBER (xoperands[1]));
7344 /* ??? This branch may not reach its target. */
7345 output_asm_insn ("nop\n\tb,n %0", xoperands);
7348 /* ??? This branch may not reach its target. */
7349 output_asm_insn ("nop\n\tb,n %0", xoperands);
7351 /* Delete the jump. */
7352 SET_INSN_DELETED (NEXT_INSN (insn));
7357 /* Return the attribute length of the call instruction INSN. The SIBCALL
7358 flag indicates whether INSN is a regular call or a sibling call. The
7359 length returned must be longer than the code actually generated by
7360 output_call. Since branch shortening is done before delay branch
7361 sequencing, there is no way to determine whether or not the delay
7362 slot will be filled during branch shortening. Even when the delay
7363 slot is filled, we may have to add a nop if the delay slot contains
7364 a branch that can't reach its target. Thus, we always have to include
7365 the delay slot in the length estimate. This used to be done in
7366 pa_adjust_insn_length but we do it here now as some sequences always
7367 fill the delay slot and we can save four bytes in the estimate for
7371 attr_length_call (rtx insn, int sibcall)
7374 rtx call, call_dest;
7377 rtx pat = PATTERN (insn);
7378 unsigned long distance = -1;
7380 gcc_assert (GET_CODE (insn) == CALL_INSN);
7382 if (INSN_ADDRESSES_SET_P ())
7384 unsigned long total;
7386 total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7387 distance = (total + insn_current_reference_address (insn));
7388 if (distance < total)
7392 gcc_assert (GET_CODE (pat) == PARALLEL);
7394 /* Get the call rtx. */
7395 call = XVECEXP (pat, 0, 0);
7396 if (GET_CODE (call) == SET)
7397 call = SET_SRC (call);
7399 gcc_assert (GET_CODE (call) == CALL);
7401 /* Determine if this is a local call. */
7402 call_dest = XEXP (XEXP (call, 0), 0);
7403 call_decl = SYMBOL_REF_DECL (call_dest);
7404 local_call = call_decl && targetm.binds_local_p (call_decl);
7406 /* pc-relative branch. */
7407 if (!TARGET_LONG_CALLS
7408 && ((TARGET_PA_20 && !sibcall && distance < 7600000)
7409 || distance < 240000))
7412 /* 64-bit plabel sequence. */
7413 else if (TARGET_64BIT && !local_call)
7414 length += sibcall ? 28 : 24;
7416 /* non-pic long absolute branch sequence. */
7417 else if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7420 /* long pc-relative branch sequence. */
7421 else if (TARGET_LONG_PIC_SDIFF_CALL
7422 || (TARGET_GAS && !TARGET_SOM
7423 && (TARGET_LONG_PIC_PCREL_CALL || local_call)))
7427 if (!TARGET_PA_20 && !TARGET_NO_SPACE_REGS && flag_pic)
7431 /* 32-bit plabel sequence. */
7437 length += length_fp_args (insn);
7447 if (!TARGET_NO_SPACE_REGS && flag_pic)
7455 /* INSN is a function call. It may have an unconditional jump
7458 CALL_DEST is the routine we are calling. */
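/* In the short-reach case the whole call is a single branch and link,
   roughly

	{bl|b,l} dest,%r2

   with %r0 used as the link register for a sibcall so the return address
   is discarded.  Everything below handles the various long-call forms.  */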
7461 output_call (rtx insn, rtx call_dest, int sibcall)
7463 int delay_insn_deleted = 0;
7464 int delay_slot_filled = 0;
7465 int seq_length = dbr_sequence_length ();
7466 tree call_decl = SYMBOL_REF_DECL (call_dest);
7467 int local_call = call_decl && targetm.binds_local_p (call_decl);
7470 xoperands[0] = call_dest;
7472 /* Handle the common case where we're sure that the branch will reach
7473 the beginning of the "$CODE$" subspace. This is the beginning of
7474 the current function if we are in a named section. */
7475 if (!TARGET_LONG_CALLS && attr_length_call (insn, sibcall) == 8)
7477 xoperands[1] = gen_rtx_REG (word_mode, sibcall ? 0 : 2);
7478 output_asm_insn ("{bl|b,l} %0,%1", xoperands);
7482 if (TARGET_64BIT && !local_call)
7484 /* ??? As far as I can tell, the HP linker doesn't support the
7485 long pc-relative sequence described in the 64-bit runtime
7486 architecture. So, we use a slightly longer indirect call. */
7487 xoperands[0] = get_deferred_plabel (call_dest);
7488 xoperands[1] = gen_label_rtx ();
7490 /* If this isn't a sibcall, we put the load of %r27 into the
7491 delay slot. We can't do this in a sibcall as we don't
7492 have a second call-clobbered scratch register available. */
7494 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7497 final_scan_insn (NEXT_INSN (insn), asm_out_file,
7500 /* Now delete the delay insn. */
7501 SET_INSN_DELETED (NEXT_INSN (insn));
7502 delay_insn_deleted = 1;
7505 output_asm_insn ("addil LT'%0,%%r27", xoperands);
7506 output_asm_insn ("ldd RT'%0(%%r1),%%r1", xoperands);
7507 output_asm_insn ("ldd 0(%%r1),%%r1", xoperands);
7511 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7512 output_asm_insn ("ldd 16(%%r1),%%r1", xoperands);
7513 output_asm_insn ("bve (%%r1)", xoperands);
7517 output_asm_insn ("ldd 16(%%r1),%%r2", xoperands);
7518 output_asm_insn ("bve,l (%%r2),%%r2", xoperands);
7519 output_asm_insn ("ldd 24(%%r1),%%r27", xoperands);
7520 delay_slot_filled = 1;
7525 int indirect_call = 0;
7527 /* Emit a long call. There are several different sequences
7528 of increasing length and complexity. In most cases,
7529 they don't allow an instruction in the delay slot. */
7530 if (!((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7531 && !TARGET_LONG_PIC_SDIFF_CALL
7532 && !(TARGET_GAS && !TARGET_SOM
7533 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7538 && GET_CODE (NEXT_INSN (insn)) != JUMP_INSN
7542 || ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)))
7544 /* A non-jump insn in the delay slot. By definition we can
7545 emit this insn before the call (and in fact before argument
7547 final_scan_insn (NEXT_INSN (insn), asm_out_file, optimize, 0,
7550 /* Now delete the delay insn. */
7551 SET_INSN_DELETED (NEXT_INSN (insn));
7552 delay_insn_deleted = 1;
7555 if ((TARGET_LONG_ABS_CALL || local_call) && !flag_pic)
7557 /* This is the best sequence for making long calls in
7558 non-pic code. Unfortunately, GNU ld doesn't provide
7559 the stub needed for external calls, and GAS's support
7560 for this with the SOM linker is buggy. It is safe
7561 to use this for local calls. */
7562 output_asm_insn ("ldil L'%0,%%r1", xoperands);
7564 output_asm_insn ("be R'%0(%%sr4,%%r1)", xoperands);
7568 output_asm_insn ("be,l R'%0(%%sr4,%%r1),%%sr0,%%r31",
7571 output_asm_insn ("ble R'%0(%%sr4,%%r1)", xoperands);
7573 output_asm_insn ("copy %%r31,%%r2", xoperands);
7574 delay_slot_filled = 1;
7579 if (TARGET_LONG_PIC_SDIFF_CALL)
7581 /* The HP assembler and linker can handle relocations
7582 for the difference of two symbols. The HP assembler
7583 recognizes the sequence as a pc-relative call and
7584 the linker provides stubs when needed. */
7585 xoperands[1] = gen_label_rtx ();
7586 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7587 output_asm_insn ("addil L'%0-%l1,%%r1", xoperands);
7588 targetm.asm_out.internal_label (asm_out_file, "L",
7589 CODE_LABEL_NUMBER (xoperands[1]));
7590 output_asm_insn ("ldo R'%0-%l1(%%r1),%%r1", xoperands);
7592 else if (TARGET_GAS && !TARGET_SOM
7593 && (TARGET_LONG_PIC_PCREL_CALL || local_call))
7595 /* GAS currently can't generate the relocations that
7596 are needed for the SOM linker under HP-UX using this
7597 sequence. The GNU linker doesn't generate the stubs
7598 that are needed for external calls on TARGET_ELF32
7599 with this sequence. For now, we have to use a
7600 longer plabel sequence when using GAS. */
7601 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7602 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1",
7604 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1",
7609 /* Emit a long plabel-based call sequence. This is
7610 essentially an inline implementation of $$dyncall.
7611 We don't actually try to call $$dyncall as this is
7612 as difficult as calling the function itself. */
7613 xoperands[0] = get_deferred_plabel (call_dest);
7614 xoperands[1] = gen_label_rtx ();
7616 /* Since the call is indirect, FP arguments in registers
7617 need to be copied to the general registers. Then, the
7618 argument relocation stub will copy them back. */
7620 copy_fp_args (insn);
7624 output_asm_insn ("addil LT'%0,%%r19", xoperands);
7625 output_asm_insn ("ldw RT'%0(%%r1),%%r1", xoperands);
7626 output_asm_insn ("ldw 0(%%r1),%%r1", xoperands);
7630 output_asm_insn ("addil LR'%0-$global$,%%r27",
7632 output_asm_insn ("ldw RR'%0-$global$(%%r1),%%r1",
7636 output_asm_insn ("bb,>=,n %%r1,30,.+16", xoperands);
7637 output_asm_insn ("depi 0,31,2,%%r1", xoperands);
7638 output_asm_insn ("ldw 4(%%sr0,%%r1),%%r19", xoperands);
7639 output_asm_insn ("ldw 0(%%sr0,%%r1),%%r1", xoperands);
7641 if (!sibcall && !TARGET_PA_20)
7643 output_asm_insn ("{bl|b,l} .+8,%%r2", xoperands);
7644 if (TARGET_NO_SPACE_REGS)
7645 output_asm_insn ("addi 8,%%r2,%%r2", xoperands);
7647 output_asm_insn ("addi 16,%%r2,%%r2", xoperands);
7654 output_asm_insn ("bve (%%r1)", xoperands);
7659 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7660 output_asm_insn ("stw %%r2,-24(%%sp)", xoperands);
7661 delay_slot_filled = 1;
7664 output_asm_insn ("bve,l (%%r1),%%r2", xoperands);
7669 if (!TARGET_NO_SPACE_REGS && flag_pic)
7670 output_asm_insn ("ldsid (%%r1),%%r31\n\tmtsp %%r31,%%sr0",
7675 if (TARGET_NO_SPACE_REGS || !flag_pic)
7676 output_asm_insn ("be 0(%%sr4,%%r1)", xoperands);
7678 output_asm_insn ("be 0(%%sr0,%%r1)", xoperands);
7682 if (TARGET_NO_SPACE_REGS || !flag_pic)
7683 output_asm_insn ("ble 0(%%sr4,%%r1)", xoperands);
7685 output_asm_insn ("ble 0(%%sr0,%%r1)", xoperands);
7688 output_asm_insn ("stw %%r31,-24(%%sp)", xoperands);
7690 output_asm_insn ("copy %%r31,%%r2", xoperands);
7691 delay_slot_filled = 1;
7698 if (!delay_slot_filled && (seq_length == 0 || delay_insn_deleted))
7699 output_asm_insn ("nop", xoperands);
7701 /* We are done if there isn't a jump in the delay slot. */
7703 || delay_insn_deleted
7704 || GET_CODE (NEXT_INSN (insn)) != JUMP_INSN)
7707 /* A sibcall should never have a branch in the delay slot. */
7708 gcc_assert (!sibcall);
7710 /* This call has an unconditional jump in its delay slot. */
7711 xoperands[0] = XEXP (PATTERN (NEXT_INSN (insn)), 1);
7713 if (!delay_slot_filled && INSN_ADDRESSES_SET_P ())
7715 /* See if the return address can be adjusted. Use the containing
7716 sequence insn's address. */
7717 rtx seq_insn = NEXT_INSN (PREV_INSN (XVECEXP (final_sequence, 0, 0)));
7718 int distance = (INSN_ADDRESSES (INSN_UID (JUMP_LABEL (NEXT_INSN (insn))))
7719 - INSN_ADDRESSES (INSN_UID (seq_insn)) - 8);
7721 if (VAL_14_BITS_P (distance))
7723 xoperands[1] = gen_label_rtx ();
7724 output_asm_insn ("ldo %0-%1(%%r2),%%r2", xoperands);
7725 targetm.asm_out.internal_label (asm_out_file, "L",
7726 CODE_LABEL_NUMBER (xoperands[1]));
7729 output_asm_insn ("nop\n\tb,n %0", xoperands);
7732 output_asm_insn ("b,n %0", xoperands);
7734 /* Delete the jump. */
7735 SET_INSN_DELETED (NEXT_INSN (insn));
7740 /* Return the attribute length of the indirect call instruction INSN.
7741 The length must match the code generated by output_indirect_call.
7742 The returned length includes the delay slot. Currently, the delay
7743 slot of an indirect call sequence is not exposed and it is used by
7744 the sequence itself. */
7747 attr_length_indirect_call (rtx insn)
7749 unsigned long distance = -1;
7750 unsigned long total = IN_NAMED_SECTION_P (cfun->decl) ? 0 : total_code_bytes;
7752 if (INSN_ADDRESSES_SET_P ())
7754 distance = (total + insn_current_reference_address (insn));
7755 if (distance < total)
7762 if (TARGET_FAST_INDIRECT_CALLS
7763 || (!TARGET_PORTABLE_RUNTIME
7764 && ((TARGET_PA_20 && !TARGET_SOM && distance < 7600000)
7765 || distance < 240000)))
7771 if (TARGET_PORTABLE_RUNTIME)
7774 /* Out of reach, can use ble. */
7779 output_indirect_call (rtx insn, rtx call_dest)
7785 xoperands[0] = call_dest;
7786 output_asm_insn ("ldd 16(%0),%%r2", xoperands);
7787 output_asm_insn ("bve,l (%%r2),%%r2\n\tldd 24(%0),%%r27", xoperands);
7791 /* First the special case for kernels, level 0 systems, etc. */
7792 if (TARGET_FAST_INDIRECT_CALLS)
7793 return "ble 0(%%sr4,%%r22)\n\tcopy %%r31,%%r2";
7795 /* Now the normal case -- we can reach $$dyncall directly or
7796 we're sure that we can get there via a long-branch stub.
7798 No need to check target flags as the length uniquely identifies
7799 the remaining cases. */
7800 if (attr_length_indirect_call (insn) == 8)
7802 /* The HP linker sometimes substitutes a BLE for BL/B,L calls to
7803 $$dyncall. Since BLE uses %r31 as the link register, the 22-bit
7804 variant of the B,L instruction can't be used on the SOM target. */
7805 if (TARGET_PA_20 && !TARGET_SOM)
7806 return ".CALL\tARGW0=GR\n\tb,l $$dyncall,%%r2\n\tcopy %%r2,%%r31";
7808 return ".CALL\tARGW0=GR\n\tbl $$dyncall,%%r31\n\tcopy %%r31,%%r2";
7811 /* Long millicode call, but we are not generating PIC or portable runtime
7813 if (attr_length_indirect_call (insn) == 12)
7814 return ".CALL\tARGW0=GR\n\tldil L'$$dyncall,%%r2\n\tble R'$$dyncall(%%sr4,%%r2)\n\tcopy %%r31,%%r2";
7816 /* Long millicode call for portable runtime. */
7817 if (attr_length_indirect_call (insn) == 20)
7818 return "ldil L'$$dyncall,%%r31\n\tldo R'$$dyncall(%%r31),%%r31\n\tblr %%r0,%%r2\n\tbv,n %%r0(%%r31)\n\tnop";
7820 /* We need a long PIC call to $$dyncall. */
7821 xoperands[0] = NULL_RTX;
7822 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
7823 if (TARGET_SOM || !TARGET_GAS)
7825 xoperands[0] = gen_label_rtx ();
7826 output_asm_insn ("addil L'$$dyncall-%0,%%r1", xoperands);
7827 targetm.asm_out.internal_label (asm_out_file, "L",
7828 CODE_LABEL_NUMBER (xoperands[0]));
7829 output_asm_insn ("ldo R'$$dyncall-%0(%%r1),%%r1", xoperands);
7833 output_asm_insn ("addil L'$$dyncall-$PIC_pcrel$0+4,%%r1", xoperands);
7834 output_asm_insn ("ldo R'$$dyncall-$PIC_pcrel$0+8(%%r1),%%r1",
7837 output_asm_insn ("blr %%r0,%%r2", xoperands);
7838 output_asm_insn ("bv,n %%r0(%%r1)\n\tnop", xoperands);
7842 /* Return the total length of the save and restore instructions needed for
7843 the data linkage table pointer (i.e., the PIC register) across the call
7844 instruction INSN. No-return calls do not require a save and restore.
7845 In addition, we may be able to avoid the save and restore for calls
7846 within the same translation unit. */
7849 attr_length_save_restore_dltp (rtx insn)
7851 if (find_reg_note (insn, REG_NORETURN, NULL_RTX))
7857 /* In HPUX 8.0's shared library scheme, special relocations are needed
7858 for function labels if they might be passed to a function
7859 in a shared library (because shared libraries don't live in code
7860 space), and special magic is needed to construct their address. */
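/* A minimal sketch of the encoding, assuming the usual marker character
   used by this port: the name "foo" in a function SYMBOL_REF becomes
   "@foo", and pa_strip_name_encoding below removes the '@' (and any
   generic '*' prefix) when the bare name is needed.  */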
7863 hppa_encode_label (rtx sym)
7865 const char *str = XSTR (sym, 0);
7866 int len = strlen (str) + 1;
7869 p = newstr = XALLOCAVEC (char, len + 1);
7873 XSTR (sym, 0) = ggc_alloc_string (newstr, len);
7877 pa_encode_section_info (tree decl, rtx rtl, int first)
7879 int old_referenced = 0;
7881 if (!first && MEM_P (rtl) && GET_CODE (XEXP (rtl, 0)) == SYMBOL_REF)
7883 = SYMBOL_REF_FLAGS (XEXP (rtl, 0)) & SYMBOL_FLAG_REFERENCED;
7885 default_encode_section_info (decl, rtl, first);
7887 if (first && TEXT_SPACE_P (decl))
7889 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
7890 if (TREE_CODE (decl) == FUNCTION_DECL)
7891 hppa_encode_label (XEXP (rtl, 0));
7893 else if (old_referenced)
7894 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= old_referenced;
7897 /* This is roughly the inverse of pa_encode_section_info. */
7900 pa_strip_name_encoding (const char *str)
7902 str += (*str == '@');
7903 str += (*str == '*');
7908 function_label_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
7910 return GET_CODE (op) == SYMBOL_REF && FUNCTION_NAME_P (XSTR (op, 0));
7913 /* Returns 1 if OP is a function label involved in a simple addition
7914 with a constant. Used to keep certain patterns from matching
7915 during instruction combination. */
7917 is_function_label_plus_const (rtx op)
7919 /* Strip off any CONST. */
7920 if (GET_CODE (op) == CONST)
7923 return (GET_CODE (op) == PLUS
7924 && function_label_operand (XEXP (op, 0), Pmode)
7925 && GET_CODE (XEXP (op, 1)) == CONST_INT);
7928 /* Output assembly code for a thunk to FUNCTION. */
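/* When DELTA fits in 14 bits and a plain branch reaches FUNCTION, the
   thunk is essentially just

	b function
	ldo delta(%r26),%r26	; adjust the this pointer in the delay slot

   The other cases below differ only in how the branch target and a
   larger DELTA are materialized.  */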
7931 pa_asm_output_mi_thunk (FILE *file, tree thunk_fndecl, HOST_WIDE_INT delta,
7932 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
7935 static unsigned int current_thunk_number;
7936 int val_14 = VAL_14_BITS_P (delta);
7937 unsigned int old_last_address = last_address, nbytes = 0;
7941 xoperands[0] = XEXP (DECL_RTL (function), 0);
7942 xoperands[1] = XEXP (DECL_RTL (thunk_fndecl), 0);
7943 xoperands[2] = GEN_INT (delta);
7945 ASM_OUTPUT_LABEL (file, XSTR (xoperands[1], 0));
7946 fprintf (file, "\t.PROC\n\t.CALLINFO FRAME=0,NO_CALLS\n\t.ENTRY\n");
7948 /* Output the thunk. We know that the function is in the same
7949 translation unit (i.e., the same space) as the thunk, and that
7950 thunks are output after their method. Thus, we don't need an
7951 external branch to reach the function. With SOM and GAS,
7952 functions and thunks are effectively in different sections.
7953 Thus, we can always use an IA-relative branch and the linker
7954 will add a long branch stub if necessary.
7956 However, we have to be careful when generating PIC code on the
7957 SOM port to ensure that the sequence does not transfer to an
7958 import stub for the target function as this could clobber the
7959 return value saved at SP-24. This would also apply to the
7960 32-bit linux port if the multi-space model is implemented. */
7961 if ((!TARGET_LONG_CALLS && TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7962 && !(flag_pic && TREE_PUBLIC (function))
7963 && (TARGET_GAS || last_address < 262132))
7964 || (!TARGET_LONG_CALLS && !TARGET_SOM && !TARGET_PORTABLE_RUNTIME
7965 && ((targetm.have_named_sections
7966 && DECL_SECTION_NAME (thunk_fndecl) != NULL
7967 /* The GNU 64-bit linker has rather poor stub management.
7968 So, we use a long branch from thunks that aren't in
7969 the same section as the target function. */
7971 && (DECL_SECTION_NAME (thunk_fndecl)
7972 != DECL_SECTION_NAME (function)))
7973 || ((DECL_SECTION_NAME (thunk_fndecl)
7974 == DECL_SECTION_NAME (function))
7975 && last_address < 262132)))
7976 || (targetm.have_named_sections
7977 && DECL_SECTION_NAME (thunk_fndecl) == NULL
7978 && DECL_SECTION_NAME (function) == NULL
7979 && last_address < 262132)
7980 || (!targetm.have_named_sections && last_address < 262132))))
7983 output_asm_insn ("addil L'%2,%%r26", xoperands);
7985 output_asm_insn ("b %0", xoperands);
7989 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
7994 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
7998 else if (TARGET_64BIT)
8000 /* We only have one call-clobbered scratch register, so we can't
8001 make use of the delay slot if delta doesn't fit in 14 bits. */
8004 output_asm_insn ("addil L'%2,%%r26", xoperands);
8005 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8008 output_asm_insn ("b,l .+8,%%r1", xoperands);
8012 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8013 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r1", xoperands);
8017 xoperands[3] = GEN_INT (val_14 ? 8 : 16);
8018 output_asm_insn ("addil L'%0-%1-%3,%%r1", xoperands);
8023 output_asm_insn ("bv %%r0(%%r1)", xoperands);
8024 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8029 output_asm_insn ("bv,n %%r0(%%r1)", xoperands);
8033 else if (TARGET_PORTABLE_RUNTIME)
8035 output_asm_insn ("ldil L'%0,%%r1", xoperands);
8036 output_asm_insn ("ldo R'%0(%%r1),%%r22", xoperands);
8039 output_asm_insn ("addil L'%2,%%r26", xoperands);
8041 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8045 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8050 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8054 else if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8056 /* The function is accessible from outside this module. The only
8057 way to avoid an import stub between the thunk and function is to
8058 call the function directly with an indirect sequence similar to
8059 that used by $$dyncall. This is possible because $$dyncall acts
8060 as the import stub in an indirect call. */
8061 ASM_GENERATE_INTERNAL_LABEL (label, "LTHN", current_thunk_number);
8062 xoperands[3] = gen_rtx_SYMBOL_REF (Pmode, label);
8063 output_asm_insn ("addil LT'%3,%%r19", xoperands);
8064 output_asm_insn ("ldw RT'%3(%%r1),%%r22", xoperands);
8065 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8066 output_asm_insn ("bb,>=,n %%r22,30,.+16", xoperands);
8067 output_asm_insn ("depi 0,31,2,%%r22", xoperands);
8068 output_asm_insn ("ldw 4(%%sr0,%%r22),%%r19", xoperands);
8069 output_asm_insn ("ldw 0(%%sr0,%%r22),%%r22", xoperands);
8073 output_asm_insn ("addil L'%2,%%r26", xoperands);
8079 output_asm_insn ("bve (%%r22)", xoperands);
8082 else if (TARGET_NO_SPACE_REGS)
8084 output_asm_insn ("be 0(%%sr4,%%r22)", xoperands);
8089 output_asm_insn ("ldsid (%%sr0,%%r22),%%r21", xoperands);
8090 output_asm_insn ("mtsp %%r21,%%sr0", xoperands);
8091 output_asm_insn ("be 0(%%sr0,%%r22)", xoperands);
8096 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8098 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8102 output_asm_insn ("{bl|b,l} .+8,%%r1", xoperands);
8104 if (TARGET_SOM || !TARGET_GAS)
8106 output_asm_insn ("addil L'%0-%1-8,%%r1", xoperands);
8107 output_asm_insn ("ldo R'%0-%1-8(%%r1),%%r22", xoperands);
8111 output_asm_insn ("addil L'%0-$PIC_pcrel$0+4,%%r1", xoperands);
8112 output_asm_insn ("ldo R'%0-$PIC_pcrel$0+8(%%r1),%%r22", xoperands);
8116 output_asm_insn ("addil L'%2,%%r26", xoperands);
8118 output_asm_insn ("bv %%r0(%%r22)", xoperands);
8122 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8127 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8134 output_asm_insn ("addil L'%2,%%r26", xoperands);
8136 output_asm_insn ("ldil L'%0,%%r22", xoperands);
8137 output_asm_insn ("be R'%0(%%sr4,%%r22)", xoperands);
8141 output_asm_insn ("ldo %2(%%r26),%%r26", xoperands);
8146 output_asm_insn ("ldo R'%2(%%r1),%%r26", xoperands);
8151 fprintf (file, "\t.EXIT\n\t.PROCEND\n");
8153 if (TARGET_SOM && TARGET_GAS)
8155 /* We're done with this subspace except possibly for some additional
8156 debug information. Forget that we are in this subspace to ensure
8157 that the next function is output in its own subspace. */
8159 cfun->machine->in_nsubspa = 2;
8162 if (TARGET_SOM && flag_pic && TREE_PUBLIC (function))
8164 switch_to_section (data_section);
8165 output_asm_insn (".align 4", xoperands);
8166 ASM_OUTPUT_LABEL (file, label);
8167 output_asm_insn (".word P'%0", xoperands);
8170 current_thunk_number++;
8171 nbytes = ((nbytes + FUNCTION_BOUNDARY / BITS_PER_UNIT - 1)
8172 & ~(FUNCTION_BOUNDARY / BITS_PER_UNIT - 1));
8173 last_address += nbytes;
8174 if (old_last_address > last_address)
8175 last_address = UINT_MAX;
8176 update_total_code_bytes (nbytes);
8179 /* Only direct calls to static functions are allowed to be sibling (tail)
8182 This restriction is necessary because some linker generated stubs will
8183 store return pointers into rp' in some cases which might clobber a
8184 live value already in rp'.
8186 In a sibcall the current function and the target function share stack
8187 space. Thus if the path to the current function and the path to the
8188 target function save a value in rp', they save the value into the
8189 same stack slot, which has undesirable consequences.
8191 Because of the deferred binding nature of shared libraries any function
8192 with external scope could be in a different load module and thus require
8193 rp' to be saved when calling that function. So sibcall optimizations
8194 can only be safe for static functions.
8196 Note that GCC never needs return value relocations, so we don't have to
8197 worry about static calls with return value relocations (which require
8200 It is safe to perform a sibcall optimization when the target function
8201 will never return. */
8203 pa_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
8205 if (TARGET_PORTABLE_RUNTIME)
8208 /* Sibcalls are ok for TARGET_ELF32 as long as the linker is used in
8209 single subspace mode and the call is not indirect. As far as I know,
8210 there is no operating system support for the multiple subspace mode.
8211 It might be possible to support indirect calls if we didn't use
8212 $$dyncall (see the indirect sequence generated in output_call). */
8214 return (decl != NULL_TREE);
8216 /* Sibcalls are not ok because the arg pointer register is not a fixed
8217 register. This prevents the sibcall optimization from occurring. In
8218 addition, there are problems with stub placement using GNU ld. This
8219 is because a normal sibcall branch uses a 17-bit relocation while
8220 a regular call branch uses a 22-bit relocation. As a result, more
8221 care needs to be taken in the placement of long-branch stubs. */
8225 /* Sibcalls are only ok within a translation unit. */
8226 return (decl && !TREE_PUBLIC (decl));
8229 /* ??? Addition is not commutative on the PA due to the weird implicit
8230 space register selection rules for memory addresses. Therefore, we
8231 don't consider a + b == b + a, as this might be inside a MEM. */
8233 pa_commutative_p (const_rtx x, int outer_code)
8235 return (COMMUTATIVE_P (x)
8236 && (TARGET_NO_SPACE_REGS
8237 || (outer_code != UNKNOWN && outer_code != MEM)
8238 || GET_CODE (x) != PLUS));
8241 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8242 use in fmpyadd instructions. */
8244 fmpyaddoperands (rtx *operands)
8246 enum machine_mode mode = GET_MODE (operands[0]);
8248 /* Must be a floating point mode. */
8249 if (mode != SFmode && mode != DFmode)
8252 /* All modes must be the same. */
8253 if (! (mode == GET_MODE (operands[1])
8254 && mode == GET_MODE (operands[2])
8255 && mode == GET_MODE (operands[3])
8256 && mode == GET_MODE (operands[4])
8257 && mode == GET_MODE (operands[5])))
8260 /* All operands must be registers. */
8261 if (! (GET_CODE (operands[1]) == REG
8262 && GET_CODE (operands[2]) == REG
8263 && GET_CODE (operands[3]) == REG
8264 && GET_CODE (operands[4]) == REG
8265 && GET_CODE (operands[5]) == REG))
8268 /* Only 2 real operands to the addition. One of the input operands must
8269 be the same as the output operand. */
8270 if (! rtx_equal_p (operands[3], operands[4])
8271 && ! rtx_equal_p (operands[3], operands[5]))
8274 /* Inout operand of add cannot conflict with any operands from multiply. */
8275 if (rtx_equal_p (operands[3], operands[0])
8276 || rtx_equal_p (operands[3], operands[1])
8277 || rtx_equal_p (operands[3], operands[2]))
8280 /* Multiply cannot feed into addition operands. */
8281 if (rtx_equal_p (operands[4], operands[0])
8282 || rtx_equal_p (operands[5], operands[0]))
8285 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8287 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8288 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8289 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8290 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8291 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8292 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8295 /* Passed. Operands are suitable for fmpyadd. */
8299 #if !defined(USE_COLLECT2)
8301 pa_asm_out_constructor (rtx symbol, int priority)
8303 if (!function_label_operand (symbol, VOIDmode))
8304 hppa_encode_label (symbol);
8306 #ifdef CTORS_SECTION_ASM_OP
8307 default_ctor_section_asm_out_constructor (symbol, priority);
8309 # ifdef TARGET_ASM_NAMED_SECTION
8310 default_named_section_asm_out_constructor (symbol, priority);
8312 default_stabs_asm_out_constructor (symbol, priority);
8318 pa_asm_out_destructor (rtx symbol, int priority)
8320 if (!function_label_operand (symbol, VOIDmode))
8321 hppa_encode_label (symbol);
8323 #ifdef DTORS_SECTION_ASM_OP
8324 default_dtor_section_asm_out_destructor (symbol, priority);
8326 # ifdef TARGET_ASM_NAMED_SECTION
8327 default_named_section_asm_out_destructor (symbol, priority);
8329 default_stabs_asm_out_destructor (symbol, priority);
8335 /* This function places uninitialized global data in the bss section.
8336 The ASM_OUTPUT_ALIGNED_BSS macro needs to be defined to call this
8337 function on the SOM port to prevent uninitialized global data from
8338 being placed in the data section. */
8341 pa_asm_output_aligned_bss (FILE *stream,
8343 unsigned HOST_WIDE_INT size,
8346 switch_to_section (bss_section);
8347 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8349 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
8350 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
8353 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
8354 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
8357 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8358 ASM_OUTPUT_LABEL (stream, name);
8359 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8362 /* Both the HP and GNU assemblers under HP-UX provide a .comm directive
8363 that doesn't allow the alignment of global common storage to be directly
8364 specified. The SOM linker aligns common storage based on the rounded
8365 value of the NUM_BYTES parameter in the .comm directive. It's not
8366 possible to use the .align directive as it doesn't affect the alignment
8367 of the label associated with a .comm directive. */
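/* For example, an 8-byte object requesting 8-byte alignment comes out
   roughly as

	foo	.comm 8

   The size operand is bumped to MAX (size, align / BITS_PER_UNIT) below
   so that the linker's rounding of it yields the requested alignment.  */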
8370 pa_asm_output_aligned_common (FILE *stream,
8372 unsigned HOST_WIDE_INT size,
8375 unsigned int max_common_align;
8377 max_common_align = TARGET_64BIT ? 128 : (size >= 4096 ? 256 : 64);
8378 if (align > max_common_align)
8380 warning (0, "alignment (%u) for %s exceeds maximum alignment "
8381 "for global common data. Using %u",
8382 align / BITS_PER_UNIT, name, max_common_align / BITS_PER_UNIT);
8383 align = max_common_align;
8386 switch_to_section (bss_section);
8388 assemble_name (stream, name);
8389 fprintf (stream, "\t.comm "HOST_WIDE_INT_PRINT_UNSIGNED"\n",
8390 MAX (size, align / BITS_PER_UNIT));
8393 /* We can't use .comm for local common storage as the SOM linker effectively
8394 treats the symbol as universal and uses the same storage for local symbols
8395 with the same name in different object files. The .block directive
8396 reserves an uninitialized block of storage. However, it's not common
8397 storage. Fortunately, GCC never requests common storage with the same
8398 name in any given translation unit. */
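/* For example, a 16-byte object with 8-byte alignment is emitted along
   the lines of

	.align 8
	foo
	.block 16

   with an optional directive naming the symbol as local in between when
   LOCAL_ASM_OP is defined; the exact label syntax comes from
   ASM_OUTPUT_LABEL.  */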
8401 pa_asm_output_aligned_local (FILE *stream,
8403 unsigned HOST_WIDE_INT size,
8406 switch_to_section (bss_section);
8407 fprintf (stream, "\t.align %u\n", align / BITS_PER_UNIT);
8410 fprintf (stream, "%s", LOCAL_ASM_OP);
8411 assemble_name (stream, name);
8412 fprintf (stream, "\n");
8415 ASM_OUTPUT_LABEL (stream, name);
8416 fprintf (stream, "\t.block "HOST_WIDE_INT_PRINT_UNSIGNED"\n", size);
8419 /* Returns 1 if the 6 operands specified in OPERANDS are suitable for
8420 use in fmpysub instructions. */
8422 fmpysuboperands (rtx *operands)
8424 enum machine_mode mode = GET_MODE (operands[0]);
8426 /* Must be a floating point mode. */
8427 if (mode != SFmode && mode != DFmode)
8430 /* All modes must be the same. */
8431 if (! (mode == GET_MODE (operands[1])
8432 && mode == GET_MODE (operands[2])
8433 && mode == GET_MODE (operands[3])
8434 && mode == GET_MODE (operands[4])
8435 && mode == GET_MODE (operands[5])))
8438 /* All operands must be registers. */
8439 if (! (GET_CODE (operands[1]) == REG
8440 && GET_CODE (operands[2]) == REG
8441 && GET_CODE (operands[3]) == REG
8442 && GET_CODE (operands[4]) == REG
8443 && GET_CODE (operands[5]) == REG))
8446 /* Only 2 real operands to the subtraction. Subtraction is not a commutative
8447 operation, so operands[4] must be the same as operands[3]. */
8448 if (! rtx_equal_p (operands[3], operands[4]))
8451 /* The multiply cannot feed into the subtraction. */
8452 if (rtx_equal_p (operands[5], operands[0]))
8455 /* Inout operand of sub cannot conflict with any operands from multiply. */
8456 if (rtx_equal_p (operands[3], operands[0])
8457 || rtx_equal_p (operands[3], operands[1])
8458 || rtx_equal_p (operands[3], operands[2]))
8461 /* SFmode limits the registers to the upper 32 of the 32-bit FP regs. */
8463 && (REGNO_REG_CLASS (REGNO (operands[0])) != FPUPPER_REGS
8464 || REGNO_REG_CLASS (REGNO (operands[1])) != FPUPPER_REGS
8465 || REGNO_REG_CLASS (REGNO (operands[2])) != FPUPPER_REGS
8466 || REGNO_REG_CLASS (REGNO (operands[3])) != FPUPPER_REGS
8467 || REGNO_REG_CLASS (REGNO (operands[4])) != FPUPPER_REGS
8468 || REGNO_REG_CLASS (REGNO (operands[5])) != FPUPPER_REGS))
8471 /* Passed. Operands are suitable for fmpysub. */
8475 /* Return 1 if the given constant is 2, 4, or 8. These are the valid
8476 constants for shadd instructions. */
8478 shadd_constant_p (int val)
8480 if (val == 2 || val == 4 || val == 8)
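/* Illustrative sketch, not part of the original sources: 2, 4 and 8 are
   accepted because the PA shNadd instructions (sh1add, sh2add, sh3add)
   compute base + (index << 1/2/3), i.e. they scale the index by exactly
   those factors.  The values below are made up.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long index = 5, base = 1000;
  int shift;

  for (shift = 1; shift <= 3; shift++)
    printf ("sh%dadd: (%lu << %d) + %lu = %lu  (scale factor %d)\n",
            shift, index, shift, base, (index << shift) + base, 1 << shift);
  return 0;
}
#endif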
8486 /* Return 1 if OP is valid as a base or index register in a
8490 borx_reg_operand (rtx op, enum machine_mode mode)
8492 if (GET_CODE (op) != REG)
8495 /* We must reject virtual registers as the only expressions that
8496 can be instantiated are REG and REG+CONST. */
8497 if (op == virtual_incoming_args_rtx
8498 || op == virtual_stack_vars_rtx
8499 || op == virtual_stack_dynamic_rtx
8500 || op == virtual_outgoing_args_rtx
8501 || op == virtual_cfa_rtx)
8504 /* While it's always safe to index off the frame pointer, it's not
8505 profitable to do so when the frame pointer is being eliminated. */
8506 if (!reload_completed
8507 && flag_omit_frame_pointer
8508 && !cfun->calls_alloca
8509 && op == frame_pointer_rtx)
8512 return register_operand (op, mode);
8515 /* Return 1 if this operand is anything other than a hard register. */
8518 non_hard_reg_operand (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8520 return ! (GET_CODE (op) == REG && REGNO (op) < FIRST_PSEUDO_REGISTER);
8523 /* Return 1 if INSN branches forward. Should be using insn_addresses
8524 to avoid walking through all the insns... */
8526 forward_branch_p (rtx insn)
8528 rtx label = JUMP_LABEL (insn);
8535 insn = NEXT_INSN (insn);
8538 return (insn == label);
8541 /* Return 1 if OP is an equality comparison, else return 0. */
8543 eq_neq_comparison_operator (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
8545 return (GET_CODE (op) == EQ || GET_CODE (op) == NE);
8548 /* Return 1 if INSN is in the delay slot of a call instruction. */
8550 jump_in_call_delay (rtx insn)
8553 if (GET_CODE (insn) != JUMP_INSN)
8556 if (PREV_INSN (insn)
8557 && PREV_INSN (PREV_INSN (insn))
8558 && GET_CODE (next_real_insn (PREV_INSN (PREV_INSN (insn)))) == INSN)
8560 rtx test_insn = next_real_insn (PREV_INSN (PREV_INSN (insn)));
8562 return (GET_CODE (PATTERN (test_insn)) == SEQUENCE
8563 && XVECEXP (PATTERN (test_insn), 0, 1) == insn);
8570 /* Output an unconditional move and branch insn. */
8573 output_parallel_movb (rtx *operands, rtx insn)
8575 int length = get_attr_length (insn);
8577 /* These are the cases in which we win. */
8579 return "mov%I1b,tr %1,%0,%2";
8581 /* None of the following cases win, but they don't lose either. */
8584 if (dbr_sequence_length () == 0)
8586 /* Nothing in the delay slot, fake it by putting the combined
8587 insn (the copy or add) in the delay slot of a bl. */
8588 if (GET_CODE (operands[1]) == CONST_INT)
8589 return "b %2\n\tldi %1,%0";
8591 return "b %2\n\tcopy %1,%0";
8595 /* Something in the delay slot, but we've got a long branch. */
8596 if (GET_CODE (operands[1]) == CONST_INT)
8597 return "ldi %1,%0\n\tb %2";
8599 return "copy %1,%0\n\tb %2";
8603 if (GET_CODE (operands[1]) == CONST_INT)
8604 output_asm_insn ("ldi %1,%0", operands);
8606 output_asm_insn ("copy %1,%0", operands);
8607 return output_lbranch (operands[2], insn, 1);
8610 /* Output an unconditional add and branch insn. */
8613 output_parallel_addb (rtx *operands, rtx insn)
8615 int length = get_attr_length (insn);
8617 /* To make life easy we want operand0 to be the shared input/output
8618 operand and operand1 to be the readonly operand. */
8619 if (operands[0] == operands[1])
8620 operands[1] = operands[2];
8622 /* These are the cases in which we win. */
8624 return "add%I1b,tr %1,%0,%3";
8626 /* None of the following cases win, but they don't lose either. */
8629 if (dbr_sequence_length () == 0)
8630 /* Nothing in the delay slot, fake it by putting the combined
8631 insn (the copy or add) in the delay slot of a bl. */
8632 return "b %3\n\tadd%I1 %1,%0,%0";
8634 /* Something in the delay slot, but we've got a long branch. */
8635 return "add%I1 %1,%0,%0\n\tb %3";
8638 output_asm_insn ("add%I1 %1,%0,%0", operands);
8639 return output_lbranch (operands[3], insn, 1);
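/* Illustrative sketch, not part of the original sources: the three output
   strategies used above, keyed off two booleans because the length
   thresholds that select between them are not reproduced here.  The
   long-branch fallback via output_lbranch is omitted.  */
#if 0
#include <stdio.h>

static const char *
pick_addb_template (int branch_reaches, int delay_slot_filled)
{
  if (branch_reaches)
    /* One combined add-and-branch; ",tr" makes the branch unconditional.  */
    return "add%I1b,tr %1,%0,%3";
  if (!delay_slot_filled)
    /* Fake it: put the add in the delay slot of an ordinary branch.  */
    return "b %3\n\tadd%I1 %1,%0,%0";
  /* Delay slot already used: do the add first, then branch.  */
  return "add%I1 %1,%0,%0\n\tb %3";
}

int
main (void)
{
  printf ("%s\n\n", pick_addb_template (1, 0));
  printf ("%s\n\n", pick_addb_template (0, 0));
  printf ("%s\n", pick_addb_template (0, 1));
  return 0;
}
#endif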
8642 /* Return nonzero if INSN (a jump insn) immediately follows a call
8643 to a named function. This is used to avoid filling the delay slot
8644 of the jump since it can usually be eliminated by modifying RP in
8645 the delay slot of the call. */
8648 following_call (rtx insn)
8650 if (! TARGET_JUMP_IN_DELAY)
8653 /* Find the previous real insn, skipping NOTEs. */
8654 insn = PREV_INSN (insn);
8655 while (insn && GET_CODE (insn) == NOTE)
8656 insn = PREV_INSN (insn);
8658 /* Check for CALL_INSNs and millicode calls. */
8660 && ((GET_CODE (insn) == CALL_INSN
8661 && get_attr_type (insn) != TYPE_DYNCALL)
8662 || (GET_CODE (insn) == INSN
8663 && GET_CODE (PATTERN (insn)) != SEQUENCE
8664 && GET_CODE (PATTERN (insn)) != USE
8665 && GET_CODE (PATTERN (insn)) != CLOBBER
8666 && get_attr_type (insn) == TYPE_MILLI)))
8672 /* We use this hook to perform a PA specific optimization which is difficult
8673 to do in earlier passes.
8675 We want the delay slots of branches within jump tables to be filled.
8676 None of the compiler passes at the moment even has the notion that a
8677 PA jump table doesn't contain addresses, but instead contains actual
8680 Because we actually jump into the table, the addresses of each entry
8681 must stay constant in relation to the beginning of the table (which
8682 itself must stay constant relative to the instruction to jump into
8683 it). I don't believe we can guarantee earlier passes of the compiler
8684 will adhere to those rules.
8686 So, late in the compilation process we find all the jump tables, and
8687 expand them into real code -- e.g. each entry in the jump table vector
8688 will get an appropriate label followed by a jump to the final target.
8690 Reorg and the final jump pass can then optimize these branches and
8691 fill their delay slots. We end up with smaller, more efficient code.
8693 The jump instructions within the table are special; we must be able
8694 to identify them during assembly output (if the jumps don't get filled
8695 we need to emit a nop rather than nullifying the delay slot). We
8696 identify jumps in switch tables by using insns with the attribute
8697 type TYPE_BTABLE_BRANCH.
8699 We also surround the jump table itself with BEGIN_BRTAB and END_BRTAB
8700 insns. This serves two purposes: first, it prevents jump.c from
8701 noticing that the last N entries in the table jump to the instruction
8702 immediately after the table and deleting the jumps. Second, those
8703 insns mark where we should emit .begin_brtab and .end_brtab directives
8704 when using GAS (allows for better link time optimizations). */
8711 remove_useless_addtr_insns (1);
8713 if (pa_cpu < PROCESSOR_8000)
8714 pa_combine_instructions ();
8717 /* This is fairly cheap, so always run it if optimizing. */
8718 if (optimize > 0 && !TARGET_BIG_SWITCH)
8720 /* Find and explode all ADDR_VEC or ADDR_DIFF_VEC insns. */
8721 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8723 rtx pattern, tmp, location, label;
8724 unsigned int length, i;
8726 /* Find an ADDR_VEC or ADDR_DIFF_VEC insn to explode. */
8727 if (GET_CODE (insn) != JUMP_INSN
8728 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8729 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8732 /* Emit marker for the beginning of the branch table. */
8733 emit_insn_before (gen_begin_brtab (), insn);
8735 pattern = PATTERN (insn);
8736 location = PREV_INSN (insn);
8737 length = XVECLEN (pattern, GET_CODE (pattern) == ADDR_DIFF_VEC);
8739 for (i = 0; i < length; i++)
8741 /* Emit a label before each jump to keep jump.c from
8742 removing this code. */
8743 tmp = gen_label_rtx ();
8744 LABEL_NUSES (tmp) = 1;
8745 emit_label_after (tmp, location);
8746 location = NEXT_INSN (location);
8748 if (GET_CODE (pattern) == ADDR_VEC)
8749 label = XEXP (XVECEXP (pattern, 0, i), 0);
8751 label = XEXP (XVECEXP (pattern, 1, i), 0);
8753 tmp = gen_short_jump (label);
8755 /* Emit the jump itself. */
8756 tmp = emit_jump_insn_after (tmp, location);
8757 JUMP_LABEL (tmp) = label;
8758 LABEL_NUSES (label)++;
8759 location = NEXT_INSN (location);
8761 /* Emit a BARRIER after the jump. */
8762 emit_barrier_after (location);
8763 location = NEXT_INSN (location);
8766 /* Emit marker for the end of the branch table. */
8767 emit_insn_before (gen_end_brtab (), location);
8768 location = NEXT_INSN (location);
8769 emit_barrier_after (location);
8771 /* Delete the ADDR_VEC or ADDR_DIFF_VEC. */
8777 /* Still need brtab marker insns. FIXME: the presence of these
8778 markers disables output of the branch table to readonly memory,
8779 and any alignment directives that might be needed. Possibly,
8780 the begin_brtab insn should be output before the label for the
8781 table. This doesn't matter at the moment since the tables are
8782 always output in the text section. */
8783 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8785 /* Find an ADDR_VEC insn. */
8786 if (GET_CODE (insn) != JUMP_INSN
8787 || (GET_CODE (PATTERN (insn)) != ADDR_VEC
8788 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC))
8791 /* Now generate markers for the beginning and end of the
8793 emit_insn_before (gen_begin_brtab (), insn);
8794 emit_insn_after (gen_end_brtab (), insn);
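/* Illustrative sketch, not part of the original sources: the shape of an
   exploded jump table as described above.  Each table slot becomes a
   label at a fixed offset from the start of the table followed by a
   branch to the real target; the delay slot is shown as a nop since it
   may or may not be filled later.  Label and target names are made up.  */
#if 0
#include <stdio.h>

int
main (void)
{
  static const char *const targets[] = { "L$case0", "L$case1", "L$case2" };
  unsigned int i;

  puts ("\t.begin_brtab");
  for (i = 0; i < sizeof targets / sizeof targets[0]; i++)
    {
      printf ("L$tbl%u:\n", i);
      printf ("\tb %s\n\tnop\n", targets[i]);
    }
  puts ("\t.end_brtab");
  return 0;
}
#endif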
8799 /* The PA has a number of odd instructions which can perform multiple
8800 tasks at once. On first generation PA machines (PA1.0 and PA1.1)
8801 it may be profitable to combine two instructions into one instruction
8802 with two outputs. It's not profitable on PA2.0 machines because the
8803 two outputs would take two slots in the reorder buffers.
8805 This routine finds instructions which can be combined and combines
8806 them. We only support some of the potential combinations, and we
8807 only try common ways to find suitable instructions.
8809 * addb can add two registers or a register and a small integer
8810 and jump to a nearby (+-8k) location. Normally the jump to the
8811 nearby location is conditional on the result of the add, but by
8812 using the "true" condition we can make the jump unconditional.
8813 Thus addb can perform two independent operations in one insn.
8815 * movb is similar to addb in that it can perform a reg->reg
8816 or small immediate->reg copy and jump to a nearby (+-8k) location.
8818 * fmpyadd and fmpysub can perform a FP multiply and either an
8819 FP add or FP sub if the operands of the multiply and add/sub are
8820 independent (there are other minor restrictions). Note both
8821 the fmpy and fadd/fsub can in theory move to better spots according
8822 to data dependencies, but for now we require the fmpy stay at a
8825 * Many of the memory operations can perform pre & post updates
8826 of index registers. GCC's pre/post increment/decrement addressing
8827 is far too simple to take advantage of all the possibilities. This
8828 pass may not be suitable since those insns may not be independent.
8830 * comclr can compare two ints or an int and a register, nullify
8831 the following instruction and zero some other register. This
8832 is more difficult to use as it's harder to find an insn which
8833 will generate a comclr than finding something like an unconditional
8834 branch. (conditional moves & long branches create comclr insns).
8836 * Most arithmetic operations can conditionally skip the next
8837 instruction. They can be viewed as "perform this operation
8838 and conditionally jump to this nearby location" (where nearby
8839 is an insn away). These are difficult to use due to the
8840 branch length restrictions. */
8843 pa_combine_instructions (void)
8845 rtx anchor, new_rtx;
8847 /* This can get expensive since the basic algorithm is on the
8848 order of O(n^2) (or worse). Only do it for -O2 or higher
8849 levels of optimization. */
8853 /* Walk down the list of insns looking for "anchor" insns which
8854 may be combined with "floating" insns. As the name implies,
8855 "anchor" instructions don't move, while "floating" insns may
8857 new_rtx = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, NULL_RTX, NULL_RTX));
8858 new_rtx = make_insn_raw (new_rtx);
8860 for (anchor = get_insns (); anchor; anchor = NEXT_INSN (anchor))
8862 enum attr_pa_combine_type anchor_attr;
8863 enum attr_pa_combine_type floater_attr;
8865 /* We only care about INSNs, JUMP_INSNs, and CALL_INSNs.
8866 Also ignore any special USE insns. */
8867 if ((GET_CODE (anchor) != INSN
8868 && GET_CODE (anchor) != JUMP_INSN
8869 && GET_CODE (anchor) != CALL_INSN)
8870 || GET_CODE (PATTERN (anchor)) == USE
8871 || GET_CODE (PATTERN (anchor)) == CLOBBER
8872 || GET_CODE (PATTERN (anchor)) == ADDR_VEC
8873 || GET_CODE (PATTERN (anchor)) == ADDR_DIFF_VEC)
8876 anchor_attr = get_attr_pa_combine_type (anchor);
8877 /* See if anchor is an insn suitable for combination. */
8878 if (anchor_attr == PA_COMBINE_TYPE_FMPY
8879 || anchor_attr == PA_COMBINE_TYPE_FADDSUB
8880 || (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8881 && ! forward_branch_p (anchor)))
8885 for (floater = PREV_INSN (anchor);
8887 floater = PREV_INSN (floater))
8889 if (GET_CODE (floater) == NOTE
8890 || (GET_CODE (floater) == INSN
8891 && (GET_CODE (PATTERN (floater)) == USE
8892 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8895 /* Anything except a regular INSN will stop our search. */
8896 if (GET_CODE (floater) != INSN
8897 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8898 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8904 /* See if FLOATER is suitable for combination with the
8906 floater_attr = get_attr_pa_combine_type (floater);
8907 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8908 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8909 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8910 && floater_attr == PA_COMBINE_TYPE_FMPY))
8912 /* If ANCHOR and FLOATER can be combined, then we're
8913 done with this pass. */
8914 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8915 SET_DEST (PATTERN (floater)),
8916 XEXP (SET_SRC (PATTERN (floater)), 0),
8917 XEXP (SET_SRC (PATTERN (floater)), 1)))
8921 else if (anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH
8922 && floater_attr == PA_COMBINE_TYPE_ADDMOVE)
8924 if (GET_CODE (SET_SRC (PATTERN (floater))) == PLUS)
8926 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8927 SET_DEST (PATTERN (floater)),
8928 XEXP (SET_SRC (PATTERN (floater)), 0),
8929 XEXP (SET_SRC (PATTERN (floater)), 1)))
8934 if (pa_can_combine_p (new_rtx, anchor, floater, 0,
8935 SET_DEST (PATTERN (floater)),
8936 SET_SRC (PATTERN (floater)),
8937 SET_SRC (PATTERN (floater))))
8943 /* If we didn't find anything on the backwards scan try forwards. */
8945 && (anchor_attr == PA_COMBINE_TYPE_FMPY
8946 || anchor_attr == PA_COMBINE_TYPE_FADDSUB))
8948 for (floater = anchor; floater; floater = NEXT_INSN (floater))
8950 if (GET_CODE (floater) == NOTE
8951 || (GET_CODE (floater) == INSN
8952 && (GET_CODE (PATTERN (floater)) == USE
8953 || GET_CODE (PATTERN (floater)) == CLOBBER)))
8957 /* Anything except a regular INSN will stop our search. */
8958 if (GET_CODE (floater) != INSN
8959 || GET_CODE (PATTERN (floater)) == ADDR_VEC
8960 || GET_CODE (PATTERN (floater)) == ADDR_DIFF_VEC)
8966 /* See if FLOATER is suitable for combination with the
8968 floater_attr = get_attr_pa_combine_type (floater);
8969 if ((anchor_attr == PA_COMBINE_TYPE_FMPY
8970 && floater_attr == PA_COMBINE_TYPE_FADDSUB)
8971 || (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8972 && floater_attr == PA_COMBINE_TYPE_FMPY))
8974 /* If ANCHOR and FLOATER can be combined, then we're
8975 done with this pass. */
8976 if (pa_can_combine_p (new_rtx, anchor, floater, 1,
8977 SET_DEST (PATTERN (floater)),
8978 XEXP (SET_SRC (PATTERN (floater)),
8980 XEXP (SET_SRC (PATTERN (floater)),
8987 /* FLOATER will be nonzero if we found a suitable floating
8988 insn for combination with ANCHOR. */
8990 && (anchor_attr == PA_COMBINE_TYPE_FADDSUB
8991 || anchor_attr == PA_COMBINE_TYPE_FMPY))
8993 /* Emit the new instruction and delete the old anchor. */
8994 emit_insn_before (gen_rtx_PARALLEL
8996 gen_rtvec (2, PATTERN (anchor),
8997 PATTERN (floater))),
9000 SET_INSN_DELETED (anchor);
9002 /* Emit a special USE insn for FLOATER, then delete
9003 the floating insn. */
9004 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9005 delete_insn (floater);
9010 && anchor_attr == PA_COMBINE_TYPE_UNCOND_BRANCH)
9013 /* Emit the new_jump instruction and delete the old anchor. */
9015 = emit_jump_insn_before (gen_rtx_PARALLEL
9017 gen_rtvec (2, PATTERN (anchor),
9018 PATTERN (floater))),
9021 JUMP_LABEL (temp) = JUMP_LABEL (anchor);
9022 SET_INSN_DELETED (anchor);
9024 /* Emit a special USE insn for FLOATER, then delete
9025 the floating insn. */
9026 emit_insn_before (gen_rtx_USE (VOIDmode, floater), floater);
9027 delete_insn (floater);
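/* Illustrative sketch, not part of the original sources: a toy model of
   the anchor/floater scan.  'A' marks an anchor, 'F' a combinable
   floating insn, and '.' notes/USE/CLOBBER insns the scan may skip; the
   real pass works on RTL, checks insn attributes, and validates the
   combination with pa_can_combine_p.  */
#if 0
#include <stdio.h>
#include <string.h>

int
main (void)
{
  const char *insns = "..F..A..F.";
  int n = (int) strlen (insns);
  int a, f;

  for (a = 0; a < n; a++)
    {
      if (insns[a] != 'A')
        continue;
      /* Scan backwards from the anchor first...  */
      for (f = a - 1; f >= 0 && insns[f] != 'F'; f--)
        ;
      /* ...then forwards if nothing was found.  */
      if (f < 0)
        for (f = a + 1; f < n && insns[f] != 'F'; f++)
          ;
      if (f >= 0 && f < n && insns[f] == 'F')
        printf ("combine anchor %d with floater %d\n", a, f);
    }
  return 0;
}
#endif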
9035 pa_can_combine_p (rtx new_rtx, rtx anchor, rtx floater, int reversed, rtx dest,
9038 int insn_code_number;
9041 /* Create a PARALLEL with the patterns of ANCHOR and
9042 FLOATER, try to recognize it, then test constraints
9043 for the resulting pattern.
9045 If the pattern doesn't match or the constraints
9046 aren't met keep searching for a suitable floater
9048 XVECEXP (PATTERN (new_rtx), 0, 0) = PATTERN (anchor);
9049 XVECEXP (PATTERN (new_rtx), 0, 1) = PATTERN (floater);
9050 INSN_CODE (new_rtx) = -1;
9051 insn_code_number = recog_memoized (new_rtx);
9052 if (insn_code_number < 0
9053 || (extract_insn (new_rtx), ! constrain_operands (1)))
9067 /* There are up to three operands to consider: one
9068 output and two inputs.
9070 The output must not be used between FLOATER & ANCHOR
9071 exclusive. The inputs must not be set between
9072 FLOATER and ANCHOR exclusive. */
9074 if (reg_used_between_p (dest, start, end))
9077 if (reg_set_between_p (src1, start, end))
9080 if (reg_set_between_p (src2, start, end))
9083 /* If we get here, then everything is good. */
9087 /* Return nonzero if references for INSN are delayed.
9089 Millicode insns are actually function calls with some special
9090 constraints on arguments and register usage.
9092 Millicode calls always expect their arguments in the integer argument
9093 registers, and always return their result in %r29 (ret1). They
9094 are expected to clobber their arguments, %r1, %r29, and the return
9095 pointer which is %r31 on 32-bit and %r2 on 64-bit, and nothing else.
9097 This function tells reorg that the references to arguments and
9098 millicode calls do not appear to happen until after the millicode call.
9099 This allows reorg to put insns which set the argument registers into the
9100 delay slot of the millicode call -- thus they act more like traditional
9103 Note we cannot consider side effects of the insn to be delayed because
9104 the branch and link insn will clobber the return pointer. If we happened
9105 to use the return pointer in the delay slot of the call, then we lose.
9107 get_attr_type will try to recognize the given insn, so make sure to
9108 filter out things it will not accept -- SEQUENCE, USE and CLOBBER insns
9111 insn_refs_are_delayed (rtx insn)
9113 return ((GET_CODE (insn) == INSN
9114 && GET_CODE (PATTERN (insn)) != SEQUENCE
9115 && GET_CODE (PATTERN (insn)) != USE
9116 && GET_CODE (PATTERN (insn)) != CLOBBER
9117 && get_attr_type (insn) == TYPE_MILLI));
9120 /* On the HP-PA the value is found in register(s) 28(-29), unless
9121 the mode is SFmode or DFmode, in which case the value is returned in fr4 (32).
9123 This must perform the same promotions as PROMOTE_MODE, else
9124 TARGET_PROMOTE_FUNCTION_RETURN will not work correctly.
9126 Small structures must be returned in a PARALLEL on PA64 in order
9127 to match the HP Compiler ABI. */
9130 function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
9132 enum machine_mode valmode;
9134 if (AGGREGATE_TYPE_P (valtype)
9135 || TREE_CODE (valtype) == COMPLEX_TYPE
9136 || TREE_CODE (valtype) == VECTOR_TYPE)
9140 /* Aggregates with a size less than or equal to 128 bits are
9141 returned in GR 28(-29). They are left justified. The pad
9142 bits are undefined. Larger aggregates are returned in
9146 int ub = int_size_in_bytes (valtype) <= UNITS_PER_WORD ? 1 : 2;
9148 for (i = 0; i < ub; i++)
9150 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9151 gen_rtx_REG (DImode, 28 + i),
9156 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (ub, loc));
9158 else if (int_size_in_bytes (valtype) > UNITS_PER_WORD)
9160 /* Aggregates 5 to 8 bytes in size are returned in general
9161 registers r28-r29 in the same manner as other non
9162 floating-point objects. The data is right-justified and
9163 zero-extended to 64 bits. This is opposite to the normal
9164 justification used on big endian targets and requires
9165 special treatment. */
9166 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9167 gen_rtx_REG (DImode, 28), const0_rtx);
9168 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9172 if ((INTEGRAL_TYPE_P (valtype)
9173 && GET_MODE_BITSIZE (TYPE_MODE (valtype)) < BITS_PER_WORD)
9174 || POINTER_TYPE_P (valtype))
9175 valmode = word_mode;
9177 valmode = TYPE_MODE (valtype);
9179 if (TREE_CODE (valtype) == REAL_TYPE
9180 && !AGGREGATE_TYPE_P (valtype)
9181 && TYPE_MODE (valtype) != TFmode
9182 && !TARGET_SOFT_FLOAT)
9183 return gen_rtx_REG (valmode, 32);
9185 return gen_rtx_REG (valmode, 28);
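/* Illustrative sketch, not part of the original sources: a simplified
   32-bit picture of the return-value rules above.  Scalar floats come
   back in %fr4 when not using soft float, everything else that fits in
   64 bits comes back in %r28 (and %r29 for the second word); the PA64
   PARALLEL case and the TFmode exception are not modelled.  */
#if 0
#include <stdio.h>

static const char *
return_location (int is_float, int size_in_bytes, int soft_float)
{
  if (is_float && !soft_float && size_in_bytes <= 8)
    return "%fr4";
  if (size_in_bytes <= 4)
    return "%r28";
  if (size_in_bytes <= 8)
    return "%r28-%r29";
  return "memory (see pa_return_in_memory)";
}

int
main (void)
{
  printf ("int:           %s\n", return_location (0, 4, 0));
  printf ("double:        %s\n", return_location (1, 8, 0));
  printf ("8-byte struct: %s\n", return_location (0, 8, 0));
  return 0;
}
#endif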
9188 /* Return the location of a parameter that is passed in a register or NULL
9189 if the parameter has any component that is passed in memory.
9191 This is new code and will be pushed into the net sources after
9194 ??? We might want to restructure this so that it looks more like other
9197 function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode, tree type,
9198 int named ATTRIBUTE_UNUSED)
9200 int max_arg_words = (TARGET_64BIT ? 8 : 4);
9207 if (mode == VOIDmode)
9210 arg_size = FUNCTION_ARG_SIZE (mode, type);
9212 /* If this arg would be passed partially or totally on the stack, then
9213 this routine should return zero. pa_arg_partial_bytes will
9214 handle arguments which are split between regs and stack slots if
9215 the ABI mandates split arguments. */
9218 /* The 32-bit ABI does not split arguments. */
9219 if (cum->words + arg_size > max_arg_words)
9225 alignment = cum->words & 1;
9226 if (cum->words + alignment >= max_arg_words)
9230 /* The 32bit ABIs and the 64bit ABIs are rather different,
9231 particularly in their handling of FP registers. We might
9232 be able to cleverly share code between them, but I'm not
9233 going to bother in the hope that splitting them up results
9234 in code that is more easily understood. */
9238 /* Advance the base registers to their current locations.
9240 Remember, gprs grow towards smaller register numbers while
9241 fprs grow to higher register numbers. Also remember that
9242 although FP regs are 32-bit addressable, we pretend that
9243 the registers are 64-bits wide. */
9244 gpr_reg_base = 26 - cum->words;
9245 fpr_reg_base = 32 + cum->words;
9247 /* Arguments wider than one word and small aggregates need special
9251 || (type && (AGGREGATE_TYPE_P (type)
9252 || TREE_CODE (type) == COMPLEX_TYPE
9253 || TREE_CODE (type) == VECTOR_TYPE)))
9255 /* Double-extended precision (80-bit), quad-precision (128-bit)
9256 and aggregates including complex numbers are aligned on
9257 128-bit boundaries. The first eight 64-bit argument slots
9258 are associated one-to-one, with general registers r26
9259 through r19, and also with floating-point registers fr4
9260 through fr11. Arguments larger than one word are always
9261 passed in general registers.
9263 Using a PARALLEL with a word mode register results in left
9264 justified data on a big-endian target. */
9267 int i, offset = 0, ub = arg_size;
9269 /* Align the base register. */
9270 gpr_reg_base -= alignment;
9272 ub = MIN (ub, max_arg_words - cum->words - alignment);
9273 for (i = 0; i < ub; i++)
9275 loc[i] = gen_rtx_EXPR_LIST (VOIDmode,
9276 gen_rtx_REG (DImode, gpr_reg_base),
9282 return gen_rtx_PARALLEL (mode, gen_rtvec_v (ub, loc));
9287 /* If the argument is larger than a word, then we know precisely
9288 which registers we must use. */
9302 /* Structures 5 to 8 bytes in size are passed in the general
9303 registers in the same manner as other non floating-point
9304 objects. The data is right-justified and zero-extended
9305 to 64 bits. This is opposite to the normal justification
9306 used on big endian targets and requires special treatment.
9307 We now define BLOCK_REG_PADDING to pad these objects.
9308 Aggregates, complex and vector types are passed in the same
9309 manner as structures. */
9311 || (type && (AGGREGATE_TYPE_P (type)
9312 || TREE_CODE (type) == COMPLEX_TYPE
9313 || TREE_CODE (type) == VECTOR_TYPE)))
9315 rtx loc = gen_rtx_EXPR_LIST (VOIDmode,
9316 gen_rtx_REG (DImode, gpr_reg_base),
9318 return gen_rtx_PARALLEL (BLKmode, gen_rtvec (1, loc));
9323 /* We have a single word (32 bits). A simple computation
9324 will get us the register #s we need. */
9325 gpr_reg_base = 26 - cum->words;
9326 fpr_reg_base = 32 + 2 * cum->words;
9330 /* Determine if the argument needs to be passed in both general and
9331 floating point registers. */
9332 if (((TARGET_PORTABLE_RUNTIME || TARGET_64BIT || TARGET_ELF32)
9333 /* If we are doing soft-float with portable runtime, then there
9334 is no need to worry about FP regs. */
9335 && !TARGET_SOFT_FLOAT
9336 /* The parameter must be some kind of scalar float, else we just
9337 pass it in integer registers. */
9338 && GET_MODE_CLASS (mode) == MODE_FLOAT
9339 /* The target function must not have a prototype. */
9340 && cum->nargs_prototype <= 0
9341 /* libcalls do not need to pass items in both FP and general
9343 && type != NULL_TREE
9344 /* All this hair applies to "outgoing" args only. This includes
9345 sibcall arguments setup with FUNCTION_INCOMING_ARG. */
9347 /* Also pass outgoing floating arguments in both registers in indirect
9348 calls with the 32 bit ABI and the HP assembler since there is no
9349 way to specify the argument locations in static functions.
9354 && GET_MODE_CLASS (mode) == MODE_FLOAT))
9360 gen_rtx_EXPR_LIST (VOIDmode,
9361 gen_rtx_REG (mode, fpr_reg_base),
9363 gen_rtx_EXPR_LIST (VOIDmode,
9364 gen_rtx_REG (mode, gpr_reg_base),
9369 /* See if we should pass this parameter in a general register. */
9370 if (TARGET_SOFT_FLOAT
9371 /* Indirect calls in the normal 32bit ABI require all arguments
9372 to be passed in general registers. */
9373 || (!TARGET_PORTABLE_RUNTIME
9377 /* If the parameter is not a scalar floating-point parameter,
9378 then it belongs in GPRs. */
9379 || GET_MODE_CLASS (mode) != MODE_FLOAT
9380 /* Structure with single SFmode field belongs in GPR. */
9381 || (type && AGGREGATE_TYPE_P (type)))
9382 retval = gen_rtx_REG (mode, gpr_reg_base);
9384 retval = gen_rtx_REG (mode, fpr_reg_base);
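/* Illustrative sketch, not part of the original sources: the 32-bit
   single-word mapping computed above.  Argument word N lands in GCC hard
   register 26 - N (i.e. %r26, %r25, %r24, %r23) and, for a scalar float
   passed in the FP registers, in hard register 32 + 2 * N.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int words;

  for (words = 0; words < 4; words++)
    printf ("arg word %d: GPR regno %d (%%r%d), FPR regno %d\n",
            words, 26 - words, 26 - words, 32 + 2 * words);
  return 0;
}
#endif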
9390 /* If this arg would be passed totally in registers or totally on the stack,
9391 then this routine should return zero. */
9394 pa_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9395 tree type, bool named ATTRIBUTE_UNUSED)
9397 unsigned int max_arg_words = 8;
9398 unsigned int offset = 0;
9403 if (FUNCTION_ARG_SIZE (mode, type) > 1 && (cum->words & 1))
9406 if (cum->words + offset + FUNCTION_ARG_SIZE (mode, type) <= max_arg_words)
9407 /* Arg fits fully into registers. */
9409 else if (cum->words + offset >= max_arg_words)
9410 /* Arg fully on the stack. */
9414 return (max_arg_words - cum->words - offset) * UNITS_PER_WORD;
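/* Illustrative sketch, not part of the original sources: a worked example
   of the partial-bytes computation above for the 64-bit ABI (8 argument
   words of 8 bytes each); the 32-bit ABI never splits arguments.  The
   argument size and position are hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned int max_arg_words = 8, units_per_word = 8;
  unsigned int words = 6;        /* argument words already used */
  unsigned int arg_size = 4;     /* this argument needs 4 words */
  unsigned int offset = (arg_size > 1 && (words & 1)) ? 1 : 0;
  unsigned int partial;

  if (words + offset + arg_size <= max_arg_words)
    partial = 0;                 /* fits entirely in registers */
  else if (words + offset >= max_arg_words)
    partial = 0;                 /* goes entirely on the stack */
  else
    partial = (max_arg_words - words - offset) * units_per_word;

  printf ("%u bytes passed in registers, the rest on the stack\n", partial);
  return 0;
}
#endif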
9418 /* A get_unnamed_section callback for switching to the text section.
9420 This function is only used with SOM. Because we don't support
9421 named subspaces, we can only create a new subspace or switch back
9422 to the default text subspace. */
9425 som_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
9427 gcc_assert (TARGET_SOM);
9430 if (cfun && cfun->machine && !cfun->machine->in_nsubspa)
9432 /* We only want to emit a .nsubspa directive once at the
9433 start of the function. */
9434 cfun->machine->in_nsubspa = 1;
9436 /* Create a new subspace for the text. This provides
9437 better stub placement and one-only functions. */
9439 && DECL_ONE_ONLY (cfun->decl)
9440 && !DECL_WEAK (cfun->decl))
9442 output_section_asm_op ("\t.SPACE $TEXT$\n"
9443 "\t.NSUBSPA $CODE$,QUAD=0,ALIGN=8,"
9444 "ACCESS=44,SORT=24,COMDAT");
9450 /* There isn't a current function or the body of the current
9451 function has been completed. So, we are changing to the
9452 text section to output debugging information. Thus, we
9453 need to forget that we are in the text section so that
9454 varasm.c will call us when text_section is selected again. */
9455 gcc_assert (!cfun || !cfun->machine
9456 || cfun->machine->in_nsubspa == 2);
9459 output_section_asm_op ("\t.SPACE $TEXT$\n\t.NSUBSPA $CODE$");
9462 output_section_asm_op ("\t.SPACE $TEXT$\n\t.SUBSPA $CODE$");
9465 /* A get_unnamed_section callback for switching to comdat data
9466 sections. This function is only used with SOM. */
9469 som_output_comdat_data_section_asm_op (const void *data)
9472 output_section_asm_op (data);
9475 /* Implement TARGET_ASM_INITIALIZE_SECTIONS */
9478 pa_som_asm_init_sections (void)
9481 = get_unnamed_section (0, som_output_text_section_asm_op, NULL);
9483 /* SOM puts readonly data in the default $LIT$ subspace when PIC code
9484 is not being generated. */
9485 som_readonly_data_section
9486 = get_unnamed_section (0, output_section_asm_op,
9487 "\t.SPACE $TEXT$\n\t.SUBSPA $LIT$");
9489 /* When secondary definitions are not supported, SOM makes readonly
9490 data one-only by creating a new $LIT$ subspace in $TEXT$ with
9492 som_one_only_readonly_data_section
9493 = get_unnamed_section (0, som_output_comdat_data_section_asm_op,
9495 "\t.NSUBSPA $LIT$,QUAD=0,ALIGN=8,"
9496 "ACCESS=0x2c,SORT=16,COMDAT");
9499 /* When secondary definitions are not supported, SOM makes data one-only
9500 by creating a new $DATA$ subspace in $PRIVATE$ with the comdat flag. */
9501 som_one_only_data_section
9502 = get_unnamed_section (SECTION_WRITE,
9503 som_output_comdat_data_section_asm_op,
9504 "\t.SPACE $PRIVATE$\n"
9505 "\t.NSUBSPA $DATA$,QUAD=1,ALIGN=8,"
9506 "ACCESS=31,SORT=24,COMDAT");
9508 /* FIXME: HPUX ld generates incorrect GOT entries for "T" fixups
9509 which reference data within the $TEXT$ space (for example constant
9510 strings in the $LIT$ subspace).
9512 The assemblers (GAS and HP as) both have problems with handling
9513 the difference of two symbols which is the other correct way to
9514 reference constant data during PIC code generation.
9516 So, there's no way to reference constant data which is in the
9517 $TEXT$ space during PIC generation. Instead place all constant
9518 data into the $PRIVATE$ subspace (this reduces sharing, but it
9519 works correctly). */
9520 readonly_data_section = flag_pic ? data_section : som_readonly_data_section;
9522 /* We must not have a reference to an external symbol defined in a
9523 shared library in a readonly section, else the SOM linker will
9526 So, we force exception information into the data section. */
9527 exception_section = data_section;
9530 /* On hpux10, the linker will give an error if we have a reference
9531 in the read-only data section to a symbol defined in a shared
9532 library. Therefore, expressions that might require a reloc can
9533 not be placed in the read-only data section. */
9536 pa_select_section (tree exp, int reloc,
9537 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
9539 if (TREE_CODE (exp) == VAR_DECL
9540 && TREE_READONLY (exp)
9541 && !TREE_THIS_VOLATILE (exp)
9542 && DECL_INITIAL (exp)
9543 && (DECL_INITIAL (exp) == error_mark_node
9544 || TREE_CONSTANT (DECL_INITIAL (exp)))
9548 && DECL_ONE_ONLY (exp)
9549 && !DECL_WEAK (exp))
9550 return som_one_only_readonly_data_section;
9552 return readonly_data_section;
9554 else if (CONSTANT_CLASS_P (exp) && !reloc)
9555 return readonly_data_section;
9557 && TREE_CODE (exp) == VAR_DECL
9558 && DECL_ONE_ONLY (exp)
9559 && !DECL_WEAK (exp))
9560 return som_one_only_data_section;
9562 return data_section;
9566 pa_globalize_label (FILE *stream, const char *name)
9568 /* We only handle DATA objects here, functions are globalized in
9569 ASM_DECLARE_FUNCTION_NAME. */
9570 if (! FUNCTION_NAME_P (name))
9572 fputs ("\t.EXPORT ", stream);
9573 assemble_name (stream, name);
9574 fputs (",DATA\n", stream);
9578 /* Worker function for TARGET_STRUCT_VALUE_RTX. */
9581 pa_struct_value_rtx (tree fntype ATTRIBUTE_UNUSED,
9582 int incoming ATTRIBUTE_UNUSED)
9584 return gen_rtx_REG (Pmode, PA_STRUCT_VALUE_REGNUM);
9587 /* Worker function for TARGET_RETURN_IN_MEMORY. */
9590 pa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
9592 /* SOM ABI says that objects larger than 64 bits are returned in memory.
9593 PA64 ABI says that objects larger than 128 bits are returned in memory.
9594 Note, int_size_in_bytes can return -1 if the size of the object is
9595 variable or larger than the maximum value that can be expressed as
9596 a HOST_WIDE_INT. It can also return zero for an empty type. The
9597 simplest way to handle variable and empty types is to pass them in
9598 memory. This avoids problems in defining the boundaries of argument
9599 slots, allocating registers, etc. */
9600 return (int_size_in_bytes (type) > (TARGET_64BIT ? 16 : 8)
9601 || int_size_in_bytes (type) <= 0);
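/* Illustrative sketch, not part of the original sources: the rule above
   evaluated for a few hypothetical sizes.  SOM (32-bit) returns objects
   larger than 8 bytes in memory, PA64 objects larger than 16 bytes, and
   non-positive sizes (variable-sized or empty types) also go in memory.  */
#if 0
#include <stdio.h>

static int
returns_in_memory (long size, int target_64bit)
{
  return size > (target_64bit ? 16 : 8) || size <= 0;
}

int
main (void)
{
  printf ("12-byte struct, 32-bit: %d\n", returns_in_memory (12, 0));
  printf ("12-byte struct, 64-bit: %d\n", returns_in_memory (12, 1));
  printf ("variable size (-1):     %d\n", returns_in_memory (-1, 1));
  return 0;
}
#endif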
9604 /* Structure to hold declaration and name of external symbols that are
9605 emitted by GCC. We generate a vector of these symbols and output them
9606 at the end of the file if and only if SYMBOL_REF_REFERENCED_P is true.
9607 This avoids putting out names that are never really used. */
9609 typedef struct GTY(()) extern_symbol
9615 /* Define gc'd vector type for extern_symbol. */
9616 DEF_VEC_O(extern_symbol);
9617 DEF_VEC_ALLOC_O(extern_symbol,gc);
9619 /* Vector of extern_symbol pointers. */
9620 static GTY(()) VEC(extern_symbol,gc) *extern_symbols;
9622 #ifdef ASM_OUTPUT_EXTERNAL_REAL
9623 /* Mark DECL (name NAME) as an external reference (assembler output
9624 file FILE). This saves the names to output at the end of the file
9625 if actually referenced. */
9628 pa_hpux_asm_output_external (FILE *file, tree decl, const char *name)
9630 extern_symbol * p = VEC_safe_push (extern_symbol, gc, extern_symbols, NULL);
9632 gcc_assert (file == asm_out_file);
9637 /* Output text required at the end of an assembler file.
9638 This includes deferred plabels and .import directives for
9639 all external symbols that were actually referenced. */
9642 pa_hpux_file_end (void)
9647 if (!NO_DEFERRED_PROFILE_COUNTERS)
9648 output_deferred_profile_counters ();
9650 output_deferred_plabels ();
9652 for (i = 0; VEC_iterate (extern_symbol, extern_symbols, i, p); i++)
9654 tree decl = p->decl;
9656 if (!TREE_ASM_WRITTEN (decl)
9657 && SYMBOL_REF_REFERENCED_P (XEXP (DECL_RTL (decl), 0)))
9658 ASM_OUTPUT_EXTERNAL_REAL (asm_out_file, decl, p->name);
9661 VEC_free (extern_symbol, gc, extern_symbols);
9665 /* Return true if a change from mode FROM to mode TO for a register
9666 in register class RCLASS is invalid. */
9669 pa_cannot_change_mode_class (enum machine_mode from, enum machine_mode to,
9670 enum reg_class rclass)
9675 /* Reject changes to/from complex and vector modes. */
9676 if (COMPLEX_MODE_P (from) || VECTOR_MODE_P (from)
9677 || COMPLEX_MODE_P (to) || VECTOR_MODE_P (to))
9680 if (GET_MODE_SIZE (from) == GET_MODE_SIZE (to))
9683 /* There is no way to load QImode or HImode values directly from
9684 memory. SImode loads to the FP registers are not zero extended.
9685 On the 64-bit target, this conflicts with the definition of
9686 LOAD_EXTEND_OP. Thus, we can't allow changing between modes
9687 with different sizes in the floating-point registers. */
9688 if (MAYBE_FP_REG_CLASS_P (rclass))
9691 /* HARD_REGNO_MODE_OK places modes with sizes larger than a word
9692 in specific sets of registers. Thus, we cannot allow changing
9693 to a larger mode when it's larger than a word. */
9694 if (GET_MODE_SIZE (to) > UNITS_PER_WORD
9695 && GET_MODE_SIZE (to) > GET_MODE_SIZE (from))
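/* Illustrative sketch, not part of the original sources: a simplified
   model of the checks above using byte sizes and a flag for "class may
   contain FP registers".  Complex and vector modes are omitted;
   units_per_word is 4 for the 32-bit target and 8 for the 64-bit one.  */
#if 0
#include <stdio.h>

static int
cannot_change (unsigned int from_size, unsigned int to_size,
               int maybe_fp_class, unsigned int units_per_word)
{
  if (from_size == to_size)
    return 0;                 /* same size is always fine */
  if (maybe_fp_class)
    return 1;                 /* no subword loads/extensions in FP regs */
  if (to_size > units_per_word && to_size > from_size)
    return 1;                 /* growing past a word is not allowed */
  return 0;
}

int
main (void)
{
  printf ("SI->DI in FP regs:      %d\n", cannot_change (4, 8, 1, 4));
  printf ("SI->DI in general regs: %d\n", cannot_change (4, 8, 0, 4));
  printf ("DI->SI in general regs: %d\n", cannot_change (8, 4, 0, 4));
  return 0;
}
#endif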
9701 /* Returns TRUE if it is a good idea to tie two pseudo registers
9702 when one has mode MODE1 and one has mode MODE2.
9703 If HARD_REGNO_MODE_OK could produce different values for MODE1 and MODE2,
9704 for any hard reg, then this must be FALSE for correct output.
9706 We should return FALSE for QImode and HImode because these modes
9707 are not ok in the floating-point registers. However, this prevents
9708 tying these modes to SImode and DImode in the general registers.
9709 So, this isn't a good idea. We rely on HARD_REGNO_MODE_OK and
9710 CANNOT_CHANGE_MODE_CLASS to prevent these modes from being used
9711 in the floating-point registers. */
9714 pa_modes_tieable_p (enum machine_mode mode1, enum machine_mode mode2)
9716 /* Don't tie modes in different classes. */
9717 if (GET_MODE_CLASS (mode1) != GET_MODE_CLASS (mode2))