/* Output routines for GCC for ARM.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001,
   2002, 2003, 2004, 2005, 2006, 2007 Free Software Foundation, Inc.
   Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
   and Martin Simmons (@harleqn.co.uk).
   More major hacks by Richard Earnshaw (rearnsha@arm.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "obstack.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "real.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "reload.h"
#include "function.h"
#include "expr.h"
#include "optabs.h"
#include "toplev.h"
#include "recog.h"
#include "ggc.h"
#include "except.h"
#include "c-pragma.h"
#include "integrate.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "debug.h"
#include "langhooks.h"
/* Forward definitions of types.  */
typedef struct minipool_node    Mnode;
typedef struct minipool_fixup   Mfix;

const struct attribute_spec arm_attribute_table[];
/* Forward function declarations.  */
static arm_stack_offsets *arm_get_frame_offsets (void);
static void arm_add_gc_roots (void);
static int arm_gen_constant (enum rtx_code, enum machine_mode, rtx,
			     HOST_WIDE_INT, rtx, rtx, int, int);
static unsigned bit_count (unsigned long);
static int arm_address_register_rtx_p (rtx, int);
static int arm_legitimate_index_p (enum machine_mode, rtx, RTX_CODE, int);
static int thumb2_legitimate_index_p (enum machine_mode, rtx, int);
static int thumb1_base_register_rtx_p (rtx, enum machine_mode, int);
inline static int thumb1_index_register_rtx_p (rtx, int);
static int thumb_far_jump_used_p (void);
static bool thumb_force_lr_save (void);
static unsigned long thumb1_compute_save_reg_mask (void);
static int const_ok_for_op (HOST_WIDE_INT, enum rtx_code);
static rtx emit_sfm (int, int);
static int arm_size_return_regs (void);
#ifndef AOF_ASSEMBLER
static bool arm_assemble_integer (rtx, unsigned int, int);
#endif
static const char *fp_const_from_val (REAL_VALUE_TYPE *);
static arm_cc get_arm_condition_code (rtx);
static HOST_WIDE_INT int_log2 (HOST_WIDE_INT);
static rtx is_jump_table (rtx);
static const char *output_multi_immediate (rtx *, const char *, const char *,
					   int, HOST_WIDE_INT);
static const char *shift_op (rtx, HOST_WIDE_INT *);
static struct machine_function *arm_init_machine_status (void);
static void thumb_exit (FILE *, int);
static HOST_WIDE_INT get_jump_table_size (rtx);
static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_forward_ref (Mfix *);
static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
static Mnode *add_minipool_backward_ref (Mfix *);
static void assign_minipool_offsets (Mfix *);
static void arm_print_value (FILE *, rtx);
static void dump_minipool (rtx);
static int arm_barrier_cost (rtx);
static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
static void push_minipool_barrier (rtx, HOST_WIDE_INT);
static void push_minipool_fix (rtx, HOST_WIDE_INT, rtx *, enum machine_mode,
			       rtx);
static void arm_reorg (void);
static bool note_invalid_constants (rtx, HOST_WIDE_INT, int);
static int current_file_function_operand (rtx);
static unsigned long arm_compute_save_reg0_reg12_mask (void);
static unsigned long arm_compute_save_reg_mask (void);
static unsigned long arm_isr_value (tree);
static unsigned long arm_compute_func_type (void);
static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
#endif
static void arm_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void arm_output_function_prologue (FILE *, HOST_WIDE_INT);
static void thumb1_output_function_prologue (FILE *, HOST_WIDE_INT);
static int arm_comp_type_attributes (tree, tree);
static void arm_set_default_type_attributes (tree);
static int arm_adjust_cost (rtx, rtx, rtx, int);
static int count_insns_for_constant (HOST_WIDE_INT, int);
static int arm_get_strip_length (int);
static bool arm_function_ok_for_sibcall (tree, tree);
static void arm_internal_label (FILE *, const char *, unsigned long);
static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				 tree);
static int arm_rtx_costs_1 (rtx, enum rtx_code, enum rtx_code);
static bool arm_size_rtx_costs (rtx, int, int, int *);
static bool arm_slowmul_rtx_costs (rtx, int, int, int *);
static bool arm_fastmul_rtx_costs (rtx, int, int, int *);
static bool arm_xscale_rtx_costs (rtx, int, int, int *);
static bool arm_9e_rtx_costs (rtx, int, int, int *);
static int arm_address_cost (rtx);
static bool arm_memory_load_p (rtx);
static bool arm_cirrus_insn_p (rtx);
static void cirrus_reorg (rtx);
static void arm_init_builtins (void);
static rtx arm_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void arm_init_iwmmxt_builtins (void);
static rtx safe_vector_operand (rtx, enum machine_mode);
static rtx arm_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx arm_expand_unop_builtin (enum insn_code, tree, rtx, int);
static void emit_constant_insn (rtx cond, rtx pattern);
static rtx emit_set_insn (rtx, rtx);
static int arm_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				  tree, bool);
#ifdef OBJECT_FORMAT_ELF
static void arm_elf_asm_constructor (rtx, int);
static void arm_elf_asm_destructor (rtx, int);
#endif
#ifndef ARM_PE
static void arm_encode_section_info (tree, rtx, int);
#endif

static void arm_file_end (void);
static void arm_file_start (void);
#ifdef AOF_ASSEMBLER
static void aof_globalize_label (FILE *, const char *);
static void aof_dump_imports (FILE *);
static void aof_dump_pic_table (FILE *);
static void aof_file_start (void);
static void aof_file_end (void);
static void aof_asm_init_sections (void);
#endif
static void arm_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
					tree, int *, int);
static bool arm_pass_by_reference (CUMULATIVE_ARGS *,
				   enum machine_mode, tree, bool);
static bool arm_promote_prototypes (tree);
static bool arm_default_short_enums (void);
static bool arm_align_anon_bitfield (void);
static bool arm_return_in_msb (tree);
static bool arm_must_pass_in_stack (enum machine_mode, tree);
#ifdef TARGET_UNWIND_INFO
static void arm_unwind_emit (FILE *, rtx);
static bool arm_output_ttype (rtx);
#endif
static void arm_dwarf_handle_frame_unspec (const char *, rtx, int);
static tree arm_cxx_guard_type (void);
static bool arm_cxx_guard_mask_bit (void);
static tree arm_get_cookie_size (tree);
static bool arm_cookie_has_size (void);
static bool arm_cxx_cdtor_returns_this (void);
static bool arm_cxx_key_method_may_be_inline (void);
static void arm_cxx_determine_class_data_visibility (tree);
static bool arm_cxx_class_data_always_comdat (void);
static bool arm_cxx_use_aeabi_atexit (void);
static void arm_init_libfuncs (void);
static bool arm_handle_option (size_t, const char *, int);
static unsigned HOST_WIDE_INT arm_shift_truncation_mask (enum machine_mode);
static bool arm_cannot_copy_insn_p (rtx);
static bool arm_tls_symbol_p (rtx x);
/* Initialize the GCC target structure.  */
#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
#endif
#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE arm_attribute_table

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START arm_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END arm_file_end
#ifdef AOF_ASSEMBLER
#undef  TARGET_ASM_BYTE_OP
#define TARGET_ASM_BYTE_OP "\tDCB\t"
#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\tDCW\t"
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\tDCD\t"
#undef  TARGET_ASM_GLOBALIZE_LABEL
#define TARGET_ASM_GLOBALIZE_LABEL aof_globalize_label
#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START aof_file_start
#undef  TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_END aof_file_end
#else
#undef  TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP NULL
#undef  TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER arm_assemble_integer
#endif
#undef  TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue

#undef  TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue

#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS (TARGET_DEFAULT | MASK_SCHED_PROLOG)
#undef  TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION arm_handle_option

#undef  TARGET_COMP_TYPE_ATTRIBUTES
#define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes

#undef  TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST arm_adjust_cost
#undef  TARGET_ENCODE_SECTION_INFO
#ifdef ARM_PE
#define TARGET_ENCODE_SECTION_INFO  arm_pe_encode_section_info
#else
#define TARGET_ENCODE_SECTION_INFO  arm_encode_section_info
#endif
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding

#undef  TARGET_ASM_INTERNAL_LABEL
#define TARGET_ASM_INTERNAL_LABEL arm_internal_label

#undef  TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK default_can_output_mi_thunk_no_vcall

/* This will be overridden in arm_override_options.  */
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS arm_slowmul_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST arm_address_cost

#undef  TARGET_SHIFT_TRUNCATION_MASK
#define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
#undef  TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG arm_reorg

#undef  TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS  arm_init_builtins
#undef  TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN arm_expand_builtin

#undef  TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS arm_init_libfuncs

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs

#undef  TARGET_DEFAULT_SHORT_ENUMS
#define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums

#undef  TARGET_ALIGN_ANON_BITFIELD
#define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield

#undef  TARGET_NARROW_VOLATILE_BITFIELD
#define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false

#undef  TARGET_CXX_GUARD_TYPE
#define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type

#undef  TARGET_CXX_GUARD_MASK_BIT
#define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit

#undef  TARGET_CXX_GET_COOKIE_SIZE
#define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size

#undef  TARGET_CXX_COOKIE_HAS_SIZE
#define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size

#undef  TARGET_CXX_CDTOR_RETURNS_THIS
#define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this

#undef  TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
#define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline

#undef  TARGET_CXX_USE_AEABI_ATEXIT
#define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit

#undef  TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
#define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
  arm_cxx_determine_class_data_visibility

#undef  TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
#define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat

#undef  TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB arm_return_in_msb

#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack

#ifdef TARGET_UNWIND_INFO
#undef  TARGET_UNWIND_EMIT
#define TARGET_UNWIND_EMIT arm_unwind_emit

/* EABI unwinding tables use a different format for the typeinfo tables.  */
#undef  TARGET_ASM_TTYPE
#define TARGET_ASM_TTYPE arm_output_ttype

#undef  TARGET_ARM_EABI_UNWINDER
#define TARGET_ARM_EABI_UNWINDER true
#endif /* TARGET_UNWIND_INFO */

#undef  TARGET_DWARF_HANDLE_FRAME_UNSPEC
#define TARGET_DWARF_HANDLE_FRAME_UNSPEC arm_dwarf_handle_frame_unspec

#undef  TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
#ifdef HAVE_AS_TLS
#undef  TARGET_HAVE_TLS
#define TARGET_HAVE_TLS true
#endif

#undef  TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM arm_tls_referenced_p
struct gcc_target targetm = TARGET_INITIALIZER;

/* Obstack for minipool constant handling.  */
static struct obstack minipool_obstack;
static char *         minipool_startobj;

/* The maximum number of insns skipped which
   will be conditionalised if possible.  */
static int max_insns_skipped = 5;

extern FILE * asm_out_file;

/* True if we are currently building a constant table.  */
int making_const_table;

/* Define the information needed to generate branch insns.  This is
   stored from the compare operation.  */
rtx arm_compare_op0, arm_compare_op1;

/* The processor for which instructions should be scheduled.  */
enum processor_type arm_tune = arm_none;

/* The default processor used if not overridden by commandline.  */
static enum processor_type arm_default_cpu = arm_none;

/* Which floating point model to use.  */
enum arm_fp_model arm_fp_model;

/* Which floating point hardware is available.  */
enum fputype arm_fpu_arch;

/* Which floating point hardware to schedule for.  */
enum fputype arm_fpu_tune;

/* Whether to use floating point hardware.  */
enum float_abi_type arm_float_abi;

/* Which ABI to use.  */
enum arm_abi_type arm_abi;

/* Which thread pointer model to use.  */
enum arm_tp_type target_thread_pointer = TP_AUTO;

/* Used to parse -mstructure_size_boundary command line option.  */
int arm_structure_size_boundary = DEFAULT_STRUCTURE_SIZE_BOUNDARY;

/* Used for Thumb call_via trampolines.  */
rtx thumb_call_via_label[14];
static int thumb_call_reg_needed;

/* Bit values used to identify processor capabilities.  */
#define FL_CO_PROC    (1 << 0)        /* Has external co-processor bus */
#define FL_ARCH3M     (1 << 1)        /* Extended multiply */
#define FL_MODE26     (1 << 2)        /* 26-bit mode support */
#define FL_MODE32     (1 << 3)        /* 32-bit mode support */
#define FL_ARCH4      (1 << 4)        /* Architecture rel 4 */
#define FL_ARCH5      (1 << 5)        /* Architecture rel 5 */
#define FL_THUMB      (1 << 6)        /* Thumb aware */
#define FL_LDSCHED    (1 << 7)        /* Load scheduling necessary */
#define FL_STRONG     (1 << 8)        /* StrongARM */
#define FL_ARCH5E     (1 << 9)        /* DSP extensions to v5 */
#define FL_XSCALE     (1 << 10)       /* XScale */
#define FL_CIRRUS     (1 << 11)       /* Cirrus/DSP.  */
#define FL_ARCH6      (1 << 12)       /* Architecture rel 6.  Adds
					 media instructions.  */
#define FL_VFPV2      (1 << 13)       /* Vector Floating Point V2.  */
#define FL_WBUF       (1 << 14)       /* Schedule for write buffer ops.
					 Note: ARM6 & 7 derivatives only.  */
#define FL_ARCH6K     (1 << 15)       /* Architecture rel 6 K extensions.  */
#define FL_THUMB2     (1 << 16)       /* Thumb-2.  */
#define FL_NOTM       (1 << 17)       /* Instructions not present in the 'M'
					 profile.  */
#define FL_DIV        (1 << 18)       /* Hardware divide.  */

#define FL_IWMMXT     (1 << 29)       /* XScale v2 or "Intel Wireless MMX technology".  */
#define FL_FOR_ARCH2	FL_NOTM
#define FL_FOR_ARCH3	(FL_FOR_ARCH2 | FL_MODE32)
#define FL_FOR_ARCH3M	(FL_FOR_ARCH3 | FL_ARCH3M)
#define FL_FOR_ARCH4	(FL_FOR_ARCH3M | FL_ARCH4)
#define FL_FOR_ARCH4T	(FL_FOR_ARCH4 | FL_THUMB)
#define FL_FOR_ARCH5	(FL_FOR_ARCH4 | FL_ARCH5)
#define FL_FOR_ARCH5T	(FL_FOR_ARCH5 | FL_THUMB)
#define FL_FOR_ARCH5E	(FL_FOR_ARCH5 | FL_ARCH5E)
#define FL_FOR_ARCH5TE	(FL_FOR_ARCH5E | FL_THUMB)
#define FL_FOR_ARCH5TEJ	FL_FOR_ARCH5TE
#define FL_FOR_ARCH6	(FL_FOR_ARCH5TE | FL_ARCH6)
#define FL_FOR_ARCH6J	FL_FOR_ARCH6
#define FL_FOR_ARCH6K	(FL_FOR_ARCH6 | FL_ARCH6K)
#define FL_FOR_ARCH6Z	FL_FOR_ARCH6
#define FL_FOR_ARCH6ZK	FL_FOR_ARCH6K
#define FL_FOR_ARCH6T2	(FL_FOR_ARCH6 | FL_THUMB2)
#define FL_FOR_ARCH7	(FL_FOR_ARCH6T2 &~ FL_NOTM)
#define FL_FOR_ARCH7A	(FL_FOR_ARCH7 | FL_NOTM)
#define FL_FOR_ARCH7R	(FL_FOR_ARCH7A | FL_DIV)
#define FL_FOR_ARCH7M	(FL_FOR_ARCH7 | FL_DIV)
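
/* Illustrative note (not from the original source): the FL_FOR_ARCHn
   macros are cumulative, so e.g. FL_FOR_ARCH5TE expands to
   FL_NOTM | FL_MODE32 | FL_ARCH3M | FL_ARCH4 | FL_ARCH5 | FL_ARCH5E
   | FL_THUMB.  arm_override_options tests individual bits of the
   resulting insn_flags mask to set the arm_archN booleans declared
   below.  */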
/* The bits in this mask specify which
   instructions we are allowed to generate.  */
static unsigned long insn_flags = 0;

/* The bits in this mask specify which instruction scheduling options should
   be used.  */
static unsigned long tune_flags = 0;
/* The following are used in the arm.md file as equivalents to bits
   in the above two flag variables.  */

/* Nonzero if this chip supports the ARM Architecture 3M extensions.  */
int arm_arch3m = 0;

/* Nonzero if this chip supports the ARM Architecture 4 extensions.  */
int arm_arch4 = 0;

/* Nonzero if this chip supports the ARM Architecture 4t extensions.  */
int arm_arch4t = 0;

/* Nonzero if this chip supports the ARM Architecture 5 extensions.  */
int arm_arch5 = 0;

/* Nonzero if this chip supports the ARM Architecture 5E extensions.  */
int arm_arch5e = 0;

/* Nonzero if this chip supports the ARM Architecture 6 extensions.  */
int arm_arch6 = 0;

/* Nonzero if this chip supports the ARM 6K extensions.  */
int arm_arch6k = 0;

/* Nonzero if instructions not present in the 'M' profile can be used.  */
int arm_arch_notm = 0;
/* Nonzero if this chip can benefit from load scheduling.  */
int arm_ld_sched = 0;

/* Nonzero if this chip is a StrongARM.  */
int arm_tune_strongarm = 0;

/* Nonzero if this chip is a Cirrus variant.  */
int arm_arch_cirrus = 0;

/* Nonzero if this chip supports Intel Wireless MMX technology.  */
int arm_arch_iwmmxt = 0;

/* Nonzero if this chip is an XScale.  */
int arm_arch_xscale = 0;

/* Nonzero if tuning for XScale  */
int arm_tune_xscale = 0;

/* Nonzero if we want to tune for stores that access the write-buffer.
   This typically means an ARM6 or ARM7 with MMU or MPU.  */
int arm_tune_wbuf = 0;
/* Nonzero if generating Thumb instructions.  */
int thumb_code = 0;

/* Nonzero if we should define __THUMB_INTERWORK__ in the
   preprocessor.
   XXX This is a bit of a hack, it's intended to help work around
   problems in GLD which doesn't understand that armv5t code is
   interworking clean.  */
int arm_cpp_interwork = 0;

/* Nonzero if chip supports Thumb 2.  */
int arm_arch_thumb2;

/* Nonzero if chip supports integer division instruction.  */
int arm_arch_hwdiv;
/* In case of a PRE_INC, POST_INC, PRE_DEC, POST_DEC memory reference, we
   must report the mode of the memory reference from PRINT_OPERAND to
   PRINT_OPERAND_ADDRESS.  */
enum machine_mode output_memory_reference_mode;

/* The register number to be used for the PIC offset register.  */
unsigned arm_pic_register = INVALID_REGNUM;
/* Set to 1 when a return insn is output, this means that the epilogue
   is not needed.  */
int return_used_this_function;
/* Set to 1 after arm_reorg has started.  Reset to start at the start of
   the next function.  */
static int after_arm_reorg = 0;

/* The maximum number of insns to be used when loading a constant.  */
static int arm_constant_limit = 3;

/* For an explanation of these variables, see final_prescan_insn below.  */
int arm_ccfsm_state;
/* arm_current_cc is also used for Thumb-2 cond_exec blocks.  */
enum arm_cond_code arm_current_cc;
rtx arm_target_insn;
int arm_target_label;
/* The number of conditionally executed insns, including the current insn.  */
int arm_condexec_count = 0;
/* A bitmask specifying the patterns for the IT block.
   Zero means do not output an IT block before this insn.  */
int arm_condexec_mask = 0;
/* The number of bits used in arm_condexec_mask.  */
int arm_condexec_masklen = 0;
/* The condition codes of the ARM, and the inverse function.  */
static const char * const arm_condition_codes[] =
{
  "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
  "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
};
#define ARM_LSL_NAME (TARGET_UNIFIED_ASM ? "lsl" : "asl")
#define streq(string1, string2) (strcmp (string1, string2) == 0)

#define THUMB2_WORK_REGS (0xff & ~(  (1 << THUMB_HARD_FRAME_POINTER_REGNUM) \
				   | (1 << SP_REGNUM) | (1 << PC_REGNUM) \
				   | (1 << PIC_OFFSET_TABLE_REGNUM)))
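
/* In other words (an illustrative gloss, not from the original source):
   THUMB2_WORK_REGS names the low registers r0-r7 available as scratch,
   leaving out any of them that happens to serve as the frame pointer,
   stack pointer, program counter or PIC register; bits for registers
   above r7 fall outside 0xff and are masked away regardless.  */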
/* Initialization code.  */

struct processors
{
  const char *const name;
  enum processor_type core;
  const char *arch;
  const unsigned long flags;
  bool (* rtx_costs) (rtx, int, int, int *);
};
/* Not all of these give usefully different compilation alternatives,
   but there is no simple way of generalizing them.  */
static const struct processors all_cores[] =
{
  /* ARM Cores */
#define ARM_CORE(NAME, IDENT, ARCH, FLAGS, COSTS) \
  {NAME, arm_none, #ARCH, FLAGS | FL_FOR_ARCH##ARCH, arm_##COSTS##_rtx_costs},
#include "arm-cores.def"
#undef ARM_CORE
  {NULL, arm_none, NULL, 0, NULL}
};
static const struct processors all_architectures[] =
{
  /* ARM Architectures */
  /* We don't specify rtx_costs here as it will be figured out
     from the core.  */

  {"armv2",   arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv2a",  arm2,       "2",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH2, NULL},
  {"armv3",   arm6,       "3",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3, NULL},
  {"armv3m",  arm7m,      "3M",  FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH3M, NULL},
  {"armv4",   arm7tdmi,   "4",   FL_CO_PROC | FL_MODE26 | FL_FOR_ARCH4, NULL},
  /* Strictly, FL_MODE26 is a permitted option for v4t, but there are no
     implementations that support it, so we will leave it out for now.  */
  {"armv4t",  arm7tdmi,   "4T",  FL_CO_PROC | FL_FOR_ARCH4T, NULL},
  {"armv5",   arm10tdmi,  "5",   FL_CO_PROC | FL_FOR_ARCH5, NULL},
  {"armv5t",  arm10tdmi,  "5T",  FL_CO_PROC | FL_FOR_ARCH5T, NULL},
  {"armv5e",  arm1026ejs, "5E",  FL_CO_PROC | FL_FOR_ARCH5E, NULL},
  {"armv5te", arm1026ejs, "5TE", FL_CO_PROC | FL_FOR_ARCH5TE, NULL},
  {"armv6",   arm1136js,  "6",   FL_CO_PROC | FL_FOR_ARCH6, NULL},
  {"armv6j",  arm1136js,  "6J",  FL_CO_PROC | FL_FOR_ARCH6J, NULL},
  {"armv6k",  mpcore,     "6K",  FL_CO_PROC | FL_FOR_ARCH6K, NULL},
  {"armv6z",  arm1176jzs, "6Z",  FL_CO_PROC | FL_FOR_ARCH6Z, NULL},
  {"armv6zk", arm1176jzs, "6ZK", FL_CO_PROC | FL_FOR_ARCH6ZK, NULL},
  {"armv6t2", arm1156t2s, "6T2", FL_CO_PROC | FL_FOR_ARCH6T2, NULL},
  {"armv7",   cortexa8,   "7",   FL_CO_PROC | FL_FOR_ARCH7, NULL},
  {"armv7-a", cortexa8,   "7A",  FL_CO_PROC | FL_FOR_ARCH7A, NULL},
  {"armv7-r", cortexr4,   "7R",  FL_CO_PROC | FL_FOR_ARCH7R, NULL},
  {"armv7-m", cortexm3,   "7M",  FL_CO_PROC | FL_FOR_ARCH7M, NULL},
  {"ep9312",  ep9312,     "4T",  FL_LDSCHED | FL_CIRRUS | FL_FOR_ARCH4, NULL},
  {"iwmmxt",  iwmmxt,     "5TE", FL_LDSCHED | FL_STRONG | FL_FOR_ARCH5TE | FL_XSCALE | FL_IWMMXT , NULL},
  {NULL, arm_none, NULL, 0 , NULL}
};
struct arm_cpu_select
{
  const char *              string;
  const char *              name;
  const struct processors * processors;
};

/* This is a magic structure.  The 'string' field is magically filled in
   with a pointer to the value specified by the user on the command line
   assuming that the user has specified such a value.  */
static struct arm_cpu_select arm_select[] =
{
  /* string       name            processors  */
  { NULL,       "-mcpu=",       all_cores  },
  { NULL,       "-march=",      all_architectures },
  { NULL,       "-mtune=",      all_cores }
};
/* Defines representing the indexes into the above table.  */
#define ARM_OPT_SET_CPU 0
#define ARM_OPT_SET_ARCH 1
#define ARM_OPT_SET_TUNE 2

/* The name of the preprocessor macro to define for this architecture.  */

char arm_arch_name[] = "__ARM_ARCH_0UNK__";
struct fpu_desc
{
  const char * name;
  enum fputype fpu;
};

/* Available values for -mfpu=.  */

static const struct fpu_desc all_fpus[] =
{
  {"fpa",       FPUTYPE_FPA},
  {"fpe2",      FPUTYPE_FPA_EMU2},
  {"fpe3",      FPUTYPE_FPA_EMU3},
  {"maverick",  FPUTYPE_MAVERICK},
  {"vfp",       FPUTYPE_VFP}
};
/* Floating point models used by the different hardware.
   See fputype in arm.h.  */

static const enum arm_fp_model fp_model_for_fpu[] =
{
  /* No FP hardware.  */
  ARM_FP_MODEL_UNKNOWN,		/* FPUTYPE_NONE  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU2  */
  ARM_FP_MODEL_FPA,		/* FPUTYPE_FPA_EMU3  */
  ARM_FP_MODEL_MAVERICK,	/* FPUTYPE_MAVERICK  */
  ARM_FP_MODEL_VFP		/* FPUTYPE_VFP  */
};
struct float_abi
{
  const char * name;
  enum float_abi_type abi_type;
};

/* Available values for -mfloat-abi=.  */

static const struct float_abi all_float_abis[] =
{
  {"soft",      ARM_FLOAT_ABI_SOFT},
  {"softfp",    ARM_FLOAT_ABI_SOFTFP},
  {"hard",      ARM_FLOAT_ABI_HARD}
};
struct abi_name
{
  const char *name;
  enum arm_abi_type abi_type;
};

/* Available values for -mabi=.  */

static const struct abi_name arm_all_abis[] =
{
  {"apcs-gnu",    ARM_ABI_APCS},
  {"atpcs",       ARM_ABI_ATPCS},
  {"aapcs",       ARM_ABI_AAPCS},
  {"iwmmxt",      ARM_ABI_IWMMXT},
  {"aapcs-linux", ARM_ABI_AAPCS_LINUX}
};
/* Supported TLS relocations.  */

enum tls_reloc {
  TLS_GD32,
  TLS_LDM32,
  TLS_LDO32,
  TLS_IE32,
  TLS_LE32
};
/* Emit an insn that's a simple single-set.  Both the operands must be
   known to be valid.  */
inline static rtx
emit_set_insn (rtx x, rtx y)
{
  return emit_insn (gen_rtx_SET (VOIDmode, x, y));
}
/* Return the number of bits set in VALUE.  */
static unsigned
bit_count (unsigned long value)
{
  unsigned long count = 0;

  while (value)
    {
      count++;
      value &= value - 1;  /* Clear the least-significant set bit.  */
    }

  return count;
}
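
/* Worked example (illustrative): bit_count (0x29) loops three times,
   0x29 -> 0x28 -> 0x20 -> 0, and returns 3; each "value &= value - 1"
   step clears exactly one set bit, so the loop runs once per bit.  */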
/* Set up library functions unique to ARM.  */
static void
arm_init_libfuncs (void)
{
  /* There are no special library functions unless we are using the
     ARM BPABI.  */
  if (!TARGET_BPABI)
    return;

  /* The functions below are described in Section 4 of the "Run-Time
     ABI for the ARM architecture", Version 1.0.  */
  /* Double-precision floating-point arithmetic.  Table 2.  */
  set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
  set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
  set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
  set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
  set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");

  /* Double-precision comparisons.  Table 3.  */
  set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
  set_optab_libfunc (ne_optab, DFmode, NULL);
  set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
  set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
  set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
  set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
  set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");

  /* Single-precision floating-point arithmetic.  Table 4.  */
  set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
  set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
  set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
  set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
  set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");

  /* Single-precision comparisons.  Table 5.  */
  set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
  set_optab_libfunc (ne_optab, SFmode, NULL);
  set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
  set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
  set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
  set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
  set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");

  /* Floating-point to integer conversions.  Table 6.  */
  set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
  set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
  set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
  set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
  set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
  set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
  set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
  set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");

  /* Conversions between floating types.  Table 7.  */
  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
  set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");

  /* Integer to floating-point conversions.  Table 8.  */
  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
  set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
  set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
  set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
  set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
  set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
  set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
  set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");

  /* Long long.  Table 9.  */
  set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
  set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
  set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
  set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
  set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
  set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
  set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");

  /* Integer (32/32->32) division.  \S 4.3.1.  */
  set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
  set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
  /* The divmod functions are designed so that they can be used for
     plain division, even though they return both the quotient and the
     remainder.  The quotient is returned in the usual location (i.e.,
     r0 for SImode, {r0, r1} for DImode), just as would be expected
     for an ordinary division routine.  Because the AAPCS calling
     conventions specify that all of { r0, r1, r2, r3 } are
     call-clobbered registers, there is no need to tell the compiler
     explicitly that those registers are clobbered by these
     routines.  */
  set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
  set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
  /* For SImode division the ABI provides div-without-mod routines,
     which are faster.  */
  set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
  set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");
  /* We don't have mod libcalls.  Fortunately gcc knows how to use the
     divmod libcalls instead.  */
  set_optab_libfunc (smod_optab, DImode, NULL);
  set_optab_libfunc (umod_optab, DImode, NULL);
  set_optab_libfunc (smod_optab, SImode, NULL);
  set_optab_libfunc (umod_optab, SImode, NULL);
}
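
/* Illustrative consequence of the mappings above (not part of the
   original source): under the BPABI a plain C division such as

       long long q = a / b;

   becomes a call to __aeabi_ldivmod; the quotient is picked up from
   {r0, r1} as usual and the remainder half of the result is ignored.  */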
/* Implement TARGET_HANDLE_OPTION.  */

static bool
arm_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
{
  switch (code)
    {
    case OPT_march_:
      arm_select[1].string = arg;
      return true;

    case OPT_mcpu_:
      arm_select[0].string = arg;
      return true;

    case OPT_mhard_float:
      target_float_abi_name = "hard";
      return true;

    case OPT_msoft_float:
      target_float_abi_name = "soft";
      return true;

    case OPT_mtune_:
      arm_select[2].string = arg;
      return true;

    default:
      return true;
    }
}
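
/* For example (illustrative): "-march=armv5te" reaches this handler as
   OPT_march_, so arm_select[1].string is set to "armv5te" and is later
   matched against the all_architectures table by arm_override_options.  */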
/* Fix up any incompatible options that the user has specified.
   This has now turned into a maze.  */
void
arm_override_options (void)
{
  unsigned i;
  enum processor_type target_arch_cpu = arm_none;

  /* Set up the flags based on the cpu/architecture selected by the user.  */
  for (i = ARRAY_SIZE (arm_select); i--;)
    {
      struct arm_cpu_select * ptr = arm_select + i;

      if (ptr->string != NULL && ptr->string[0] != '\0')
	{
	  const struct processors * sel;

	  for (sel = ptr->processors; sel->name != NULL; sel++)
	    if (streq (ptr->string, sel->name))
	      {
		/* Set the architecture define.  */
		if (i != ARM_OPT_SET_TUNE)
		  sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);

		/* Determine the processor core for which we should
		   tune code-generation.  */
		if (/* -mcpu= is a sensible default.  */
		    i == ARM_OPT_SET_CPU
		    /* -mtune= overrides -mcpu= and -march=.  */
		    || i == ARM_OPT_SET_TUNE)
		  arm_tune = (enum processor_type) (sel - ptr->processors);

		/* Remember the CPU associated with this architecture.
		   If no other option is used to set the CPU type,
		   we'll use this to guess the most suitable tuning
		   options.  */
		if (i == ARM_OPT_SET_ARCH)
		  target_arch_cpu = sel->core;

		if (i != ARM_OPT_SET_TUNE)
		  {
		    /* If we have been given an architecture and a processor
		       make sure that they are compatible.  We only generate
		       a warning though, and we prefer the CPU over the
		       architecture.  */
		    if (insn_flags != 0 && (insn_flags ^ sel->flags))
		      warning (0, "switch -mcpu=%s conflicts with -march= switch",
			       ptr->string);

		    insn_flags = sel->flags;
		  }

		break;
	      }

	  if (sel->name == NULL)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }
  /* Guess the tuning options from the architecture if necessary.  */
  if (arm_tune == arm_none)
    arm_tune = target_arch_cpu;
  /* If the user did not specify a processor, choose one for them.  */
  if (insn_flags == 0)
    {
      const struct processors * sel;
      unsigned int        sought;
      enum processor_type cpu;

      cpu = TARGET_CPU_DEFAULT;
      if (cpu == arm_none)
	{
#ifdef SUBTARGET_CPU_DEFAULT
	  /* Use the subtarget default CPU if none was specified by
	     configure.  */
	  cpu = SUBTARGET_CPU_DEFAULT;
#endif
	  /* Default to ARM6.  */
	  if (cpu == arm_none)
	    cpu = arm6;
	}
      sel = &all_cores[cpu];

      insn_flags = sel->flags;
      /* Now check to see if the user has specified some command line
	 switch that require certain abilities from the cpu.  */
      sought = 0;

      if (TARGET_INTERWORK || TARGET_THUMB)
	{
	  sought |= (FL_THUMB | FL_MODE32);

	  /* There are no ARM processors that support both APCS-26 and
	     interworking.  Therefore we force FL_MODE26 to be removed
	     from insn_flags here (if it was set), so that the search
	     below will always be able to find a compatible processor.  */
	  insn_flags &= ~FL_MODE26;
	}

      if (sought != 0 && ((sought & insn_flags) != sought))
	{
	  /* Try to locate a CPU type that supports all of the abilities
	     of the default CPU, plus the extra abilities requested by
	     the user.  */
	  for (sel = all_cores; sel->name != NULL; sel++)
	    if ((sel->flags & sought) == (sought | insn_flags))
	      break;

	  if (sel->name == NULL)
	    {
	      unsigned current_bit_count = 0;
	      const struct processors * best_fit = NULL;

	      /* Ideally we would like to issue an error message here
		 saying that it was not possible to find a CPU compatible
		 with the default CPU, but which also supports the command
		 line options specified by the programmer, and so they
		 ought to use the -mcpu=<name> command line option to
		 override the default CPU type.

		 If we cannot find a cpu that has both the
		 characteristics of the default cpu and the given
		 command line options we scan the array again looking
		 for a best match.  */
	      for (sel = all_cores; sel->name != NULL; sel++)
		if ((sel->flags & sought) == sought)
		  {
		    unsigned count;

		    count = bit_count (sel->flags & insn_flags);

		    if (count >= current_bit_count)
		      {
			best_fit = sel;
			current_bit_count = count;
		      }
		  }

	      gcc_assert (best_fit);
	      sel = best_fit;
	    }

	  insn_flags = sel->flags;
	}

      sprintf (arm_arch_name, "__ARM_ARCH_%s__", sel->arch);
      arm_default_cpu = (enum processor_type) (sel - all_cores);
      if (arm_tune == arm_none)
	arm_tune = arm_default_cpu;
    }

  /* The processor for which we should tune should now have been
     chosen.  */
  gcc_assert (arm_tune != arm_none);

  tune_flags = all_cores[(int)arm_tune].flags;
  if (optimize_size)
    targetm.rtx_costs = arm_size_rtx_costs;
  else
    targetm.rtx_costs = all_cores[(int)arm_tune].rtx_costs;
  /* Make sure that the processor choice does not conflict with any of the
     other command line choices.  */
  if (TARGET_ARM && !(insn_flags & FL_NOTM))
    error ("target CPU does not support ARM mode");

  if (TARGET_INTERWORK && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support interworking" );
      target_flags &= ~MASK_INTERWORK;
    }

  if (TARGET_THUMB && !(insn_flags & FL_THUMB))
    {
      warning (0, "target CPU does not support THUMB instructions");
      target_flags &= ~MASK_THUMB;
    }

  if (TARGET_APCS_FRAME && TARGET_THUMB)
    {
      /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
      target_flags &= ~MASK_APCS_FRAME;
    }
  /* Callee super interworking implies thumb interworking.  Adding
     this to the flags here simplifies the logic elsewhere.  */
  if (TARGET_THUMB && TARGET_CALLEE_INTERWORKING)
    target_flags |= MASK_INTERWORK;

  /* TARGET_BACKTRACE calls leaf_function_p, which causes a crash if done
     from here where no function is being compiled currently.  */
  if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM)
    warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLEE_INTERWORKING)
    warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_ARM && TARGET_CALLER_INTERWORKING)
    warning (0, "enabling caller interworking support is only meaningful when compiling for the Thumb");

  if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
    {
      warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
      target_flags |= MASK_APCS_FRAME;
    }

  if (TARGET_POKE_FUNCTION_NAME)
    target_flags |= MASK_APCS_FRAME;

  if (TARGET_APCS_REENT && flag_pic)
    error ("-fpic and -mapcs-reent are incompatible");

  if (TARGET_APCS_REENT)
    warning (0, "APCS reentrant code not supported.  Ignored");

  /* If this target is normally configured to use APCS frames, warn if they
     are turned off and debugging is turned on.  */
  if (TARGET_ARM
      && write_symbols != NO_DEBUG
      && !TARGET_APCS_FRAME
      && (TARGET_DEFAULT & MASK_APCS_FRAME))
    warning (0, "-g with -mno-apcs-frame may not give sensible debugging");

  if (TARGET_APCS_FLOAT)
    warning (0, "passing floating point arguments in fp regs not yet supported");
  /* Initialize boolean versions of the flags, for use in the arm.md file.  */
  arm_arch3m = (insn_flags & FL_ARCH3M) != 0;
  arm_arch4 = (insn_flags & FL_ARCH4) != 0;
  arm_arch4t = arm_arch4 & ((insn_flags & FL_THUMB) != 0);
  arm_arch5 = (insn_flags & FL_ARCH5) != 0;
  arm_arch5e = (insn_flags & FL_ARCH5E) != 0;
  arm_arch6 = (insn_flags & FL_ARCH6) != 0;
  arm_arch6k = (insn_flags & FL_ARCH6K) != 0;
  arm_arch_notm = (insn_flags & FL_NOTM) != 0;
  arm_arch_thumb2 = (insn_flags & FL_THUMB2) != 0;
  arm_arch_xscale = (insn_flags & FL_XSCALE) != 0;
  arm_arch_cirrus = (insn_flags & FL_CIRRUS) != 0;

  arm_ld_sched = (tune_flags & FL_LDSCHED) != 0;
  arm_tune_strongarm = (tune_flags & FL_STRONG) != 0;
  thumb_code = (TARGET_ARM == 0);
  arm_tune_wbuf = (tune_flags & FL_WBUF) != 0;
  arm_tune_xscale = (tune_flags & FL_XSCALE) != 0;
  arm_arch_iwmmxt = (insn_flags & FL_IWMMXT) != 0;
  arm_arch_hwdiv = (insn_flags & FL_DIV) != 0;
  /* V5 code we generate is completely interworking capable, so we turn off
     TARGET_INTERWORK here to avoid many tests later on.  */

  /* XXX However, we must pass the right pre-processor defines to CPP
     or GLD can get confused.  This is a hack.  */
  if (TARGET_INTERWORK)
    arm_cpp_interwork = 1;

  if (arm_arch5)
    target_flags &= ~MASK_INTERWORK;
  if (target_abi_name)
    {
      for (i = 0; i < ARRAY_SIZE (arm_all_abis); i++)
	{
	  if (streq (arm_all_abis[i].name, target_abi_name))
	    {
	      arm_abi = arm_all_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (arm_all_abis))
	error ("invalid ABI option: -mabi=%s", target_abi_name);
    }
  else
    arm_abi = ARM_DEFAULT_ABI;

  if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
    error ("iwmmxt requires an AAPCS compatible ABI for proper operation");

  if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
    error ("iwmmxt abi requires an iwmmxt capable cpu");
  arm_fp_model = ARM_FP_MODEL_UNKNOWN;
  if (target_fpu_name == NULL && target_fpe_name != NULL)
    {
      if (streq (target_fpe_name, "2"))
	target_fpu_name = "fpe2";
      else if (streq (target_fpe_name, "3"))
	target_fpu_name = "fpe3";
      else
	error ("invalid floating point emulation option: -mfpe=%s",
	       target_fpe_name);
    }
  if (target_fpu_name != NULL)
    {
      /* The user specified a FPU.  */
      for (i = 0; i < ARRAY_SIZE (all_fpus); i++)
	{
	  if (streq (all_fpus[i].name, target_fpu_name))
	    {
	      arm_fpu_arch = all_fpus[i].fpu;
	      arm_fpu_tune = arm_fpu_arch;
	      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
	      break;
	    }
	}
      if (arm_fp_model == ARM_FP_MODEL_UNKNOWN)
	error ("invalid floating point option: -mfpu=%s", target_fpu_name);
    }
  else
    {
#ifdef FPUTYPE_DEFAULT
      /* Use the default if it is specified for this platform.  */
      arm_fpu_arch = FPUTYPE_DEFAULT;
      arm_fpu_tune = FPUTYPE_DEFAULT;
#else
      /* Pick one based on CPU type.  */
      /* ??? Some targets assume FPA is the default.
      if ((insn_flags & FL_VFP) != 0)
	arm_fpu_arch = FPUTYPE_VFP;
      else
      */
      if (arm_arch_cirrus)
	arm_fpu_arch = FPUTYPE_MAVERICK;
      else
	arm_fpu_arch = FPUTYPE_FPA_EMU2;
#endif
      if (tune_flags & FL_CO_PROC && arm_fpu_arch == FPUTYPE_FPA_EMU2)
	arm_fpu_tune = FPUTYPE_FPA;
      else
	arm_fpu_tune = arm_fpu_arch;
      arm_fp_model = fp_model_for_fpu[arm_fpu_arch];
      gcc_assert (arm_fp_model != ARM_FP_MODEL_UNKNOWN);
    }
  if (target_float_abi_name != NULL)
    {
      /* The user specified a FP ABI.  */
      for (i = 0; i < ARRAY_SIZE (all_float_abis); i++)
	{
	  if (streq (all_float_abis[i].name, target_float_abi_name))
	    {
	      arm_float_abi = all_float_abis[i].abi_type;
	      break;
	    }
	}
      if (i == ARRAY_SIZE (all_float_abis))
	error ("invalid floating point abi: -mfloat-abi=%s",
	       target_float_abi_name);
    }
  else
    arm_float_abi = TARGET_DEFAULT_FLOAT_ABI;
  if (arm_float_abi == ARM_FLOAT_ABI_HARD && TARGET_VFP)
    sorry ("-mfloat-abi=hard and VFP");

  /* FPA and iWMMXt are incompatible because the insn encodings overlap.
     VFP and iWMMXt can theoretically coexist, but it's unlikely such silicon
     will ever exist.  GCC makes no attempt to support this combination.  */
  if (TARGET_IWMMXT && !TARGET_SOFT_FLOAT)
    sorry ("iWMMXt and hardware floating point");

  /* ??? iWMMXt insn patterns need auditing for Thumb-2.  */
  if (TARGET_THUMB2 && TARGET_IWMMXT)
    sorry ("Thumb-2 iWMMXt");

  /* If soft-float is specified then don't use FPU.  */
  if (TARGET_SOFT_FLOAT)
    arm_fpu_arch = FPUTYPE_NONE;

  /* For arm2/3 there is no need to do any scheduling if there is only
     a floating point emulator, or we are doing software floating-point.  */
  if ((TARGET_SOFT_FLOAT
       || arm_fpu_tune == FPUTYPE_FPA_EMU2
       || arm_fpu_tune == FPUTYPE_FPA_EMU3)
      && (tune_flags & FL_MODE32) == 0)
    flag_schedule_insns = flag_schedule_insns_after_reload = 0;
  if (target_thread_switch)
    {
      if (strcmp (target_thread_switch, "soft") == 0)
	target_thread_pointer = TP_SOFT;
      else if (strcmp (target_thread_switch, "auto") == 0)
	target_thread_pointer = TP_AUTO;
      else if (strcmp (target_thread_switch, "cp15") == 0)
	target_thread_pointer = TP_CP15;
      else
	error ("invalid thread pointer option: -mtp=%s", target_thread_switch);
    }

  /* Use the cp15 method if it is available.  */
  if (target_thread_pointer == TP_AUTO)
    {
      if (arm_arch6k && !TARGET_THUMB)
	target_thread_pointer = TP_CP15;
      else
	target_thread_pointer = TP_SOFT;
    }
  if (TARGET_HARD_TP && TARGET_THUMB1)
    error ("can not use -mtp=cp15 with 16-bit Thumb");

  /* Override the default structure alignment for AAPCS ABI.  */
  if (TARGET_AAPCS_BASED)
    arm_structure_size_boundary = 8;
  if (structure_size_string != NULL)
    {
      int size = strtol (structure_size_string, NULL, 0);

      if (size == 8 || size == 32
	  || (ARM_DOUBLEWORD_ALIGN && size == 64))
	arm_structure_size_boundary = size;
      else
	warning (0, "structure size boundary can only be set to %s",
		 ARM_DOUBLEWORD_ALIGN ? "8, 32 or 64": "8 or 32");
    }
  /* If stack checking is disabled, we can use r10 as the PIC register,
     which keeps r9 available.  The EABI specifies r9 as the PIC register.  */
  if (flag_pic && TARGET_SINGLE_PIC_BASE)
    arm_pic_register = (TARGET_APCS_STACK || TARGET_AAPCS_BASED) ? 9 : 10;
  if (arm_pic_register_string != NULL)
    {
      int pic_register = decode_reg_name (arm_pic_register_string);

      if (!flag_pic)
	warning (0, "-mpic-register= is useless without -fpic");

      /* Prevent the user from choosing an obviously stupid PIC register.  */
      else if (pic_register < 0 || call_used_regs[pic_register]
	       || pic_register == HARD_FRAME_POINTER_REGNUM
	       || pic_register == STACK_POINTER_REGNUM
	       || pic_register >= PC_REGNUM)
	error ("unable to use '%s' for PIC register", arm_pic_register_string);
      else
	arm_pic_register = pic_register;
    }
  /* ??? We might want scheduling for thumb2.  */
  if (TARGET_THUMB && flag_schedule_insns)
    {
      /* Don't warn since it's on by default in -O2.  */
      flag_schedule_insns = 0;
    }

  if (optimize_size)
    {
      arm_constant_limit = 1;

      /* If optimizing for size, bump the number of instructions that we
	 are prepared to conditionally execute (even on a StrongARM).  */
      max_insns_skipped = 6;
    }
  else
    {
      /* For processors with load scheduling, it never costs more than
	 2 cycles to load a constant, and the load scheduler may well
	 reduce that to 1.  */
      if (arm_ld_sched)
	arm_constant_limit = 1;

      /* On XScale the longer latency of a load makes it more difficult
	 to achieve a good schedule, so it's faster to synthesize
	 constants that can be done in two insns.  */
      if (arm_tune_xscale)
	arm_constant_limit = 2;

      /* StrongARM has early execution of branches, so a sequence
	 that is worth skipping is shorter.  */
      if (arm_tune_strongarm)
	max_insns_skipped = 3;
    }

  /* Register global variables with the garbage collector.  */
  arm_add_gc_roots ();
}
static void
arm_add_gc_roots (void)
{
  gcc_obstack_init (&minipool_obstack);
  minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
}
/* A table of known ARM exception types.
   For use with the interrupt function attribute.  */

typedef struct
{
  const char *const arg;
  const unsigned long return_value;
}
isr_attribute_arg;

static const isr_attribute_arg isr_attribute_args [] =
{
1422 { "IRQ", ARM_FT_ISR },
1423 { "irq", ARM_FT_ISR },
1424 { "FIQ", ARM_FT_FIQ },
1425 { "fiq", ARM_FT_FIQ },
1426 { "ABORT", ARM_FT_ISR },
1427 { "abort", ARM_FT_ISR },
1428 { "ABORT", ARM_FT_ISR },
1429 { "abort", ARM_FT_ISR },
1430 { "UNDEF", ARM_FT_EXCEPTION },
1431 { "undef", ARM_FT_EXCEPTION },
1432 { "SWI", ARM_FT_EXCEPTION },
1433 { "swi", ARM_FT_EXCEPTION },
1434 { NULL, ARM_FT_NORMAL }
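
/* Usage example (illustrative): declaring a handler as

       void __attribute__ ((interrupt ("IRQ"))) irq_handler (void);

   makes arm_isr_value return ARM_FT_ISR for it, which selects the
   interrupt-specific prologue and epilogue sequences.  */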
/* Returns the (interrupt) function type of the current
   function, or ARM_FT_UNKNOWN if the type cannot be determined.  */

static unsigned long
arm_isr_value (tree argument)
{
  const isr_attribute_arg * ptr;
  const char *              arg;

  if (!arm_arch_notm)
    return ARM_FT_NORMAL | ARM_FT_STACKALIGN;

  /* No argument - default to IRQ.  */
  if (argument == NULL_TREE)
    return ARM_FT_ISR;

  /* Get the value of the argument.  */
  if (TREE_VALUE (argument) == NULL_TREE
      || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
    return ARM_FT_UNKNOWN;

  arg = TREE_STRING_POINTER (TREE_VALUE (argument));

  /* Check it against the list of known arguments.  */
  for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
    if (streq (arg, ptr->arg))
      return ptr->return_value;

  /* An unrecognized interrupt type.  */
  return ARM_FT_UNKNOWN;
}
/* Computes the type of the current function.  */

static unsigned long
arm_compute_func_type (void)
{
  unsigned long type = ARM_FT_UNKNOWN;
  tree a;
  tree attr;

  gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);

  /* Decide if the current function is volatile.  Such functions
     never return, and many memory cycles can be saved by not storing
     register values that will never be needed again.  This optimization
     was added to speed up context switching in a kernel application.  */
  if (optimize > 0
      && (TREE_NOTHROW (current_function_decl)
	  || !(flag_unwind_tables
	       || (flag_exceptions && !USING_SJLJ_EXCEPTIONS)))
      && TREE_THIS_VOLATILE (current_function_decl))
    type |= ARM_FT_VOLATILE;

  if (cfun->static_chain_decl != NULL)
    type |= ARM_FT_NESTED;

  attr = DECL_ATTRIBUTES (current_function_decl);

  a = lookup_attribute ("naked", attr);
  if (a != NULL_TREE)
    type |= ARM_FT_NAKED;

  a = lookup_attribute ("isr", attr);
  if (a == NULL_TREE)
    a = lookup_attribute ("interrupt", attr);

  if (a == NULL_TREE)
    type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
  else
    type |= arm_isr_value (TREE_VALUE (a));

  return type;
}
/* Returns the type of the current function.  */

unsigned long
arm_current_func_type (void)
{
  if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
    cfun->machine->func_type = arm_compute_func_type ();

  return cfun->machine->func_type;
}
/* Return 1 if it is possible to return using a single instruction.
   If SIBLING is non-null, this is a test for a return before a sibling
   call.  SIBLING is the call insn, so we can examine its register usage.  */

int
use_return_insn (int iscond, rtx sibling)
{
  int regno;
  unsigned int func_type;
  unsigned long saved_int_regs;
  unsigned HOST_WIDE_INT stack_adjust;
  arm_stack_offsets *offsets;

  /* Never use a return instruction before reload has run.  */
  if (!reload_completed)
    return 0;

  func_type = arm_current_func_type ();

  /* Naked, volatile and stack alignment functions need special
     consideration.  */
  if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED | ARM_FT_STACKALIGN))
    return 0;

  /* So do interrupt functions that use the frame pointer and Thumb
     interrupt functions.  */
  if (IS_INTERRUPT (func_type) && (frame_pointer_needed || TARGET_THUMB))
    return 0;

  offsets = arm_get_frame_offsets ();
  stack_adjust = offsets->outgoing_args - offsets->saved_regs;

  /* As do variadic functions.  */
  if (current_function_pretend_args_size
      || cfun->machine->uses_anonymous_args
      /* Or if the function calls __builtin_eh_return () */
      || current_function_calls_eh_return
      /* Or if the function calls alloca */
      || current_function_calls_alloca
      /* Or if there is a stack adjustment.  However, if the stack pointer
	 is saved on the stack, we can use a pre-incrementing stack load.  */
      || !(stack_adjust == 0 || (frame_pointer_needed && stack_adjust == 4)))
    return 0;

  saved_int_regs = arm_compute_save_reg_mask ();

  /* Unfortunately, the insn

       ldmib sp, {..., sp, ...}

     triggers a bug on most SA-110 based devices, such that the stack
     pointer won't be correctly restored if the instruction takes a
     page fault.  We work around this problem by popping r3 along with
     the other registers, since that is never slower than executing
     another instruction.

     We test for !arm_arch5 here, because code for any architecture
     less than this could potentially be run on one of the buggy
     chips.  */
  if (stack_adjust == 4 && !arm_arch5 && TARGET_ARM)
    {
      /* Validate that r3 is a call-clobbered register (always true in
	 the default abi) ... */
      if (!call_used_regs[3])
	return 0;

      /* ... that it isn't being used for a return value ... */
      if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
	return 0;

      /* ... or for a tail-call argument ...  */
      if (sibling)
	{
	  gcc_assert (GET_CODE (sibling) == CALL_INSN);

	  if (find_regno_fusage (sibling, USE, 3))
	    return 0;
	}

      /* ... and that there are no call-saved registers in r0-r2
	 (always true in the default ABI).  */
      if (saved_int_regs & 0x7)
	return 0;
    }

  /* Can't be done if interworking with Thumb, and any registers have been
     stacked.  */
  if (TARGET_INTERWORK && saved_int_regs != 0 && !IS_INTERRUPT(func_type))
    return 0;

  /* On StrongARM, conditional returns are expensive if they aren't
     taken and multiple registers have been stacked.  */
  if (iscond && arm_tune_strongarm)
    {
      /* Conditional return when just the LR is stored is a simple
	 conditional-load instruction, that's not expensive.  */
      if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
	return 0;

      if (flag_pic
	  && arm_pic_register != INVALID_REGNUM
	  && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
	return 0;
    }

  /* If there are saved registers but the LR isn't saved, then we need
     two instructions for the return.  */
  if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
    return 0;

  /* Can't be done if any of the FPA regs are pushed,
     since this also requires an insn.  */
  if (TARGET_HARD_FLOAT && TARGET_FPA)
    for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  /* Likewise VFP regs.  */
  if (TARGET_HARD_FLOAT && TARGET_VFP)
    for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
      if (regs_ever_live[regno] && !call_used_regs[regno])
	return 0;

  if (TARGET_REALLY_IWMMXT)
    for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
      if (regs_ever_live[regno] && ! call_used_regs [regno])
	return 0;

  return 1;
}
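
/* Example (illustrative): a leaf function that saves no registers and
   performs no stack adjustment passes every test above, so its entire
   epilogue can be the single instruction "bx lr" (or "mov pc, lr" on
   cores without v4t's BX).  */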
/* Return TRUE if int I is a valid immediate ARM constant.  */

int
const_ok_for_arm (HOST_WIDE_INT i)
{
  int lowbit;

  /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
     be all zero, or all one.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
      && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
	  != ((~(unsigned HOST_WIDE_INT) 0)
	      & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
    return FALSE;

  i &= (unsigned HOST_WIDE_INT) 0xffffffff;

  /* Fast return for 0 and small values.  We must do this for zero, since
     the code below can't handle that one case.  */
  if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
    return TRUE;

  /* Get the number of trailing zeros.  */
  lowbit = ffs((int) i) - 1;

  /* Only even shifts are allowed in ARM mode so round down to the
     nearest even number.  */
  if (TARGET_ARM)
    lowbit &= ~1;

  if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
    return TRUE;

  if (TARGET_ARM)
    {
      /* Allow rotated constants in ARM mode.  */
      if (lowbit <= 4
	  && ((i & ~0xc000003f) == 0
	      || (i & ~0xf000000f) == 0
	      || (i & ~0xfc000003) == 0))
	return TRUE;
    }
  else
    {
      HOST_WIDE_INT v;

      /* Allow repeated pattern.  */
      v = i & 0xff;
      v |= v << 16;
      if (i == v || i == (v | (v << 8)))
	return TRUE;
    }

  return FALSE;
}
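
/* Worked examples (illustrative): ARM data-processing immediates are an
   8-bit value rotated right by an even amount, so 0x3fc is accepted
   (0xff shifted left by the even amount 2) while 0x101 is rejected (its
   two set bits lie nine bits apart, which no single 8-bit window can
   cover).  */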
/* Return true if I is a valid constant for the operation CODE.  */
static int
const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
{
  if (const_ok_for_arm (i))
    return 1;

  switch (code)
    {
    case PLUS:
      return const_ok_for_arm (ARM_SIGN_EXTEND (-i));

    case MINUS:		/* Should only occur with (MINUS I reg) => rsb */
    case XOR:
    case IOR:
      return 0;

    case AND:
      return const_ok_for_arm (ARM_SIGN_EXTEND (~i));

    default:
      gcc_unreachable ();
    }
}
/* Emit a sequence of insns to handle a large constant.
   CODE is the code of the operation required, it can be any of SET, PLUS,
   IOR, AND, XOR, MINUS;
   MODE is the mode in which the operation is being performed;
   VAL is the integer to operate on;
   SOURCE is the other operand (a register, or a null-pointer for SET);
   SUBTARGETS means it is safe to create scratch registers if that will
   either produce a simpler sequence, or we will want to cse the values.
   Return value is the number of insns emitted.  */

/* ??? Tweak this for thumb2.  */
int
arm_split_constant (enum rtx_code code, enum machine_mode mode, rtx insn,
		    HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
{
  rtx cond;

  if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
    cond = COND_EXEC_TEST (PATTERN (insn));
  else
    cond = NULL_RTX;

  if (subtargets || code == SET
      || (GET_CODE (target) == REG && GET_CODE (source) == REG
	  && REGNO (target) != REGNO (source)))
    {
      /* After arm_reorg has been called, we can't fix up expensive
	 constants by pushing them into memory so we must synthesize
	 them in-line, regardless of the cost.  This is only likely to
	 be more costly on chips that have load delay slots and we are
	 compiling without running the scheduler (so no splitting
	 occurred before the final instruction emission).

	 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
      */
      if (!after_arm_reorg
	  && !cond
	  && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
				1, 0)
	      > arm_constant_limit + (code != SET)))
	{
	  if (code == SET)
	    {
	      /* Currently SET is the only monadic value for CODE, all
		 the rest are diadic.  */
	      emit_set_insn (target, GEN_INT (val));
	      return 1;
	    }
	  else
	    {
	      rtx temp = subtargets ? gen_reg_rtx (mode) : target;

	      emit_set_insn (temp, GEN_INT (val));
	      /* For MINUS, the value is subtracted from, since we never
		 have subtraction of a constant.  */
	      if (code == MINUS)
		emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
	      else
		emit_set_insn (target,
			       gen_rtx_fmt_ee (code, mode, source, temp));
	      return 2;
	    }
	}
    }

  return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
			   1);
}
1804 /* Return the number of ARM instructions required to synthesize the given
1805 constant, if we start emitting them from bit-position I. */
1807 count_insns_for_constant (HOST_WIDE_INT remainder, int i)
1809 HOST_WIDE_INT temp1;
1817 if (remainder & (3 << (i - 2)))
1822 temp1 = remainder & ((0x0ff << end)
1823 | ((i < end) ? (0xff >> (32 - end)) : 0));
1824 remainder &= ~temp1;
1829 } while (remainder);
1833 /* Emit an instruction with the indicated PATTERN. If COND is
1834 non-NULL, conditionalize the execution of the instruction on COND
1835 being true. */
1838 emit_constant_insn (rtx cond, rtx pattern)
1841 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
1842 emit_insn (pattern);
1845 /* As above, but extra parameter GENERATE which, if clear, suppresses
1846 RTL generation. */
1847 /* ??? This needs more work for thumb2. */
1850 arm_gen_constant (enum rtx_code code, enum machine_mode mode, rtx cond,
1851 HOST_WIDE_INT val, rtx target, rtx source, int subtargets,
1856 int can_negate_initial = 0;
1859 int num_bits_set = 0;
1860 int set_sign_bit_copies = 0;
1861 int clear_sign_bit_copies = 0;
1862 int clear_zero_bit_copies = 0;
1863 int set_zero_bit_copies = 0;
1865 unsigned HOST_WIDE_INT temp1, temp2;
1866 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
1868 /* Find out which operations are safe for a given CODE. Also do a quick
1869 check for degenerate cases; these can occur when DImode operations
1870 are split. */
1881 can_negate_initial = 1;
1885 if (remainder == 0xffffffff)
1888 emit_constant_insn (cond,
1889 gen_rtx_SET (VOIDmode, target,
1890 GEN_INT (ARM_SIGN_EXTEND (val))));
1895 if (reload_completed && rtx_equal_p (target, source))
1898 emit_constant_insn (cond,
1899 gen_rtx_SET (VOIDmode, target, source));
1908 emit_constant_insn (cond,
1909 gen_rtx_SET (VOIDmode, target, const0_rtx));
1912 if (remainder == 0xffffffff)
1914 if (reload_completed && rtx_equal_p (target, source))
1917 emit_constant_insn (cond,
1918 gen_rtx_SET (VOIDmode, target, source));
1927 if (reload_completed && rtx_equal_p (target, source))
1930 emit_constant_insn (cond,
1931 gen_rtx_SET (VOIDmode, target, source));
1935 /* We don't know how to handle other cases yet. */
1936 gcc_assert (remainder == 0xffffffff);
1939 emit_constant_insn (cond,
1940 gen_rtx_SET (VOIDmode, target,
1941 gen_rtx_NOT (mode, source)));
1945 /* We treat MINUS as (val - source), since (source - val) is always
1946 passed as (source + (-val)). */
1950 emit_constant_insn (cond,
1951 gen_rtx_SET (VOIDmode, target,
1952 gen_rtx_NEG (mode, source)));
1955 if (const_ok_for_arm (val))
1958 emit_constant_insn (cond,
1959 gen_rtx_SET (VOIDmode, target,
1960 gen_rtx_MINUS (mode, GEN_INT (val),
1972 /* If we can do it in one insn get out quickly. */
1973 if (const_ok_for_arm (val)
1974 || (can_negate_initial && const_ok_for_arm (-val))
1975 || (can_invert && const_ok_for_arm (~val)))
1978 emit_constant_insn (cond,
1979 gen_rtx_SET (VOIDmode, target,
1981 ? gen_rtx_fmt_ee (code, mode, source,
1987 /* Calculate a few attributes that may be useful for specific
1988 optimizations. */
1989 for (i = 31; i >= 0; i--)
1991 if ((remainder & (1 << i)) == 0)
1992 clear_sign_bit_copies++;
1997 for (i = 31; i >= 0; i--)
1999 if ((remainder & (1 << i)) != 0)
2000 set_sign_bit_copies++;
2005 for (i = 0; i <= 31; i++)
2007 if ((remainder & (1 << i)) == 0)
2008 clear_zero_bit_copies++;
2013 for (i = 0; i <= 31; i++)
2015 if ((remainder & (1 << i)) != 0)
2016 set_zero_bit_copies++;
2024 /* See if we can use movw. */
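/* (movw, available from ARMv6T2 / Thumb-2 onwards, loads any 16-bit
   value in a single instruction, e.g. "movw rD, #0x1234", so no
   synthesis is needed for such constants.) */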
2025 if (arm_arch_thumb2 && (remainder & 0xffff0000) == 0)
2028 emit_constant_insn (cond, gen_rtx_SET (VOIDmode, target,
2033 /* See if we can do this by sign_extending a constant that is known
2034 to be negative. This is a good way of doing it, since the shift
2035 may well merge into a subsequent insn. */
2036 if (set_sign_bit_copies > 1)
2038 if (const_ok_for_arm
2039 (temp1 = ARM_SIGN_EXTEND (remainder
2040 << (set_sign_bit_copies - 1))))
2044 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2045 emit_constant_insn (cond,
2046 gen_rtx_SET (VOIDmode, new_src,
2048 emit_constant_insn (cond,
2049 gen_ashrsi3 (target, new_src,
2050 GEN_INT (set_sign_bit_copies - 1)));
2054 /* For an inverted constant, we will need to set the low bits;
2055 these will be shifted out of harm's way. */
2056 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
2057 if (const_ok_for_arm (~temp1))
2061 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2062 emit_constant_insn (cond,
2063 gen_rtx_SET (VOIDmode, new_src,
2065 emit_constant_insn (cond,
2066 gen_ashrsi3 (target, new_src,
2067 GEN_INT (set_sign_bit_copies - 1)));
2073 /* See if we can calculate the value as the difference between two
2074 valid immediates. */
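/* For example 0x00fffff0 (neither it, its negation, nor its complement
   is a valid immediate) is 0x01000000 - 0x10, so it can be built as
   (illustrative registers):

	mov	rT, #0x01000000
	sub	rD, rT, #0x10  */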
2075 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
2077 int topshift = clear_sign_bit_copies & ~1;
2079 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
2080 & (0xff000000 >> topshift));
2082 /* If temp1 is zero, then that means the 9 most significant
2083 bits of remainder were 1 and we've caused it to overflow.
2084 When topshift is 0 we don't need to do anything since we
2085 can borrow from 'bit 32'. */
2086 if (temp1 == 0 && topshift != 0)
2087 temp1 = 0x80000000 >> (topshift - 1);
2089 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
2091 if (const_ok_for_arm (temp2))
2095 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2096 emit_constant_insn (cond,
2097 gen_rtx_SET (VOIDmode, new_src,
2099 emit_constant_insn (cond,
2100 gen_addsi3 (target, new_src,
2108 /* See if we can generate this by setting the bottom (or the top)
2109 16 bits, and then shifting these into the other half of the
2110 word. We only look for the simplest cases; doing more would cost
2111 too much. Be careful, however, not to generate this when the
2112 alternative would take fewer insns. */
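/* For example, in ARM state 0x01010101 can be built this way
   (illustrative registers):

	mov	rT, #0x01
	orr	rT, rT, #0x0100
	orr	rD, rT, rT, lsl #16  */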
2113 if (val & 0xffff0000)
2115 temp1 = remainder & 0xffff0000;
2116 temp2 = remainder & 0x0000ffff;
2118 /* Overlaps outside this range are best done using other methods. */
2119 for (i = 9; i < 24; i++)
2121 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
2122 && !const_ok_for_arm (temp2))
2124 rtx new_src = (subtargets
2125 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2127 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
2128 source, subtargets, generate);
2136 gen_rtx_ASHIFT (mode, source,
2143 /* Don't duplicate cases already considered. */
2144 for (i = 17; i < 24; i++)
2146 if (((temp1 | (temp1 >> i)) == remainder)
2147 && !const_ok_for_arm (temp1))
2149 rtx new_src = (subtargets
2150 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
2152 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
2153 source, subtargets, generate);
2158 gen_rtx_SET (VOIDmode, target,
2161 gen_rtx_LSHIFTRT (mode, source,
2172 /* If we have IOR or XOR, and the constant can be loaded in a
2173 single instruction, and we can find a temporary to put it in,
2174 then this can be done in two instructions instead of 3-4. */
2176 /* TARGET can't be NULL if SUBTARGETS is 0. */
2177 || (reload_completed && !reg_mentioned_p (target, source)))
2179 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
2183 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2185 emit_constant_insn (cond,
2186 gen_rtx_SET (VOIDmode, sub,
2188 emit_constant_insn (cond,
2189 gen_rtx_SET (VOIDmode, target,
2190 gen_rtx_fmt_ee (code, mode,
2200 if (set_sign_bit_copies > 8
2201 && (val & (-1 << (32 - set_sign_bit_copies))) == val)
2205 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2206 rtx shift = GEN_INT (set_sign_bit_copies);
2210 gen_rtx_SET (VOIDmode, sub,
2212 gen_rtx_ASHIFT (mode,
2217 gen_rtx_SET (VOIDmode, target,
2219 gen_rtx_LSHIFTRT (mode, sub,
2225 if (set_zero_bit_copies > 8
2226 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
2230 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2231 rtx shift = GEN_INT (set_zero_bit_copies);
2235 gen_rtx_SET (VOIDmode, sub,
2237 gen_rtx_LSHIFTRT (mode,
2242 gen_rtx_SET (VOIDmode, target,
2244 gen_rtx_ASHIFT (mode, sub,
2250 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
2254 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
2255 emit_constant_insn (cond,
2256 gen_rtx_SET (VOIDmode, sub,
2257 gen_rtx_NOT (mode, source)));
2260 sub = gen_reg_rtx (mode);
2261 emit_constant_insn (cond,
2262 gen_rtx_SET (VOIDmode, sub,
2263 gen_rtx_AND (mode, source,
2265 emit_constant_insn (cond,
2266 gen_rtx_SET (VOIDmode, target,
2267 gen_rtx_NOT (mode, sub)));
2274 /* See if two shifts will do 2 or more insns' worth of work. */
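/* For example (AND rS, #0x0000ffff), where neither the mask nor its
   complement is a single immediate, comes out as the two-shift
   sequence (illustrative registers):

	mov	rT, rS, lsl #16
	mov	rD, rT, lsr #16  */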
2275 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
2277 HOST_WIDE_INT shift_mask = ((0xffffffff
2278 << (32 - clear_sign_bit_copies))
2281 if ((remainder | shift_mask) != 0xffffffff)
2285 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2286 insns = arm_gen_constant (AND, mode, cond,
2287 remainder | shift_mask,
2288 new_src, source, subtargets, 1);
2293 rtx targ = subtargets ? NULL_RTX : target;
2294 insns = arm_gen_constant (AND, mode, cond,
2295 remainder | shift_mask,
2296 targ, source, subtargets, 0);
2302 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2303 rtx shift = GEN_INT (clear_sign_bit_copies);
2305 emit_insn (gen_ashlsi3 (new_src, source, shift));
2306 emit_insn (gen_lshrsi3 (target, new_src, shift));
2312 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
2314 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
2316 if ((remainder | shift_mask) != 0xffffffff)
2320 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2322 insns = arm_gen_constant (AND, mode, cond,
2323 remainder | shift_mask,
2324 new_src, source, subtargets, 1);
2329 rtx targ = subtargets ? NULL_RTX : target;
2331 insns = arm_gen_constant (AND, mode, cond,
2332 remainder | shift_mask,
2333 targ, source, subtargets, 0);
2339 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
2340 rtx shift = GEN_INT (clear_zero_bit_copies);
2342 emit_insn (gen_lshrsi3 (new_src, source, shift));
2343 emit_insn (gen_ashlsi3 (target, new_src, shift));
2355 for (i = 0; i < 32; i++)
2356 if (remainder & (1 << i))
2359 if (code == AND || (can_invert && num_bits_set > 16))
2360 remainder = (~remainder) & 0xffffffff;
2361 else if (code == PLUS && num_bits_set > 16)
2362 remainder = (-remainder) & 0xffffffff;
2369 /* Now try to find a way of doing the job in either two or three
2370 instructions.
2371 We start by looking for the largest block of zeros that are aligned on
2372 a 2-bit boundary; we then fill up the temps, wrapping around to the
2373 top of the word when we drop off the bottom.
2374 In the worst case this code should produce no more than four insns.
2375 Thumb-2 constants are shifted, not rotated, so the MSB is always the
2376 best place to start. */
2378 /* ??? Use thumb2 replicated constants when the high and low halfwords are
2379 the same. */
2384 int best_consecutive_zeros = 0;
2386 for (i = 0; i < 32; i += 2)
2388 int consecutive_zeros = 0;
2390 if (!(remainder & (3 << i)))
2392 while ((i < 32) && !(remainder & (3 << i)))
2394 consecutive_zeros += 2;
2397 if (consecutive_zeros > best_consecutive_zeros)
2399 best_consecutive_zeros = consecutive_zeros;
2400 best_start = i - consecutive_zeros;
2406 /* So long as it won't require any more insns to do so, it's
2407 desirable to emit a small constant (in bits 0...9) in the last
2408 insn. This way there is more chance that it can be combined with
2409 a later addressing insn to form a pre-indexed load or store
2410 operation. Consider:
2412 *((volatile int *)0xe0000100) = 1;
2413 *((volatile int *)0xe0000110) = 2;
2415 We want this to wind up as:

2417 mov rA, #0xe0000000
2418 mov rB, #1
2419 str rB, [rA, #0x100]
2420 mov rB, #2
2421 str rB, [rA, #0x110]
2423 rather than having to synthesize both large constants from scratch.
2425 Therefore, we calculate how many insns would be required to emit
2426 the constant starting from `best_start', and also starting from
2427 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
2428 yield a shorter sequence, we may as well use zero. */
2430 && ((((unsigned HOST_WIDE_INT) 1) << best_start) < remainder)
2431 && (count_insns_for_constant (remainder, 0) <=
2432 count_insns_for_constant (remainder, best_start)))
2436 /* Now start emitting the insns. */
2444 if (remainder & (3 << (i - 2)))
2449 temp1 = remainder & ((0x0ff << end)
2450 | ((i < end) ? (0xff >> (32 - end)) : 0));
2451 remainder &= ~temp1;
2455 rtx new_src, temp1_rtx;
2457 if (code == SET || code == MINUS)
2459 new_src = (subtargets ? gen_reg_rtx (mode) : target);
2460 if (can_invert && code != MINUS)
2465 if (remainder && subtargets)
2466 new_src = gen_reg_rtx (mode);
2471 else if (can_negate)
2475 temp1 = trunc_int_for_mode (temp1, mode);
2476 temp1_rtx = GEN_INT (temp1);
2480 else if (code == MINUS)
2481 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
2483 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
2485 emit_constant_insn (cond,
2486 gen_rtx_SET (VOIDmode, new_src,
2496 else if (code == MINUS)
2505 /* Arm allows rotates by a multiple of two. Thumb-2 allows arbitrary
2506 shifts. */
2518 /* Canonicalize a comparison so that we are more likely to recognize it.
2519 This can be done for a few constant compares, where we can make the
2520 immediate value easier to load. */
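/* For example (GT reg, #0xfff) cannot be matched directly, since 0xfff
   is not a valid immediate, but it is equivalent to (GE reg, #0x1000),
   and 0x1000 is. */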
2523 arm_canonicalize_comparison (enum rtx_code code, enum machine_mode mode,
2526 unsigned HOST_WIDE_INT i = INTVAL (*op1);
2527 unsigned HOST_WIDE_INT maxval;
2528 maxval = (((unsigned HOST_WIDE_INT) 1) << (GET_MODE_BITSIZE(mode) - 1)) - 1;
2539 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2541 *op1 = GEN_INT (i + 1);
2542 return code == GT ? GE : LT;
2549 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2551 *op1 = GEN_INT (i - 1);
2552 return code == GE ? GT : LE;
2558 if (i != ~((unsigned HOST_WIDE_INT) 0)
2559 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
2561 *op1 = GEN_INT (i + 1);
2562 return code == GTU ? GEU : LTU;
2569 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
2571 *op1 = GEN_INT (i - 1);
2572 return code == GEU ? GTU : LEU;
2584 /* Define how to find the value returned by a function. */
2587 arm_function_value(tree type, tree func ATTRIBUTE_UNUSED)
2589 enum machine_mode mode;
2590 int unsignedp ATTRIBUTE_UNUSED;
2591 rtx r ATTRIBUTE_UNUSED;
2593 mode = TYPE_MODE (type);
2594 /* Promote integer types. */
2595 if (INTEGRAL_TYPE_P (type))
2596 PROMOTE_FUNCTION_MODE (mode, unsignedp, type);
2598 /* Promote small structs returned in a register to full-word size
2599 for big-endian AAPCS. */
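/* For example, under big-endian AAPCS a 3-byte struct is widened to
   SImode here, so its contents end up in the most significant bytes
   of r0 rather than the least significant ones. */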
2600 if (arm_return_in_msb (type))
2602 HOST_WIDE_INT size = int_size_in_bytes (type);
2603 if (size % UNITS_PER_WORD != 0)
2605 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
2606 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
2610 return LIBCALL_VALUE(mode);
2613 /* Determine the amount of memory needed to store the possible return
2614 registers of an untyped call. */
2616 arm_apply_result_size (void)
2622 if (TARGET_HARD_FLOAT_ABI)
2626 if (TARGET_MAVERICK)
2629 if (TARGET_IWMMXT_ABI)
2636 /* Decide whether a type should be returned in memory (true)
2637 or in a register (false). This is called by the macro
2638 RETURN_IN_MEMORY. */
2640 arm_return_in_memory (tree type)
2644 if (!AGGREGATE_TYPE_P (type)
2645 && (TREE_CODE (type) != VECTOR_TYPE)
2646 && !(TARGET_AAPCS_BASED && TREE_CODE (type) == COMPLEX_TYPE))
2647 /* All simple types are returned in registers.
2648 For AAPCS, complex types are treated the same as aggregates. */
2651 size = int_size_in_bytes (type);
2653 if (arm_abi != ARM_ABI_APCS)
2655 /* ATPCS and later return aggregate types in memory only if they are
2656 larger than a word (or are variable size). */
2657 return (size < 0 || size > UNITS_PER_WORD);
2660 /* To maximize backwards compatibility with previous versions of gcc,
2661 return vectors up to 4 words in registers. */
2662 if (TREE_CODE (type) == VECTOR_TYPE)
2663 return (size < 0 || size > (4 * UNITS_PER_WORD));
2665 /* For the arm-wince targets we choose to be compatible with Microsoft's
2666 ARM and Thumb compilers, which always return aggregates in memory. */
2668 /* All structures/unions bigger than one word are returned in memory.
2669 Also catch the case where int_size_in_bytes returns -1. In this case
2670 the aggregate is either huge or of variable size, and in either case
2671 we will want to return it via memory and not in a register. */
2672 if (size < 0 || size > UNITS_PER_WORD)
2675 if (TREE_CODE (type) == RECORD_TYPE)
2679 /* For a struct the APCS says that we only return in a register
2680 if the type is 'integer like' and every addressable element
2681 has an offset of zero. For practical purposes this means
2682 that the structure can have at most one non bit-field element
2683 and that this element must be the first one in the structure. */
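/* Under APCS, for example, struct { int i; } is returned in r0, while
   struct { short s; char c; } must be returned in memory, since its
   second field is addressable. */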
2685 /* Find the first field, ignoring non FIELD_DECL things which will
2686 have been created by C++. */
2687 for (field = TYPE_FIELDS (type);
2688 field && TREE_CODE (field) != FIELD_DECL;
2689 field = TREE_CHAIN (field))
2693 return 0; /* An empty structure. Allowed by an extension to ANSI C. */
2695 /* Check that the first field is valid for returning in a register. */
2697 /* ... Floats are not allowed. */
2698 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2701 /* ... Aggregates that are not themselves valid for returning in
2702 a register are not allowed. */
2703 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2706 /* Now check the remaining fields, if any. Only bitfields are allowed,
2707 since they are not addressable. */
2708 for (field = TREE_CHAIN (field);
2710 field = TREE_CHAIN (field))
2712 if (TREE_CODE (field) != FIELD_DECL)
2715 if (!DECL_BIT_FIELD_TYPE (field))
2722 if (TREE_CODE (type) == UNION_TYPE)
2726 /* Unions can be returned in registers if every element is
2727 integral, or can be returned in an integer register. */
2728 for (field = TYPE_FIELDS (type);
2730 field = TREE_CHAIN (field))
2732 if (TREE_CODE (field) != FIELD_DECL)
2735 if (FLOAT_TYPE_P (TREE_TYPE (field)))
2738 if (RETURN_IN_MEMORY (TREE_TYPE (field)))
2744 #endif /* not ARM_WINCE */
2746 /* Return all other types in memory. */
2750 /* Indicate whether or not words of a double are in big-endian order. */
2753 arm_float_words_big_endian (void)
2755 if (TARGET_MAVERICK)
2758 /* For FPA, float words are always big-endian. For VFP, float words
2759 follow the memory system mode. */
2767 return (TARGET_BIG_END ? 1 : 0);
2772 /* Initialize a variable CUM of type CUMULATIVE_ARGS
2773 for a call to a function whose data type is FNTYPE.
2774 For a library call, FNTYPE is NULL. */
2776 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
2777 rtx libname ATTRIBUTE_UNUSED,
2778 tree fndecl ATTRIBUTE_UNUSED)
2780 /* On the ARM, the offset starts at 0. */
2782 pcum->iwmmxt_nregs = 0;
2783 pcum->can_split = true;
2785 pcum->call_cookie = CALL_NORMAL;
2787 if (TARGET_LONG_CALLS)
2788 pcum->call_cookie = CALL_LONG;
2790 /* Check for long call/short call attributes. The attributes
2791 override any command line option. */
2794 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (fntype)))
2795 pcum->call_cookie = CALL_SHORT;
2796 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (fntype)))
2797 pcum->call_cookie = CALL_LONG;
2800 /* Varargs vectors are treated the same as long long.
2801 named_count avoids having to change the way arm handles 'named'. */
2802 pcum->named_count = 0;
2805 if (TARGET_REALLY_IWMMXT && fntype)
2809 for (fn_arg = TYPE_ARG_TYPES (fntype);
2811 fn_arg = TREE_CHAIN (fn_arg))
2812 pcum->named_count += 1;
2814 if (! pcum->named_count)
2815 pcum->named_count = INT_MAX;
2820 /* Return true if mode/type need doubleword alignment. */
2822 arm_needs_doubleword_align (enum machine_mode mode, tree type)
2824 return (GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY
2825 || (type && TYPE_ALIGN (type) > PARM_BOUNDARY));
2829 /* Determine where to put an argument to a function.
2830 Value is zero to push the argument on the stack,
2831 or a hard register in which to store the argument.
2833 MODE is the argument's machine mode.
2834 TYPE is the data type of the argument (as a tree).
2835 This is null for libcalls where that information may
2836 not be available.
2837 CUM is a variable of type CUMULATIVE_ARGS which gives info about
2838 the preceding args and about the function being called.
2839 NAMED is nonzero if this argument is a named parameter
2840 (otherwise it is an extra parameter matching an ellipsis). */
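/* For example, on an AAPCS target f (int a, long long b) passes A in
   r0 and, because B needs doubleword alignment, skips r1 and passes B
   in r2/r3. */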
2843 arm_function_arg (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2844 tree type, int named)
2848 /* Varargs vectors are treated the same as long long.
2849 named_count avoids having to change the way arm handles 'named'. */
2850 if (TARGET_IWMMXT_ABI
2851 && arm_vector_mode_supported_p (mode)
2852 && pcum->named_count > pcum->nargs + 1)
2854 if (pcum->iwmmxt_nregs <= 9)
2855 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
2858 pcum->can_split = false;
2863 /* Put doubleword aligned quantities in even register pairs. */
2865 && ARM_DOUBLEWORD_ALIGN
2866 && arm_needs_doubleword_align (mode, type))
2869 if (mode == VOIDmode)
2870 /* Compute operand 2 of the call insn. */
2871 return GEN_INT (pcum->call_cookie);
2873 /* Only allow splitting an arg between regs and memory if all preceding
2874 args were allocated to regs. For args passed by reference we only count
2875 the reference pointer. */
2876 if (pcum->can_split)
2879 nregs = ARM_NUM_REGS2 (mode, type);
2881 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
2884 return gen_rtx_REG (mode, pcum->nregs);
2888 arm_arg_partial_bytes (CUMULATIVE_ARGS *pcum, enum machine_mode mode,
2889 tree type, bool named ATTRIBUTE_UNUSED)
2891 int nregs = pcum->nregs;
2893 if (arm_vector_mode_supported_p (mode))
2896 if (NUM_ARG_REGS > nregs
2897 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
2899 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
2904 /* Variable sized types are passed by reference. This is a GCC
2905 extension to the ARM ABI. */
2908 arm_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
2909 enum machine_mode mode ATTRIBUTE_UNUSED,
2910 tree type, bool named ATTRIBUTE_UNUSED)
2912 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
2915 /* Encode the current state of the #pragma [no_]long_calls. */
2918 OFF, /* No #pragma [no_]long_calls is in effect. */
2919 LONG, /* #pragma long_calls is in effect. */
2920 SHORT /* #pragma no_long_calls is in effect. */
2923 static arm_pragma_enum arm_pragma_long_calls = OFF;
2926 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2928 arm_pragma_long_calls = LONG;
2932 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2934 arm_pragma_long_calls = SHORT;
2938 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
2940 arm_pragma_long_calls = OFF;
2943 /* Table of machine attributes. */
2944 const struct attribute_spec arm_attribute_table[] =
2946 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
2947 /* Function calls made to this symbol must be done indirectly, because
2948 it may lie outside of the 26 bit addressing range of a normal function
2949 call. */
2950 { "long_call", 0, 0, false, true, true, NULL },
2951 /* Whereas these functions are always known to reside within the 26 bit
2952 addressing range. */
2953 { "short_call", 0, 0, false, true, true, NULL },
2954 /* Interrupt Service Routines have special prologue and epilogue requirements. */
2955 { "isr", 0, 1, false, false, false, arm_handle_isr_attribute },
2956 { "interrupt", 0, 1, false, false, false, arm_handle_isr_attribute },
2957 { "naked", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2959 /* ARM/PE has three new attributes:
2960 interfacearm - ?
2961 dllexport - for exporting a function/variable that will live in a dll
2962 dllimport - for importing a function/variable from a dll
2964 Microsoft allows multiple declspecs in one __declspec, separating
2965 them with spaces. We do NOT support this. Instead, use __declspec
2966 multiple times. */
2968 { "dllimport", 0, 0, true, false, false, NULL },
2969 { "dllexport", 0, 0, true, false, false, NULL },
2970 { "interfacearm", 0, 0, true, false, false, arm_handle_fndecl_attribute },
2971 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
2972 { "dllimport", 0, 0, false, false, false, handle_dll_attribute },
2973 { "dllexport", 0, 0, false, false, false, handle_dll_attribute },
2974 { "notshared", 0, 0, false, true, false, arm_handle_notshared_attribute },
2976 { NULL, 0, 0, false, false, false, NULL }
2979 /* Handle an attribute requiring a FUNCTION_DECL;
2980 arguments as in struct attribute_spec.handler. */
2982 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
2983 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
2985 if (TREE_CODE (*node) != FUNCTION_DECL)
2987 warning (OPT_Wattributes, "%qs attribute only applies to functions",
2988 IDENTIFIER_POINTER (name));
2989 *no_add_attrs = true;
2995 /* Handle an "interrupt" or "isr" attribute;
2996 arguments as in struct attribute_spec.handler. */
2998 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
3003 if (TREE_CODE (*node) != FUNCTION_DECL)
3005 warning (OPT_Wattributes, "%qs attribute only applies to functions",
3006 IDENTIFIER_POINTER (name));
3007 *no_add_attrs = true;
3009 /* FIXME: the argument, if any, is checked for type attributes;
3010 should it be checked for decl ones? */
3014 if (TREE_CODE (*node) == FUNCTION_TYPE
3015 || TREE_CODE (*node) == METHOD_TYPE)
3017 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
3019 warning (OPT_Wattributes, "%qs attribute ignored",
3020 IDENTIFIER_POINTER (name));
3021 *no_add_attrs = true;
3024 else if (TREE_CODE (*node) == POINTER_TYPE
3025 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
3026 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
3027 && arm_isr_value (args) != ARM_FT_UNKNOWN)
3029 *node = build_variant_type_copy (*node);
3030 TREE_TYPE (*node) = build_type_attribute_variant
3032 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
3033 *no_add_attrs = true;
3037 /* Possibly pass this attribute on from the type to a decl. */
3038 if (flags & ((int) ATTR_FLAG_DECL_NEXT
3039 | (int) ATTR_FLAG_FUNCTION_NEXT
3040 | (int) ATTR_FLAG_ARRAY_NEXT))
3042 *no_add_attrs = true;
3043 return tree_cons (name, args, NULL_TREE);
3047 warning (OPT_Wattributes, "%qs attribute ignored",
3048 IDENTIFIER_POINTER (name));
3056 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
3057 /* Handle the "notshared" attribute. This attribute is another way of
3058 requesting hidden visibility. ARM's compiler supports
3059 "__declspec(notshared)"; we support the same thing via an
3063 arm_handle_notshared_attribute (tree *node,
3064 tree name ATTRIBUTE_UNUSED,
3065 tree args ATTRIBUTE_UNUSED,
3066 int flags ATTRIBUTE_UNUSED,
3069 tree decl = TYPE_NAME (*node);
3073 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
3074 DECL_VISIBILITY_SPECIFIED (decl) = 1;
3075 *no_add_attrs = false;
3081 /* Return 0 if the attributes for two types are incompatible, 1 if they
3082 are compatible, and 2 if they are nearly compatible (which causes a
3083 warning to be generated). */
3085 arm_comp_type_attributes (tree type1, tree type2)
3089 /* Check for mismatch of non-default calling convention. */
3090 if (TREE_CODE (type1) != FUNCTION_TYPE)
3093 /* Check for mismatched call attributes. */
3094 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
3095 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
3096 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
3097 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
3099 /* Only bother to check if an attribute is defined. */
3100 if (l1 | l2 | s1 | s2)
3102 /* If one type has an attribute, the other must have the same attribute. */
3103 if ((l1 != l2) || (s1 != s2))
3106 /* Disallow mixed attributes. */
3107 if ((l1 & s2) || (l2 & s1))
3111 /* Check for mismatched ISR attribute. */
3112 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
3114 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
3115 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
3117 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
3124 /* Encode long_call or short_call attribute by prefixing
3125 symbol name in DECL with a special character FLAG. */
3127 arm_encode_call_attribute (tree decl, int flag)
3129 const char * str = XSTR (XEXP (DECL_RTL (decl), 0), 0);
3130 int len = strlen (str);
3133 /* Do not allow weak functions to be treated as short call. */
3134 if (DECL_WEAK (decl) && flag == SHORT_CALL_FLAG_CHAR)
3137 newstr = alloca (len + 2);
3138 newstr[0] = flag;
3139 strcpy (newstr + 1, str);
3141 newstr = (char *) ggc_alloc_string (newstr, len + 1);
3142 XSTR (XEXP (DECL_RTL (decl), 0), 0) = newstr;
3145 /* Assign default attributes to a newly defined type. This is used to
3146 set short_call/long_call attributes for function types of
3147 functions defined inside corresponding #pragma scopes. */
3149 arm_set_default_type_attributes (tree type)
3151 /* Add __attribute__ ((long_call)) to all functions, when
3152 inside #pragma long_calls or __attribute__ ((short_call)),
3153 when inside #pragma no_long_calls. */
3154 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
3156 tree type_attr_list, attr_name;
3157 type_attr_list = TYPE_ATTRIBUTES (type);
3159 if (arm_pragma_long_calls == LONG)
3160 attr_name = get_identifier ("long_call");
3161 else if (arm_pragma_long_calls == SHORT)
3162 attr_name = get_identifier ("short_call");
3166 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
3167 TYPE_ATTRIBUTES (type) = type_attr_list;
3171 /* Return 1 if the operand is a SYMBOL_REF for a function known to be
3172 defined within the current compilation unit. If this cannot be
3173 determined, then 0 is returned. */
3175 current_file_function_operand (rtx sym_ref)
3177 /* This is a bit of a fib. A function will have a short call flag
3178 applied to its name if it has the short call attribute, or it has
3179 already been defined within the current compilation unit. */
3180 if (ENCODED_SHORT_CALL_ATTR_P (XSTR (sym_ref, 0)))
3183 /* The current function is always defined within the current compilation
3184 unit. If it is a weak definition however, then this may not be the real
3185 definition of the function, and so we have to say no. */
3186 if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
3187 && !DECL_WEAK (current_function_decl))
3190 /* We cannot make the determination - default to returning 0. */
3194 /* Return nonzero if a 32-bit "long_call" should be generated for
3195 this call. We generate a long_call if the function:
3197 a. has an __attribute__ ((long_call))
3198 or b. is within the scope of a #pragma long_calls
3199 or c. the -mlong-calls command line switch has been specified
3201 1. -ffunction-sections is in effect
3202 or 2. the current function has __attribute__ ((section))
3203 or 3. the target function has __attribute__ ((section))
3205 However we do not generate a long call if the function:
3207 d. has an __attribute__ ((short_call))
3208 or e. is inside the scope of a #pragma no_long_calls
3209 or f. is defined within the current compilation unit.
3211 This function will be called by C fragments contained in the machine
3212 description file. SYM_REF and CALL_COOKIE correspond to the matched
3213 rtl operands. CALL_SYMBOL is used to distinguish between
3214 two different callers of the function. It is set to 1 in the
3215 "call_symbol" and "call_symbol_value" patterns and to 0 in the "call"
3216 and "call_value" patterns. This is because of the difference in the
3217 SYM_REFs passed by these patterns. */
3219 arm_is_longcall_p (rtx sym_ref, int call_cookie, int call_symbol)
3223 if (GET_CODE (sym_ref) != MEM)
3226 sym_ref = XEXP (sym_ref, 0);
3229 if (GET_CODE (sym_ref) != SYMBOL_REF)
3232 if (call_cookie & CALL_SHORT)
3235 if (TARGET_LONG_CALLS)
3237 if (flag_function_sections
3238 || DECL_SECTION_NAME (current_function_decl))
3239 /* c.3 is handled by the definition of the
3240 ARM_DECLARE_FUNCTION_SIZE macro. */
3244 if (current_file_function_operand (sym_ref))
3247 return (call_cookie & CALL_LONG)
3248 || ENCODED_LONG_CALL_ATTR_P (XSTR (sym_ref, 0))
3249 || TARGET_LONG_CALLS;
3252 /* Return nonzero if it is ok to make a tail-call to DECL. */
3254 arm_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
3256 int call_type = TARGET_LONG_CALLS ? CALL_LONG : CALL_NORMAL;
3257 unsigned long func_type;
3259 if (cfun->machine->sibcall_blocked)
3262 /* Never tailcall something for which we have no decl, or if we
3263 are in Thumb mode. */
3264 if (decl == NULL || TARGET_THUMB)
3267 /* Get the calling method. */
3268 if (lookup_attribute ("short_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3269 call_type = CALL_SHORT;
3270 else if (lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
3271 call_type = CALL_LONG;
3273 /* Cannot tail-call to long calls, since these are out of range of
3274 a branch instruction. However, if not compiling PIC, we know
3275 we can reach the symbol if it is in this compilation unit. */
3276 if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
3279 /* If we are interworking and the function is not declared static
3280 then we can't tail-call it unless we know that it exists in this
3281 compilation unit (since it might be a Thumb routine). */
3282 if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
3285 func_type = arm_current_func_type ();
3286 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
3287 if (IS_INTERRUPT (func_type))
3290 /* Never tailcall if function may be called with a misaligned SP. */
3291 if (IS_STACKALIGN (func_type))
3294 /* Everything else is ok. */
3299 /* Addressing mode support functions. */
3301 /* Return nonzero if X is a legitimate immediate operand when compiling
3302 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
3304 legitimate_pic_operand_p (rtx x)
3306 if (GET_CODE (x) == SYMBOL_REF
3307 || (GET_CODE (x) == CONST
3308 && GET_CODE (XEXP (x, 0)) == PLUS
3309 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
3316 legitimize_pic_address (rtx orig, enum machine_mode mode, rtx reg)
3318 if (GET_CODE (orig) == SYMBOL_REF
3319 || GET_CODE (orig) == LABEL_REF)
3321 #ifndef AOF_ASSEMBLER
3322 rtx pic_ref, address;
3327 /* If this function doesn't have a pic register, create one now.
3328 A lot of the logic here is made obscure by the fact that this
3329 routine gets called as part of the rtx cost estimation
3330 process. We don't want those calls to affect any assumptions
3331 about the real function; and further, we can't call
3332 entry_of_function () until we start the real expansion
3333 process. */
3334 if (!current_function_uses_pic_offset_table)
3336 gcc_assert (!no_new_pseudos);
3337 if (arm_pic_register != INVALID_REGNUM)
3339 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
3341 /* Play games to avoid marking the function as needing pic
3342 if we are being called as part of the cost-estimation
3343 process. */
3344 if (current_ir_type () != IR_GIMPLE)
3345 current_function_uses_pic_offset_table = 1;
3351 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
3353 /* Play games to avoid marking the function as needing pic
3354 if we are being called as part of the cost-estimation
3355 process. */
3356 if (current_ir_type () != IR_GIMPLE)
3358 current_function_uses_pic_offset_table = 1;
3361 arm_load_pic_register (0UL);
3365 emit_insn_after (seq, entry_of_function ());
3372 gcc_assert (!no_new_pseudos);
3373 reg = gen_reg_rtx (Pmode);
3378 #ifdef AOF_ASSEMBLER
3379 /* The AOF assembler can generate relocations for these directly, and
3380 understands that the PIC register has to be added into the offset. */
3381 insn = emit_insn (gen_pic_load_addr_based (reg, orig));
3384 address = gen_reg_rtx (Pmode);
3389 emit_insn (gen_pic_load_addr_arm (address, orig));
3390 else if (TARGET_THUMB2)
3391 emit_insn (gen_pic_load_addr_thumb2 (address, orig));
3392 else /* TARGET_THUMB1 */
3393 emit_insn (gen_pic_load_addr_thumb1 (address, orig));
3395 if ((GET_CODE (orig) == LABEL_REF
3396 || (GET_CODE (orig) == SYMBOL_REF &&
3397 SYMBOL_REF_LOCAL_P (orig)))
3399 pic_ref = gen_rtx_PLUS (Pmode, cfun->machine->pic_reg, address);
3402 pic_ref = gen_const_mem (Pmode,
3403 gen_rtx_PLUS (Pmode, cfun->machine->pic_reg,
3407 insn = emit_move_insn (reg, pic_ref);
3409 /* Put a REG_EQUAL note on this insn, so that it can be optimized
3410 by loop. */
3411 set_unique_reg_note (insn, REG_EQUAL, orig);
3415 else if (GET_CODE (orig) == CONST)
3419 if (GET_CODE (XEXP (orig, 0)) == PLUS
3420 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
3423 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
3424 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
3429 gcc_assert (!no_new_pseudos);
3430 reg = gen_reg_rtx (Pmode);
3433 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
3435 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
3436 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
3437 base == reg ? 0 : reg);
3439 if (GET_CODE (offset) == CONST_INT)
3441 /* The base register doesn't really matter; we only want to
3442 test the index for the appropriate mode. */
3443 if (!arm_legitimate_index_p (mode, offset, SET, 0))
3445 gcc_assert (!no_new_pseudos);
3446 offset = force_reg (Pmode, offset);
3449 if (GET_CODE (offset) == CONST_INT)
3450 return plus_constant (base, INTVAL (offset));
3453 if (GET_MODE_SIZE (mode) > 4
3454 && (GET_MODE_CLASS (mode) == MODE_INT
3455 || TARGET_SOFT_FLOAT))
3457 emit_insn (gen_addsi3 (reg, base, offset));
3461 return gen_rtx_PLUS (Pmode, base, offset);
3468 /* Find a spare register to use during the prologue of a function. */
3471 thumb_find_work_register (unsigned long pushed_regs_mask)
3475 /* Check the argument registers first as these are call-used. The
3476 register allocation order means that sometimes r3 might be used
3477 but earlier argument registers might not, so check them all. */
3478 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
3479 if (!regs_ever_live[reg])
3482 /* Before going on to check the call-saved registers we can try a couple
3483 more ways of deducing that r3 is available. The first is when we are
3484 pushing anonymous arguments onto the stack and we have less than 4
3485 registers worth of fixed arguments(*). In this case r3 will be part of
3486 the variable argument list and so we can be sure that it will be
3487 pushed right at the start of the function. Hence it will be available
3488 for the rest of the prologue.
3489 (*): i.e. current_function_pretend_args_size is greater than 0. */
3490 if (cfun->machine->uses_anonymous_args
3491 && current_function_pretend_args_size > 0)
3492 return LAST_ARG_REGNUM;
3494 /* The other case is when we have fixed arguments but less than 4 registers
3495 worth. In this case r3 might be used in the body of the function, but
3496 it is not being used to convey an argument into the function. In theory
3497 we could just check current_function_args_size to see how many bytes are
3498 being passed in argument registers, but it seems that it is unreliable.
3499 Sometimes it will have the value 0 when in fact arguments are being
3500 passed. (See testcase execute/20021111-1.c for an example). So we also
3501 check the args_info.nregs field as well. The problem with this field is
3502 that it makes no allowances for arguments that are passed to the
3503 function but which are not used. Hence we could miss an opportunity
3504 when a function has an unused argument in r3. But it is better to be
3505 safe than to be sorry. */
3506 if (! cfun->machine->uses_anonymous_args
3507 && current_function_args_size >= 0
3508 && current_function_args_size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
3509 && cfun->args_info.nregs < 4)
3510 return LAST_ARG_REGNUM;
3512 /* Otherwise look for a call-saved register that is going to be pushed. */
3513 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
3514 if (pushed_regs_mask & (1 << reg))
3519 /* Thumb-2 can use high regs. */
3520 for (reg = FIRST_HI_REGNUM; reg < 15; reg ++)
3521 if (pushed_regs_mask & (1 << reg))
3524 /* Something went wrong - thumb_compute_save_reg_mask()
3525 should have arranged for a suitable register to be pushed. */
3529 static GTY(()) int pic_labelno;
3531 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
3532 scratch register. */
3535 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
3537 #ifndef AOF_ASSEMBLER
3538 rtx l1, labelno, pic_tmp, pic_tmp2, pic_rtx;
3539 rtx global_offset_table;
3541 if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
3544 gcc_assert (flag_pic);
3546 /* We use an UNSPEC rather than a LABEL_REF because this label never appears
3547 in the code stream. */
3549 labelno = GEN_INT (pic_labelno++);
3550 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
3551 l1 = gen_rtx_CONST (VOIDmode, l1);
3553 global_offset_table = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
3554 /* On the ARM the PC register contains 'dot + 8' at the time of the
3555 addition; on the Thumb it is 'dot + 4'. */
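/* So for ELF targets in ARM state the sequence emitted below
   assembles to something like:

	ldr	rPIC, =_GLOBAL_OFFSET_TABLE_ - (.LPIC0 + 8)
   .LPIC0:
	add	rPIC, pc, rPIC

   leaving rPIC pointing at the GOT. */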
3556 pic_tmp = plus_constant (l1, TARGET_ARM ? 8 : 4);
3558 pic_tmp2 = gen_rtx_CONST (VOIDmode,
3559 gen_rtx_PLUS (Pmode, global_offset_table, pc_rtx));
3561 pic_tmp2 = gen_rtx_CONST (VOIDmode, global_offset_table);
3563 pic_rtx = gen_rtx_CONST (Pmode, gen_rtx_MINUS (Pmode, pic_tmp2, pic_tmp));
3567 emit_insn (gen_pic_load_addr_arm (cfun->machine->pic_reg, pic_rtx));
3568 emit_insn (gen_pic_add_dot_plus_eight (cfun->machine->pic_reg,
3569 cfun->machine->pic_reg, labelno));
3571 else if (TARGET_THUMB2)
3573 /* Thumb-2 only allows very limited access to the PC. Calculate the
3574 address in a temporary register. */
3575 if (arm_pic_register != INVALID_REGNUM)
3577 pic_tmp = gen_rtx_REG (SImode,
3578 thumb_find_work_register (saved_regs));
3582 gcc_assert (!no_new_pseudos);
3583 pic_tmp = gen_reg_rtx (Pmode);
3586 emit_insn (gen_pic_load_addr_thumb2 (cfun->machine->pic_reg, pic_rtx));
3587 emit_insn (gen_pic_load_dot_plus_four (pic_tmp, labelno));
3588 emit_insn (gen_addsi3 (cfun->machine->pic_reg, cfun->machine->pic_reg,
3591 else /* TARGET_THUMB1 */
3593 if (arm_pic_register != INVALID_REGNUM
3594 && REGNO (cfun->machine->pic_reg) > LAST_LO_REGNUM)
3596 /* We will have pushed the pic register, so we should always be
3597 able to find a work register. */
3598 pic_tmp = gen_rtx_REG (SImode,
3599 thumb_find_work_register (saved_regs));
3600 emit_insn (gen_pic_load_addr_thumb1 (pic_tmp, pic_rtx));
3601 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
3604 emit_insn (gen_pic_load_addr_thumb1 (cfun->machine->pic_reg, pic_rtx));
3605 emit_insn (gen_pic_add_dot_plus_four (cfun->machine->pic_reg,
3606 cfun->machine->pic_reg, labelno));
3609 /* Need to emit this whether or not we obey regdecls,
3610 since setjmp/longjmp can cause life info to screw up. */
3611 emit_insn (gen_rtx_USE (VOIDmode, cfun->machine->pic_reg));
3612 #endif /* AOF_ASSEMBLER */
3616 /* Return nonzero if X is valid as an ARM state addressing register. */
3618 arm_address_register_rtx_p (rtx x, int strict_p)
3622 if (GET_CODE (x) != REG)
3628 return ARM_REGNO_OK_FOR_BASE_P (regno);
3630 return (regno <= LAST_ARM_REGNUM
3631 || regno >= FIRST_PSEUDO_REGISTER
3632 || regno == FRAME_POINTER_REGNUM
3633 || regno == ARG_POINTER_REGNUM);
3636 /* Return TRUE if this rtx is the difference of a symbol and a label,
3637 and will reduce to a PC-relative relocation in the object file.
3638 Expressions like this can be left alone when generating PIC, rather
3639 than forced through the GOT. */
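/* For example (minus (symbol_ref "sym") (label_ref L)) reduces to
   sym - L, which the assembler can emit as a PC-relative relocation
   with no GOT entry. */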
3641 pcrel_constant_p (rtx x)
3643 if (GET_CODE (x) == MINUS)
3644 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
3649 /* Return nonzero if X is a valid ARM state address operand. */
3651 arm_legitimate_address_p (enum machine_mode mode, rtx x, RTX_CODE outer,
3655 enum rtx_code code = GET_CODE (x);
3657 if (arm_address_register_rtx_p (x, strict_p))
3660 use_ldrd = (TARGET_LDRD
3662 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3664 if (code == POST_INC || code == PRE_DEC
3665 || ((code == PRE_INC || code == POST_DEC)
3666 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3667 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3669 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3670 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3671 && GET_CODE (XEXP (x, 1)) == PLUS
3672 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3674 rtx addend = XEXP (XEXP (x, 1), 1);
3676 /* Don't allow ldrd post increment by register because it's hard
3677 to fix up invalid register choices. */
3679 && GET_CODE (x) == POST_MODIFY
3680 && GET_CODE (addend) == REG)
3683 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
3684 && arm_legitimate_index_p (mode, addend, outer, strict_p));
3687 /* After reload constants split into minipools will have addresses
3688 from a LABEL_REF. */
3689 else if (reload_completed
3690 && (code == LABEL_REF
3692 && GET_CODE (XEXP (x, 0)) == PLUS
3693 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3694 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3697 else if (mode == TImode)
3700 else if (code == PLUS)
3702 rtx xop0 = XEXP (x, 0);
3703 rtx xop1 = XEXP (x, 1);
3705 return ((arm_address_register_rtx_p (xop0, strict_p)
3706 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
3707 || (arm_address_register_rtx_p (xop1, strict_p)
3708 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
3712 /* Reload currently can't handle MINUS, so disable this for now */
3713 else if (GET_CODE (x) == MINUS)
3715 rtx xop0 = XEXP (x, 0);
3716 rtx xop1 = XEXP (x, 1);
3718 return (arm_address_register_rtx_p (xop0, strict_p)
3719 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
3723 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3724 && code == SYMBOL_REF
3725 && CONSTANT_POOL_ADDRESS_P (x)
3727 && symbol_mentioned_p (get_pool_constant (x))
3728 && ! pcrel_constant_p (get_pool_constant (x))))
3734 /* Return nonzero if X is a valid Thumb-2 address operand. */
3736 thumb2_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
3739 enum rtx_code code = GET_CODE (x);
3741 if (arm_address_register_rtx_p (x, strict_p))
3744 use_ldrd = (TARGET_LDRD
3746 || (mode == DFmode && (TARGET_SOFT_FLOAT || TARGET_VFP))));
3748 if (code == POST_INC || code == PRE_DEC
3749 || ((code == PRE_INC || code == POST_DEC)
3750 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
3751 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
3753 else if ((code == POST_MODIFY || code == PRE_MODIFY)
3754 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
3755 && GET_CODE (XEXP (x, 1)) == PLUS
3756 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
3758 /* Thumb-2 only has autoincrement by constant. */
3759 rtx addend = XEXP (XEXP (x, 1), 1);
3760 HOST_WIDE_INT offset;
3762 if (GET_CODE (addend) != CONST_INT)
3765 offset = INTVAL (addend);
3766 if (GET_MODE_SIZE (mode) <= 4)
3767 return (offset > -256 && offset < 256);
3769 return (use_ldrd && offset > -1024 && offset < 1024
3770 && (offset & 3) == 0);
3773 /* After reload constants split into minipools will have addresses
3774 from a LABEL_REF. */
3775 else if (reload_completed
3776 && (code == LABEL_REF
3778 && GET_CODE (XEXP (x, 0)) == PLUS
3779 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
3780 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
3783 else if (mode == TImode)
3786 else if (code == PLUS)
3788 rtx xop0 = XEXP (x, 0);
3789 rtx xop1 = XEXP (x, 1);
3791 return ((arm_address_register_rtx_p (xop0, strict_p)
3792 && thumb2_legitimate_index_p (mode, xop1, strict_p))
3793 || (arm_address_register_rtx_p (xop1, strict_p)
3794 && thumb2_legitimate_index_p (mode, xop0, strict_p)));
3797 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
3798 && code == SYMBOL_REF
3799 && CONSTANT_POOL_ADDRESS_P (x)
3801 && symbol_mentioned_p (get_pool_constant (x))
3802 && ! pcrel_constant_p (get_pool_constant (x))))
3808 /* Return nonzero if INDEX is valid for an address index operand in
3809 ARM state. */
3811 arm_legitimate_index_p (enum machine_mode mode, rtx index, RTX_CODE outer,
3814 HOST_WIDE_INT range;
3815 enum rtx_code code = GET_CODE (index);
3817 /* Standard coprocessor addressing modes. */
3818 if (TARGET_HARD_FLOAT
3819 && (TARGET_FPA || TARGET_MAVERICK)
3820 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3821 || (TARGET_MAVERICK && mode == DImode)))
3822 return (code == CONST_INT && INTVAL (index) < 1024
3823 && INTVAL (index) > -1024
3824 && (INTVAL (index) & 3) == 0);
3826 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3827 return (code == CONST_INT
3828 && INTVAL (index) < 1024
3829 && INTVAL (index) > -1024
3830 && (INTVAL (index) & 3) == 0);
3832 if (arm_address_register_rtx_p (index, strict_p)
3833 && (GET_MODE_SIZE (mode) <= 4))
3836 if (mode == DImode || mode == DFmode)
3838 if (code == CONST_INT)
3840 HOST_WIDE_INT val = INTVAL (index);
3843 return val > -256 && val < 256;
3845 return val > -4096 && val < 4092;
3848 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
3851 if (GET_MODE_SIZE (mode) <= 4
3854 || (mode == QImode && outer == SIGN_EXTEND))))
3858 rtx xiop0 = XEXP (index, 0);
3859 rtx xiop1 = XEXP (index, 1);
3861 return ((arm_address_register_rtx_p (xiop0, strict_p)
3862 && power_of_two_operand (xiop1, SImode))
3863 || (arm_address_register_rtx_p (xiop1, strict_p)
3864 && power_of_two_operand (xiop0, SImode)));
3866 else if (code == LSHIFTRT || code == ASHIFTRT
3867 || code == ASHIFT || code == ROTATERT)
3869 rtx op = XEXP (index, 1);
3871 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3872 && GET_CODE (op) == CONST_INT
3874 && INTVAL (op) <= 31);
3878 /* For ARM v4 we may be doing a sign-extend operation during the
3879 load. */
3882 if (mode == HImode || (outer == SIGN_EXTEND && mode == QImode))
3888 range = (mode == HImode) ? 4095 : 4096;
3890 return (code == CONST_INT
3891 && INTVAL (index) < range
3892 && INTVAL (index) > -range);
3895 /* Return true if OP is a valid index scaling factor for Thumb-2 address
3896 index operand, i.e. 1, 2, 4 or 8. */
3898 thumb2_index_mul_operand (rtx op)
3902 if (GET_CODE (op) != CONST_INT)
3906 return (val == 1 || val == 2 || val == 4 || val == 8);
3909 /* Return nonzero if INDEX is a valid Thumb-2 address index operand. */
3911 thumb2_legitimate_index_p (enum machine_mode mode, rtx index, int strict_p)
3913 enum rtx_code code = GET_CODE (index);
3915 /* ??? Combine arm and thumb2 coprocessor addressing modes. */
3916 /* Standard coprocessor addressing modes. */
3917 if (TARGET_HARD_FLOAT
3918 && (TARGET_FPA || TARGET_MAVERICK)
3919 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3920 || (TARGET_MAVERICK && mode == DImode)))
3921 return (code == CONST_INT && INTVAL (index) < 1024
3922 && INTVAL (index) > -1024
3923 && (INTVAL (index) & 3) == 0);
3925 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
3927 /* For DImode assume values will usually live in core regs
3928 and only allow LDRD addressing modes. */
3929 if (!TARGET_LDRD || mode != DImode)
3930 return (code == CONST_INT
3931 && INTVAL (index) < 1024
3932 && INTVAL (index) > -1024
3933 && (INTVAL (index) & 3) == 0);
3936 if (arm_address_register_rtx_p (index, strict_p)
3937 && (GET_MODE_SIZE (mode) <= 4))
3940 if (mode == DImode || mode == DFmode)
3942 HOST_WIDE_INT val = INTVAL (index);
3943 /* ??? Can we assume ldrd for thumb2? */
3944 /* Thumb-2 ldrd only has reg+const addressing modes. */
3945 if (code != CONST_INT)
3948 /* ldrd supports offsets of +-1020.
3949 However, the ldr fallback does not. */
3950 return val > -256 && val < 256 && (val & 3) == 0;
3955 rtx xiop0 = XEXP (index, 0);
3956 rtx xiop1 = XEXP (index, 1);
3958 return ((arm_address_register_rtx_p (xiop0, strict_p)
3959 && thumb2_index_mul_operand (xiop1))
3960 || (arm_address_register_rtx_p (xiop1, strict_p)
3961 && thumb2_index_mul_operand (xiop0)));
3963 else if (code == ASHIFT)
3965 rtx op = XEXP (index, 1);
3967 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
3968 && GET_CODE (op) == CONST_INT
3970 && INTVAL (op) <= 3);
3973 return (code == CONST_INT
3974 && INTVAL (index) < 4096
3975 && INTVAL (index) > -256);
3978 /* Return nonzero if X is valid as a 16-bit Thumb state base register. */
3980 thumb1_base_register_rtx_p (rtx x, enum machine_mode mode, int strict_p)
3984 if (GET_CODE (x) != REG)
3990 return THUMB1_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
3992 return (regno <= LAST_LO_REGNUM
3993 || regno > LAST_VIRTUAL_REGISTER
3994 || regno == FRAME_POINTER_REGNUM
3995 || (GET_MODE_SIZE (mode) >= 4
3996 && (regno == STACK_POINTER_REGNUM
3997 || regno >= FIRST_PSEUDO_REGISTER
3998 || x == hard_frame_pointer_rtx
3999 || x == arg_pointer_rtx)));
4002 /* Return nonzero if x is a legitimate index register. This is the case
4003 for any base register that can access a QImode object. */
4005 thumb1_index_register_rtx_p (rtx x, int strict_p)
4007 return thumb1_base_register_rtx_p (x, QImode, strict_p);
4010 /* Return nonzero if x is a legitimate 16-bit Thumb-state address.
4012 The AP may be eliminated to either the SP or the FP, so we use the
4013 least common denominator, e.g. SImode, and offsets from 0 to 64.
4015 ??? Verify whether the above is the right approach.
4017 ??? Also, the FP may be eliminated to the SP, so perhaps that
4018 needs special handling also.
4020 ??? Look at how the mips16 port solves this problem. It probably uses
4021 better ways to solve some of these problems.
4023 Although it is not incorrect, we don't accept QImode and HImode
4024 addresses based on the frame pointer or arg pointer until the
4025 reload pass starts. This is so that eliminating such addresses
4026 into stack based ones won't produce impossible code. */
4028 thumb1_legitimate_address_p (enum machine_mode mode, rtx x, int strict_p)
4030 /* ??? Not clear if this is right. Experiment. */
4031 if (GET_MODE_SIZE (mode) < 4
4032 && !(reload_in_progress || reload_completed)
4033 && (reg_mentioned_p (frame_pointer_rtx, x)
4034 || reg_mentioned_p (arg_pointer_rtx, x)
4035 || reg_mentioned_p (virtual_incoming_args_rtx, x)
4036 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
4037 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
4038 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
4041 /* Accept any base register. SP only in SImode or larger. */
4042 else if (thumb1_base_register_rtx_p (x, mode, strict_p))
4045 /* This is PC relative data before arm_reorg runs. */
4046 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
4047 && GET_CODE (x) == SYMBOL_REF
4048 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
4051 /* This is PC relative data after arm_reorg runs. */
4052 else if (GET_MODE_SIZE (mode) >= 4 && reload_completed
4053 && (GET_CODE (x) == LABEL_REF
4054 || (GET_CODE (x) == CONST
4055 && GET_CODE (XEXP (x, 0)) == PLUS
4056 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
4057 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT)))
4060 /* Post-inc indexing only supported for SImode and larger. */
4061 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
4062 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p))
4065 else if (GET_CODE (x) == PLUS)
4067 /* REG+REG address can be any two index registers. */
4068 /* We disallow FRAME+REG addressing since we know that FRAME
4069 will be replaced with STACK, and SP relative addressing only
4070 permits SP+OFFSET. */
4071 if (GET_MODE_SIZE (mode) <= 4
4072 && XEXP (x, 0) != frame_pointer_rtx
4073 && XEXP (x, 1) != frame_pointer_rtx
4074 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
4075 && thumb1_index_register_rtx_p (XEXP (x, 1), strict_p))
4078 /* REG+const has 5-7 bit offset for non-SP registers. */
4079 else if ((thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
4080 || XEXP (x, 0) == arg_pointer_rtx)
4081 && GET_CODE (XEXP (x, 1)) == CONST_INT
4082 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4085 /* REG+const has 10-bit offset for SP, but only SImode and
4086 larger are supported. */
4087 /* ??? Should probably check for DI/DFmode overflow here
4088 just like GO_IF_LEGITIMATE_OFFSET does. */
4089 else if (GET_CODE (XEXP (x, 0)) == REG
4090 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
4091 && GET_MODE_SIZE (mode) >= 4
4092 && GET_CODE (XEXP (x, 1)) == CONST_INT
4093 && INTVAL (XEXP (x, 1)) >= 0
4094 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
4095 && (INTVAL (XEXP (x, 1)) & 3) == 0)
4098 else if (GET_CODE (XEXP (x, 0)) == REG
4099 && (REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
4100 || REGNO (XEXP (x, 0)) == ARG_POINTER_REGNUM
4101 || (REGNO (XEXP (x, 0)) >= FIRST_VIRTUAL_REGISTER
4102 && REGNO (XEXP (x, 0)) <= LAST_VIRTUAL_REGISTER))
4103 && GET_MODE_SIZE (mode) >= 4
4104 && GET_CODE (XEXP (x, 1)) == CONST_INT
4105 && (INTVAL (XEXP (x, 1)) & 3) == 0)
4109 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
4110 && GET_MODE_SIZE (mode) == 4
4111 && GET_CODE (x) == SYMBOL_REF
4112 && CONSTANT_POOL_ADDRESS_P (x)
4114 && symbol_mentioned_p (get_pool_constant (x))
4115 && ! pcrel_constant_p (get_pool_constant (x))))
4121 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
4122 instruction of mode MODE. */
4124 thumb_legitimate_offset_p (enum machine_mode mode, HOST_WIDE_INT val)
  switch (GET_MODE_SIZE (mode))
    {
    case 1:
      return val >= 0 && val < 32;

    case 2:
      return val >= 0 && val < 64 && (val & 1) == 0;

    default:
      return (val >= 0
	      && (val + GET_MODE_SIZE (mode)) <= 128
	      && (val & 3) == 0);
    }
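/* Illustrative examples (not from the original source): a QImode access
   accepts offsets 0..31, so [r3, #31] is valid but [r3, #32] is not; an
   HImode access needs an even offset below 64, so [r3, #62] is valid but
   [r3, #61] is not; a word access needs a word-aligned offset with
   offset + size <= 128, so [r3, #124] is the largest valid SImode form.  */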
4141 /* Build the SYMBOL_REF for __tls_get_addr. */
4143 static GTY(()) rtx tls_get_addr_libfunc;
4146 get_tls_get_addr (void)
4148 if (!tls_get_addr_libfunc)
4149 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
4150 return tls_get_addr_libfunc;
4154 arm_load_tp (rtx target)
4157 target = gen_reg_rtx (SImode);
4161 /* Can return in any reg. */
4162 emit_insn (gen_load_tp_hard (target));
4166 /* Always returned in r0. Immediately copy the result into a pseudo,
4167 otherwise other uses of r0 (e.g. setting up function arguments) may
4168 clobber the value. */
4172 emit_insn (gen_load_tp_soft ());
4174 tmp = gen_rtx_REG (SImode, 0);
4175 emit_move_insn (target, tmp);
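/* For reference (based on the ARM EABI, an assumption rather than
   something stated here): the "soft" path is a call to the helper
   __aeabi_read_tp, which returns the thread pointer in r0, while the
   "hard" path reads the CP15 thread register directly, e.g.

       mrc  p15, 0, rD, c13, c0, 3

   which is why the hard variant can return in any register.  */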
4181 load_tls_operand (rtx x, rtx reg)
4185 if (reg == NULL_RTX)
4186 reg = gen_reg_rtx (SImode);
4188 tmp = gen_rtx_CONST (SImode, x);
4190 emit_move_insn (reg, tmp);
4196 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
4198 rtx insns, label, labelno, sum;
4202 labelno = GEN_INT (pic_labelno++);
4203 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
4204 label = gen_rtx_CONST (VOIDmode, label);
4206 sum = gen_rtx_UNSPEC (Pmode,
4207 gen_rtvec (4, x, GEN_INT (reloc), label,
4208 GEN_INT (TARGET_ARM ? 8 : 4)),
4210 reg = load_tls_operand (sum, reg);
4213 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
4214 else if (TARGET_THUMB2)
4217 /* Thumb-2 only allows very limited access to the PC. Calculate
4218 the address in a temporary register. */
4219 tmp = gen_reg_rtx (SImode);
4220 emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
      emit_insn (gen_addsi3 (reg, reg, tmp));
4223 else /* TARGET_THUMB1 */
4224 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
4226 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX, LCT_PURE, /* LCT_CONST? */
4227 Pmode, 1, reg, Pmode);
4229 insns = get_insns ();
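/* A sketch of the code this builds for the global-dynamic model on ARM
   (illustrative only; the exact literal and relocation syntax are
   assembler specific and omitted here):

       ldr  r0, <literal>     @ TLS_GD32 offset for the symbol,
   1:  add  r0, pc, r0        @ biased by the pic label + 8
       bl   __tls_get_addr    @ result comes back in r0

   The 8 matches GEN_INT (TARGET_ARM ? 8 : 4) above: in ARM state the pc
   reads as the instruction address + 8, in Thumb state + 4.  */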
4236 legitimize_tls_address (rtx x, rtx reg)
4238 rtx dest, tp, label, labelno, sum, insns, ret, eqv, addend;
4239 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
4243 case TLS_MODEL_GLOBAL_DYNAMIC:
4244 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
4245 dest = gen_reg_rtx (Pmode);
4246 emit_libcall_block (insns, dest, ret, x);
4249 case TLS_MODEL_LOCAL_DYNAMIC:
4250 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
4252 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
4253 share the LDM result with other LD model accesses. */
4254 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
4256 dest = gen_reg_rtx (Pmode);
4257 emit_libcall_block (insns, dest, ret, eqv);
4259 /* Load the addend. */
4260 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (TLS_LDO32)),
4262 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
4263 return gen_rtx_PLUS (Pmode, dest, addend);
4265 case TLS_MODEL_INITIAL_EXEC:
4266 labelno = GEN_INT (pic_labelno++);
4267 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
4268 label = gen_rtx_CONST (VOIDmode, label);
4269 sum = gen_rtx_UNSPEC (Pmode,
4270 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
4271 GEN_INT (TARGET_ARM ? 8 : 4)),
4273 reg = load_tls_operand (sum, reg);
4276 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
4277 else if (TARGET_THUMB2)
4280 /* Thumb-2 only allows very limited access to the PC. Calculate
4281 the address in a temporary register. */
4282 tmp = gen_reg_rtx (SImode);
4283 emit_insn (gen_pic_load_dot_plus_four (tmp, labelno));
      emit_insn (gen_addsi3 (reg, reg, tmp));
4285 emit_move_insn (reg, gen_const_mem (SImode, reg));
4289 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
4290 emit_move_insn (reg, gen_const_mem (SImode, reg));
4293 tp = arm_load_tp (NULL_RTX);
4295 return gen_rtx_PLUS (Pmode, tp, reg);
4297 case TLS_MODEL_LOCAL_EXEC:
4298 tp = arm_load_tp (NULL_RTX);
4300 reg = gen_rtx_UNSPEC (Pmode,
4301 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
4303 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
4305 return gen_rtx_PLUS (Pmode, tp, reg);
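/* Illustrative local-exec expansion (a sketch, not the literal output):
   the address is just the thread pointer plus a link-time constant,

       mrc  p15, 0, r0, c13, c0, 3   @ thread pointer
       ldr  r1, <literal>            @ TLS_LE32 offset of x in the block
       ldr  r2, [r0, r1]

   corresponding to the tp + reg PLUS returned above.  */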
4312 /* Try machine-dependent ways of modifying an illegitimate address
4313 to be legitimate. If we find one, return the new, valid address. */
4315 arm_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4317 if (arm_tls_symbol_p (x))
4318 return legitimize_tls_address (x, NULL_RTX);
4320 if (GET_CODE (x) == PLUS)
4322 rtx xop0 = XEXP (x, 0);
4323 rtx xop1 = XEXP (x, 1);
4325 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
4326 xop0 = force_reg (SImode, xop0);
4328 if (CONSTANT_P (xop1) && !symbol_mentioned_p (xop1))
4329 xop1 = force_reg (SImode, xop1);
4331 if (ARM_BASE_REGISTER_RTX_P (xop0)
4332 && GET_CODE (xop1) == CONST_INT)
4334 HOST_WIDE_INT n, low_n;
4338 /* VFP addressing modes actually allow greater offsets, but for
4339 now we just stick with the lowest common denominator. */
4341 || ((TARGET_SOFT_FLOAT || TARGET_VFP) && mode == DFmode))
4353 low_n = ((mode) == TImode ? 0
4354 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
4358 base_reg = gen_reg_rtx (SImode);
4359 val = force_operand (plus_constant (xop0, n), NULL_RTX);
4360 emit_move_insn (base_reg, val);
4361 x = plus_constant (base_reg, low_n);
4363 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4364 x = gen_rtx_PLUS (SImode, xop0, xop1);
4367 /* XXX We don't allow MINUS any more -- see comment in
4368 arm_legitimate_address_p (). */
4369 else if (GET_CODE (x) == MINUS)
4371 rtx xop0 = XEXP (x, 0);
4372 rtx xop1 = XEXP (x, 1);
4374 if (CONSTANT_P (xop0))
4375 xop0 = force_reg (SImode, xop0);
4377 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
4378 xop1 = force_reg (SImode, xop1);
4380 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
4381 x = gen_rtx_MINUS (SImode, xop0, xop1);
  /* Make sure to take full advantage of the pre-indexed addressing mode
     with absolute addresses, which often allows the base register to
     be factored out across multiple adjacent memory references, and
     might even allow the minipool to be avoided entirely.  */
4388 else if (GET_CODE (x) == CONST_INT && optimize > 0)
4391 HOST_WIDE_INT mask, base, index;
      /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
	 use an 8-bit index.  So let's use a 12-bit index for SImode only
	 and hope that arm_gen_constant will enable ldrb to use more bits.  */
4397 bits = (mode == SImode) ? 12 : 8;
4398 mask = (1 << bits) - 1;
4399 base = INTVAL (x) & ~mask;
4400 index = INTVAL (x) & mask;
4401 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
4403 /* It'll most probably be more efficient to generate the base
4404 with more bits set and use a negative index instead. */
4408 base_reg = force_reg (SImode, GEN_INT (base));
4409 x = plus_constant (base_reg, index);
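/* Worked example (illustrative): loading from the absolute address
   0x12344 in SImode gives mask = 0xfff, base = 0x12000 and index = 0x344;
   base has only two bits set, so the positive-index path is taken and the
   access becomes "ldr rD, [rBase, #0x344]", with rBase reusable by
   neighbouring accesses.  */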
  /* We need to find and carefully transform any SYMBOL and LABEL
     references, so go back to the original address expression.  */
4416 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4418 if (new_x != orig_x)
4426 /* Try machine-dependent ways of modifying an illegitimate Thumb address
4427 to be legitimate. If we find one, return the new, valid address. */
4429 thumb_legitimize_address (rtx x, rtx orig_x, enum machine_mode mode)
4431 if (arm_tls_symbol_p (x))
4432 return legitimize_tls_address (x, NULL_RTX);
4434 if (GET_CODE (x) == PLUS
4435 && GET_CODE (XEXP (x, 1)) == CONST_INT
4436 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
4437 || INTVAL (XEXP (x, 1)) < 0))
4439 rtx xop0 = XEXP (x, 0);
4440 rtx xop1 = XEXP (x, 1);
4441 HOST_WIDE_INT offset = INTVAL (xop1);
      /* Try to fold the offset into a biasing of the base register and
	 then offsetting that.  Don't do this when optimizing for space
	 since it can cause too many CSEs.  */
4446 if (optimize_size && offset >= 0
4447 && offset < 256 + 31 * GET_MODE_SIZE (mode))
4449 HOST_WIDE_INT delta;
4452 delta = offset - (256 - GET_MODE_SIZE (mode));
4453 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
4454 delta = 31 * GET_MODE_SIZE (mode);
4456 delta = offset & (~31 * GET_MODE_SIZE (mode));
4458 xop0 = force_operand (plus_constant (xop0, offset - delta),
4460 x = plus_constant (xop0, delta);
4462 else if (offset < 0 && offset > -256)
4463 /* Small negative offsets are best done with a subtract before the
	   dereference, forcing these into a register normally takes two
	   instructions.  */
4466 x = force_operand (x, NULL_RTX);
4469 /* For the remaining cases, force the constant into a register. */
4470 xop1 = force_reg (SImode, xop1);
4471 x = gen_rtx_PLUS (SImode, xop0, xop1);
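/* Worked example (illustrative): with optimize_size set, an SImode access
   at offset 130 falls into the second branch above, giving delta = 124,
   so we emit "add rT, rB, #6" followed by "ldr rD, [rT, #124]" instead of
   materializing the whole constant 130 in a register.  */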
4474 else if (GET_CODE (x) == PLUS
4475 && s_register_operand (XEXP (x, 1), SImode)
4476 && !s_register_operand (XEXP (x, 0), SImode))
4478 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
4480 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
  /* We need to find and carefully transform any SYMBOL and LABEL
     references, so go back to the original address expression.  */
4487 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
4489 if (new_x != orig_x)
4497 thumb_legitimize_reload_address (rtx *x_p,
4498 enum machine_mode mode,
4499 int opnum, int type,
4500 int ind_levels ATTRIBUTE_UNUSED)
4504 if (GET_CODE (x) == PLUS
4505 && GET_MODE_SIZE (mode) < 4
4506 && REG_P (XEXP (x, 0))
4507 && XEXP (x, 0) == stack_pointer_rtx
4508 && GET_CODE (XEXP (x, 1)) == CONST_INT
4509 && !thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
4514 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4515 Pmode, VOIDmode, 0, 0, opnum, type);
4519 /* If both registers are hi-regs, then it's better to reload the
4520 entire expression rather than each register individually. That
4521 only requires one reload register rather than two. */
4522 if (GET_CODE (x) == PLUS
4523 && REG_P (XEXP (x, 0))
4524 && REG_P (XEXP (x, 1))
4525 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 0), mode)
4526 && !REG_MODE_OK_FOR_REG_BASE_P (XEXP (x, 1), mode))
4531 push_reload (orig_x, NULL_RTX, x_p, NULL, MODE_BASE_REG_CLASS (mode),
4532 Pmode, VOIDmode, 0, 0, opnum, type);
4539 /* Test for various thread-local symbols. */
4541 /* Return TRUE if X is a thread-local symbol. */
4544 arm_tls_symbol_p (rtx x)
4546 if (! TARGET_HAVE_TLS)
4549 if (GET_CODE (x) != SYMBOL_REF)
4552 return SYMBOL_REF_TLS_MODEL (x) != 0;
4555 /* Helper for arm_tls_referenced_p. */
4558 arm_tls_operand_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
4560 if (GET_CODE (*x) == SYMBOL_REF)
4561 return SYMBOL_REF_TLS_MODEL (*x) != 0;
4563 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
4564 TLS offsets, not real symbol references. */
4565 if (GET_CODE (*x) == UNSPEC
4566 && XINT (*x, 1) == UNSPEC_TLS)
4572 /* Return TRUE if X contains any TLS symbol references. */
4575 arm_tls_referenced_p (rtx x)
4577 if (! TARGET_HAVE_TLS)
4580 return for_each_rtx (&x, arm_tls_operand_p_1, NULL);
4583 #define REG_OR_SUBREG_REG(X) \
4584 (GET_CODE (X) == REG \
4585 || (GET_CODE (X) == SUBREG && GET_CODE (SUBREG_REG (X)) == REG))
4587 #define REG_OR_SUBREG_RTX(X) \
4588 (GET_CODE (X) == REG ? (X) : SUBREG_REG (X))
4590 #ifndef COSTS_N_INSNS
4591 #define COSTS_N_INSNS(N) ((N) * 4 - 2)
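/* An observation, not part of the original comment: with this fallback
   definition COSTS_N_INSNS (1) == 2 and COSTS_N_INSNS (2) == 6; normally
   the definition from rtl.h, (N) * 4, is used instead.  */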
4594 thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
4596 enum machine_mode mode = GET_MODE (x);
4609 return COSTS_N_INSNS (1);
4612 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
4615 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
4622 return COSTS_N_INSNS (2) + cycles;
4624 return COSTS_N_INSNS (1) + 16;
4627 return (COSTS_N_INSNS (1)
4628 + 4 * ((GET_CODE (SET_SRC (x)) == MEM)
		     + (GET_CODE (SET_DEST (x)) == MEM)));
4634 if ((unsigned HOST_WIDE_INT) INTVAL (x) < 256)
4636 if (thumb_shiftable_const (INTVAL (x)))
4637 return COSTS_N_INSNS (2);
4638 return COSTS_N_INSNS (3);
4640 else if ((outer == PLUS || outer == COMPARE)
4641 && INTVAL (x) < 256 && INTVAL (x) > -256)
4643 else if (outer == AND
4644 && INTVAL (x) < 256 && INTVAL (x) >= -256)
4645 return COSTS_N_INSNS (1);
4646 else if (outer == ASHIFT || outer == ASHIFTRT
4647 || outer == LSHIFTRT)
4649 return COSTS_N_INSNS (2);
4655 return COSTS_N_INSNS (3);
4673 /* XXX another guess. */
4674 /* Memory costs quite a lot for the first word, but subsequent words
4675 load at the equivalent of a single insn each. */
4676 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
	    + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
	       ? 4 : 0));
4682 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4687 /* XXX still guessing. */
4688 switch (GET_MODE (XEXP (x, 0)))
4691 return (1 + (mode == DImode ? 4 : 0)
4692 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4695 return (4 + (mode == DImode ? 4 : 0)
4696 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4699 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4711 /* Worker routine for arm_rtx_costs. */
4712 /* ??? This needs updating for thumb2. */
4714 arm_rtx_costs_1 (rtx x, enum rtx_code code, enum rtx_code outer)
4716 enum machine_mode mode = GET_MODE (x);
4717 enum rtx_code subcode;
4723 /* Memory costs quite a lot for the first word, but subsequent words
4724 load at the equivalent of a single insn each. */
4725 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
4726 + (GET_CODE (x) == SYMBOL_REF
4727 && CONSTANT_POOL_ADDRESS_P (x) ? 4 : 0));
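/* Worked example (illustrative): with UNITS_PER_WORD == 4 an SImode load
   costs 10, a DImode load costs 10 + 4 = 14, and an SImode load from the
   constant pool costs 10 + 4 = 14 because of the extra pc-relative
   fetch.  */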
4733 return optimize_size ? COSTS_N_INSNS (2) : 100;
4736 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4743 case ASHIFT: case LSHIFTRT: case ASHIFTRT:
4745 return (8 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : 8)
4746 + ((GET_CODE (XEXP (x, 0)) == REG
4747 || (GET_CODE (XEXP (x, 0)) == SUBREG
4748 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4750 return (1 + ((GET_CODE (XEXP (x, 0)) == REG
4751 || (GET_CODE (XEXP (x, 0)) == SUBREG
4752 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == REG))
4754 + ((GET_CODE (XEXP (x, 1)) == REG
4755 || (GET_CODE (XEXP (x, 1)) == SUBREG
4756 && GET_CODE (SUBREG_REG (XEXP (x, 1))) == REG)
4757 || (GET_CODE (XEXP (x, 1)) == CONST_INT))
4761 if (GET_CODE (XEXP (x, 1)) == MULT && mode == SImode && arm_arch_thumb2)
4763 extra_cost = rtx_cost (XEXP (x, 1), code);
4764 if (!REG_OR_SUBREG_REG (XEXP (x, 0)))
4765 extra_cost += 4 * ARM_NUM_REGS (mode);
4770 return (4 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 8)
4771 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4772 || (GET_CODE (XEXP (x, 0)) == CONST_INT
4773 && const_ok_for_arm (INTVAL (XEXP (x, 0)))))
4776 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4777 return (2 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4778 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4779 && arm_const_double_rtx (XEXP (x, 1))))
4781 + ((REG_OR_SUBREG_REG (XEXP (x, 0))
4782 || (GET_CODE (XEXP (x, 0)) == CONST_DOUBLE
4783 && arm_const_double_rtx (XEXP (x, 0))))
4786 if (((GET_CODE (XEXP (x, 0)) == CONST_INT
4787 && const_ok_for_arm (INTVAL (XEXP (x, 0)))
4788 && REG_OR_SUBREG_REG (XEXP (x, 1))))
4789 || (((subcode = GET_CODE (XEXP (x, 1))) == ASHIFT
4790 || subcode == ASHIFTRT || subcode == LSHIFTRT
4791 || subcode == ROTATE || subcode == ROTATERT
4793 && GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT
4794 && ((INTVAL (XEXP (XEXP (x, 1), 1)) &
4795 (INTVAL (XEXP (XEXP (x, 1), 1)) - 1)) == 0)))
4796 && REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 0))
4797 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 1), 1))
4798 || GET_CODE (XEXP (XEXP (x, 1), 1)) == CONST_INT)
4799 && REG_OR_SUBREG_REG (XEXP (x, 0))))
4804 if (GET_CODE (XEXP (x, 0)) == MULT)
4806 extra_cost = rtx_cost (XEXP (x, 0), code);
4807 if (!REG_OR_SUBREG_REG (XEXP (x, 1)))
4808 extra_cost += 4 * ARM_NUM_REGS (mode);
4812 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4813 return (2 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4814 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4815 || (GET_CODE (XEXP (x, 1)) == CONST_DOUBLE
4816 && arm_const_double_rtx (XEXP (x, 1))))
4820 case AND: case XOR: case IOR:
      /* Normally the frame registers will be split into reg+const during
	 reload, so it is a bad idea to combine them with other instructions,
	 since then they might not be moved outside of loops.  As a compromise
	 we allow integration with ops that have a constant as their second
	 operand.  */
4828 if ((REG_OR_SUBREG_REG (XEXP (x, 0))
4829 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))
4830 && GET_CODE (XEXP (x, 1)) != CONST_INT)
4831 || (REG_OR_SUBREG_REG (XEXP (x, 0))
4832 && ARM_FRAME_RTX (REG_OR_SUBREG_RTX (XEXP (x, 0)))))
4836 return (4 + extra_cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 8)
4837 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4838 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4839 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4842 if (REG_OR_SUBREG_REG (XEXP (x, 0)))
4843 return (1 + (GET_CODE (XEXP (x, 1)) == CONST_INT ? 0 : extra_cost)
4844 + ((REG_OR_SUBREG_REG (XEXP (x, 1))
4845 || (GET_CODE (XEXP (x, 1)) == CONST_INT
4846 && const_ok_for_op (INTVAL (XEXP (x, 1)), code)))
4849 else if (REG_OR_SUBREG_REG (XEXP (x, 1)))
4850 return (1 + extra_cost
4851 + ((((subcode = GET_CODE (XEXP (x, 0))) == ASHIFT
4852 || subcode == LSHIFTRT || subcode == ASHIFTRT
4853 || subcode == ROTATE || subcode == ROTATERT
4855 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
4856 && ((INTVAL (XEXP (XEXP (x, 0), 1)) &
4857 (INTVAL (XEXP (XEXP (x, 0), 1)) - 1)) == 0)))
4858 && (REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 0)))
4859 && ((REG_OR_SUBREG_REG (XEXP (XEXP (x, 0), 1)))
4860 || GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT))
4866 /* This should have been handled by the CPU specific routines. */
4870 if (arm_arch3m && mode == SImode
4871 && GET_CODE (XEXP (x, 0)) == LSHIFTRT
4872 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
4873 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0))
4874 == GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)))
4875 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
4876 || GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND))
4881 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
4882 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 6);
4886 return 4 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4888 return 1 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4);
4891 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
4899 return 4 + (mode == DImode ? 4 : 0);
4902 /* ??? value extensions are cheaper on armv6. */
4903 if (GET_MODE (XEXP (x, 0)) == QImode)
4904 return (4 + (mode == DImode ? 4 : 0)
4905 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4908 switch (GET_MODE (XEXP (x, 0)))
4911 return (1 + (mode == DImode ? 4 : 0)
4912 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4915 return (4 + (mode == DImode ? 4 : 0)
4916 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4919 return (1 + (GET_CODE (XEXP (x, 0)) == MEM ? 10 : 0));
4934 if (const_ok_for_arm (INTVAL (x)))
4935 return outer == SET ? 2 : -1;
4936 else if (outer == AND
4937 && const_ok_for_arm (~INTVAL (x)))
4939 else if ((outer == COMPARE
4940 || outer == PLUS || outer == MINUS)
4941 && const_ok_for_arm (-INTVAL (x)))
4952 if (arm_const_double_rtx (x))
4953 return outer == SET ? 2 : -1;
4954 else if ((outer == COMPARE || outer == PLUS)
4955 && neg_const_double_rtx_ok_for_fpa (x))
4964 /* RTX costs when optimizing for size. */
4966 arm_size_rtx_costs (rtx x, int code, int outer_code, int *total)
4968 enum machine_mode mode = GET_MODE (x);
4972 /* XXX TBD. For now, use the standard costs. */
4973 *total = thumb1_rtx_costs (x, code, outer_code);
4980 /* A memory access costs 1 insn if the mode is small, or the address is
4981 a single register, otherwise it costs one insn per word. */
4982 if (REG_P (XEXP (x, 0)))
4983 *total = COSTS_N_INSNS (1);
4985 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
4992 /* Needs a libcall, so it costs about this. */
4993 *total = COSTS_N_INSNS (2);
4997 if (mode == SImode && GET_CODE (XEXP (x, 1)) == REG)
4999 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), code);
5007 if (mode == DImode && GET_CODE (XEXP (x, 1)) == CONST_INT)
5009 *total = COSTS_N_INSNS (3) + rtx_cost (XEXP (x, 0), code);
5012 else if (mode == SImode)
5014 *total = COSTS_N_INSNS (1) + rtx_cost (XEXP (x, 0), code);
5015 /* Slightly disparage register shifts, but not by much. */
5016 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
5017 *total += 1 + rtx_cost (XEXP (x, 1), code);
5021 /* Needs a libcall. */
5022 *total = COSTS_N_INSNS (2);
5026 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5028 *total = COSTS_N_INSNS (1);
5034 enum rtx_code subcode0 = GET_CODE (XEXP (x, 0));
5035 enum rtx_code subcode1 = GET_CODE (XEXP (x, 1));
5037 if (subcode0 == ROTATE || subcode0 == ROTATERT || subcode0 == ASHIFT
5038 || subcode0 == LSHIFTRT || subcode0 == ASHIFTRT
5039 || subcode1 == ROTATE || subcode1 == ROTATERT
5040 || subcode1 == ASHIFT || subcode1 == LSHIFTRT
5041 || subcode1 == ASHIFTRT)
5043 /* It's just the cost of the two operands. */
5048 *total = COSTS_N_INSNS (1);
5052 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5056 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5058 *total = COSTS_N_INSNS (1);
5063 case AND: case XOR: case IOR:
5066 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
5068 if (subcode == ROTATE || subcode == ROTATERT || subcode == ASHIFT
5069 || subcode == LSHIFTRT || subcode == ASHIFTRT
5070 || (code == AND && subcode == NOT))
5072 /* It's just the cost of the two operands. */
5078 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5082 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5086 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5087 *total = COSTS_N_INSNS (1);
5090 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
5099 if (cc_register (XEXP (x, 0), VOIDmode))
5102 *total = COSTS_N_INSNS (1);
5106 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT)
5107 *total = COSTS_N_INSNS (1);
5109 *total = COSTS_N_INSNS (1 + ARM_NUM_REGS (mode));
5114 if (GET_MODE_SIZE (GET_MODE (XEXP (x, 0))) < 4)
5116 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
5117 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
5120 *total += COSTS_N_INSNS (1);
5125 if (!(arm_arch4 && MEM_P (XEXP (x, 0))))
5127 switch (GET_MODE (XEXP (x, 0)))
5130 *total += COSTS_N_INSNS (1);
5134 *total += COSTS_N_INSNS (arm_arch6 ? 1 : 2);
5140 *total += COSTS_N_INSNS (2);
5145 *total += COSTS_N_INSNS (1);
5150 if (const_ok_for_arm (INTVAL (x)))
5151 *total = COSTS_N_INSNS (outer_code == SET ? 1 : 0);
5152 else if (const_ok_for_arm (~INTVAL (x)))
5153 *total = COSTS_N_INSNS (outer_code == AND ? 0 : 1);
5154 else if (const_ok_for_arm (-INTVAL (x)))
5156 if (outer_code == COMPARE || outer_code == PLUS
5157 || outer_code == MINUS)
5160 *total = COSTS_N_INSNS (1);
5163 *total = COSTS_N_INSNS (2);
5169 *total = COSTS_N_INSNS (2);
5173 *total = COSTS_N_INSNS (4);
5177 if (mode != VOIDmode)
5178 *total = COSTS_N_INSNS (ARM_NUM_REGS (mode));
  else
    *total = COSTS_N_INSNS (4); /* Who knows?  */
5185 /* RTX costs for cores with a slow MUL implementation. Thumb-2 is not
5186 supported on any "slowmul" cores, so it can be ignored. */
5189 arm_slowmul_rtx_costs (rtx x, int code, int outer_code, int *total)
5191 enum machine_mode mode = GET_MODE (x);
5195 *total = thumb1_rtx_costs (x, code, outer_code);
5202 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5209 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5211 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5212 & (unsigned HOST_WIDE_INT) 0xffffffff);
5213 int cost, const_ok = const_ok_for_arm (i);
5214 int j, booth_unit_size;
5216 /* Tune as appropriate. */
5217 cost = const_ok ? 4 : 8;
5218 booth_unit_size = 2;
5219 for (j = 0; i && j < 32; j += booth_unit_size)
5221 i >>= booth_unit_size;
5229 *total = 30 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5230 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5234 *total = arm_rtx_costs_1 (x, code, outer_code);
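/* Worked example (illustrative): for a multiply by 0x55 the Booth loop
   above consumes booth_unit_size == 2 bits of the constant per step, so
   it runs four times before the constant is exhausted; a constant with
   significant high bits runs up to 16 times, making large constants
   proportionally more expensive on these cores.  */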
5240 /* RTX cost for cores with a fast multiply unit (M variants). */
5243 arm_fastmul_rtx_costs (rtx x, int code, int outer_code, int *total)
5245 enum machine_mode mode = GET_MODE (x);
5249 *total = thumb1_rtx_costs (x, code, outer_code);
5253 /* ??? should thumb2 use different costs? */
5257 /* There is no point basing this on the tuning, since it is always the
5258 fast variant if it exists at all. */
5260 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5261 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5262 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5269 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5276 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5278 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5279 & (unsigned HOST_WIDE_INT) 0xffffffff);
5280 int cost, const_ok = const_ok_for_arm (i);
5281 int j, booth_unit_size;
5283 /* Tune as appropriate. */
5284 cost = const_ok ? 4 : 8;
5285 booth_unit_size = 8;
5286 for (j = 0; i && j < 32; j += booth_unit_size)
5288 i >>= booth_unit_size;
5296 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5297 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5301 *total = arm_rtx_costs_1 (x, code, outer_code);
5307 /* RTX cost for XScale CPUs. Thumb-2 is not supported on any xscale cores,
5308 so it can be ignored. */
5311 arm_xscale_rtx_costs (rtx x, int code, int outer_code, int *total)
5313 enum machine_mode mode = GET_MODE (x);
5317 *total = thumb1_rtx_costs (x, code, outer_code);
5324 /* There is no point basing this on the tuning, since it is always the
5325 fast variant if it exists at all. */
5327 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5328 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5329 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5336 if (GET_MODE_CLASS (mode) == MODE_FLOAT
5343 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5345 unsigned HOST_WIDE_INT i = (INTVAL (XEXP (x, 1))
5346 & (unsigned HOST_WIDE_INT) 0xffffffff);
5347 int cost, const_ok = const_ok_for_arm (i);
5348 unsigned HOST_WIDE_INT masked_const;
5350 /* The cost will be related to two insns.
5351 First a load of the constant (MOV or LDR), then a multiply. */
5354 cost += 1; /* LDR is probably more expensive because
5355 of longer result latency. */
5356 masked_const = i & 0xffff8000;
5357 if (masked_const != 0 && masked_const != 0xffff8000)
5359 masked_const = i & 0xf8000000;
5360 if (masked_const == 0 || masked_const == 0xf8000000)
5369 *total = 8 + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : 4)
5370 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : 4);
5374 /* A COMPARE of a MULT is slow on XScale; the muls instruction
5375 will stall until the multiplication is complete. */
5376 if (GET_CODE (XEXP (x, 0)) == MULT)
5377 *total = 4 + rtx_cost (XEXP (x, 0), code);
5379 *total = arm_rtx_costs_1 (x, code, outer_code);
5383 *total = arm_rtx_costs_1 (x, code, outer_code);
5389 /* RTX costs for 9e (and later) cores. */
5392 arm_9e_rtx_costs (rtx x, int code, int outer_code, int *total)
5394 enum machine_mode mode = GET_MODE (x);
5403 *total = COSTS_N_INSNS (3);
5407 *total = thumb1_rtx_costs (x, code, outer_code);
5415 /* There is no point basing this on the tuning, since it is always the
5416 fast variant if it exists at all. */
5418 && (GET_CODE (XEXP (x, 0)) == GET_CODE (XEXP (x, 1)))
5419 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
5420 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
5427 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
5444 *total = cost + (REG_OR_SUBREG_REG (XEXP (x, 0)) ? 0 : nonreg_cost)
5445 + (REG_OR_SUBREG_REG (XEXP (x, 1)) ? 0 : nonreg_cost);
5449 *total = arm_rtx_costs_1 (x, code, outer_code);
5453 /* All address computations that can be done are free, but rtx cost returns
5454 the same for practically all of them. So we weight the different types
5455 of address here in the order (most pref first):
5456 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
5458 arm_arm_address_cost (rtx x)
5460 enum rtx_code c = GET_CODE (x);
5462 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
5464 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
5467 if (c == PLUS || c == MINUS)
5469 if (GET_CODE (XEXP (x, 0)) == CONST_INT)
5472 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
5482 arm_thumb_address_cost (rtx x)
5484 enum rtx_code c = GET_CODE (x);
5489 && GET_CODE (XEXP (x, 0)) == REG
5490 && GET_CODE (XEXP (x, 1)) == CONST_INT)
5497 arm_address_cost (rtx x)
5499 return TARGET_32BIT ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
5503 arm_adjust_cost (rtx insn, rtx link, rtx dep, int cost)
5507 /* Some true dependencies can have a higher cost depending
5508 on precisely how certain input operands are used. */
5510 && REG_NOTE_KIND (link) == 0
5511 && recog_memoized (insn) >= 0
5512 && recog_memoized (dep) >= 0)
5514 int shift_opnum = get_attr_shift (insn);
5515 enum attr_type attr_type = get_attr_type (dep);
5517 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
5518 operand for INSN. If we have a shifted input operand and the
5519 instruction we depend on is another ALU instruction, then we may
5520 have to account for an additional stall. */
5521 if (shift_opnum != 0
5522 && (attr_type == TYPE_ALU_SHIFT || attr_type == TYPE_ALU_SHIFT_REG))
5524 rtx shifted_operand;
5527 /* Get the shifted operand. */
5528 extract_insn (insn);
5529 shifted_operand = recog_data.operand[shift_opnum];
	  /* Iterate over all the operands in DEP.  If we write an operand
	     that overlaps with SHIFTED_OPERAND, then we have to increase
	     the cost of this dependency.  */
5535 preprocess_constraints ();
5536 for (opno = 0; opno < recog_data.n_operands; opno++)
5538 /* We can ignore strict inputs. */
5539 if (recog_data.operand_type[opno] == OP_IN)
5542 if (reg_overlap_mentioned_p (recog_data.operand[opno],
5549 /* XXX This is not strictly true for the FPA. */
5550 if (REG_NOTE_KIND (link) == REG_DEP_ANTI
5551 || REG_NOTE_KIND (link) == REG_DEP_OUTPUT)
5554 /* Call insns don't incur a stall, even if they follow a load. */
5555 if (REG_NOTE_KIND (link) == 0
5556 && GET_CODE (insn) == CALL_INSN)
5559 if ((i_pat = single_set (insn)) != NULL
5560 && GET_CODE (SET_SRC (i_pat)) == MEM
5561 && (d_pat = single_set (dep)) != NULL
5562 && GET_CODE (SET_DEST (d_pat)) == MEM)
5564 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
5565 /* This is a load after a store, there is no conflict if the load reads
5566 from a cached area. Assume that loads from the stack, and from the
	 constant pool are cached, and that others will miss.  This is a
	 rough approximation.  */
5570 if ((GET_CODE (src_mem) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (src_mem))
5571 || reg_mentioned_p (stack_pointer_rtx, src_mem)
5572 || reg_mentioned_p (frame_pointer_rtx, src_mem)
5573 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
5580 static int fp_consts_inited = 0;
5582 /* Only zero is valid for VFP. Other values are also valid for FPA. */
5583 static const char * const strings_fp[8] =
5586 "4", "5", "0.5", "10"
5589 static REAL_VALUE_TYPE values_fp[8];
5592 init_fp_table (void)
  if (TARGET_VFP)
    fp_consts_inited = 1;
  else
    fp_consts_inited = 8;
5602 for (i = 0; i < fp_consts_inited; i++)
5604 r = REAL_VALUE_ATOF (strings_fp[i], DFmode);
5609 /* Return TRUE if rtx X is a valid immediate FP constant. */
5611 arm_const_double_rtx (rtx x)
5616 if (!fp_consts_inited)
5619 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5620 if (REAL_VALUE_MINUS_ZERO (r))
5623 for (i = 0; i < fp_consts_inited; i++)
5624 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5630 /* Return TRUE if rtx X is a valid immediate FPA constant. */
5632 neg_const_double_rtx_ok_for_fpa (rtx x)
5637 if (!fp_consts_inited)
5640 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
5641 r = REAL_VALUE_NEGATE (r);
5642 if (REAL_VALUE_MINUS_ZERO (r))
5645 for (i = 0; i < 8; i++)
5646 if (REAL_VALUES_EQUAL (r, values_fp[i]))
5652 /* Predicates for `match_operand' and `match_operator'. */
5654 /* Return nonzero if OP is a valid Cirrus memory address pattern. */
5656 cirrus_memory_offset (rtx op)
5658 /* Reject eliminable registers. */
5659 if (! (reload_in_progress || reload_completed)
5660 && ( reg_mentioned_p (frame_pointer_rtx, op)
5661 || reg_mentioned_p (arg_pointer_rtx, op)
5662 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5663 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5664 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5665 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5668 if (GET_CODE (op) == MEM)
5674 /* Match: (mem (reg)). */
5675 if (GET_CODE (ind) == REG)
5681 if (GET_CODE (ind) == PLUS
5682 && GET_CODE (XEXP (ind, 0)) == REG
5683 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5684 && GET_CODE (XEXP (ind, 1)) == CONST_INT)
5691 /* Return TRUE if OP is a valid coprocessor memory address pattern.
5692 WB is true if full writeback address modes are allowed and is false
   if limited writeback address modes (POST_INC and PRE_DEC) are
   allowed.  */
5697 arm_coproc_mem_operand (rtx op, bool wb)
5701 /* Reject eliminable registers. */
5702 if (! (reload_in_progress || reload_completed)
5703 && ( reg_mentioned_p (frame_pointer_rtx, op)
5704 || reg_mentioned_p (arg_pointer_rtx, op)
5705 || reg_mentioned_p (virtual_incoming_args_rtx, op)
5706 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
5707 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
5708 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
5711 /* Constants are converted into offsets from labels. */
5712 if (GET_CODE (op) != MEM)
5717 if (reload_completed
5718 && (GET_CODE (ind) == LABEL_REF
5719 || (GET_CODE (ind) == CONST
5720 && GET_CODE (XEXP (ind, 0)) == PLUS
5721 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
5722 && GET_CODE (XEXP (XEXP (ind, 0), 1)) == CONST_INT)))
5725 /* Match: (mem (reg)). */
5726 if (GET_CODE (ind) == REG)
5727 return arm_address_register_rtx_p (ind, 0);
  /* Autoincrement addressing modes.  POST_INC and PRE_DEC are
5730 acceptable in any case (subject to verification by
5731 arm_address_register_rtx_p). We need WB to be true to accept
5732 PRE_INC and POST_DEC. */
5733 if (GET_CODE (ind) == POST_INC
5734 || GET_CODE (ind) == PRE_DEC
5736 && (GET_CODE (ind) == PRE_INC
5737 || GET_CODE (ind) == POST_DEC)))
5738 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
5741 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
5742 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
5743 && GET_CODE (XEXP (ind, 1)) == PLUS
5744 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
5745 ind = XEXP (ind, 1);
5750 if (GET_CODE (ind) == PLUS
5751 && GET_CODE (XEXP (ind, 0)) == REG
5752 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
5753 && GET_CODE (XEXP (ind, 1)) == CONST_INT
5754 && INTVAL (XEXP (ind, 1)) > -1024
5755 && INTVAL (XEXP (ind, 1)) < 1024
5756 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
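/* Examples (illustrative): [r4], [r4, #8] and [r4, #-1020] are acceptable
   coprocessor addresses, while [r4, #2] is not, since the immediate must
   be a multiple of 4 strictly between -1024 and 1024.  */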
5762 /* Return true if X is a register that will be eliminated later on. */
5764 arm_eliminable_register (rtx x)
5766 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
5767 || REGNO (x) == ARG_POINTER_REGNUM
5768 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
5769 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
/* Return GENERAL_REGS if a scratch register is required to reload x
   to/from coprocessor registers.  Otherwise return NO_REGS.  */
5776 coproc_secondary_reload_class (enum machine_mode mode, rtx x, bool wb)
5778 if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
5781 return GENERAL_REGS;
/* Values which must be returned in the most-significant end of the return
   register.  */
5788 arm_return_in_msb (tree valtype)
5790 return (TARGET_AAPCS_BASED
5792 && (AGGREGATE_TYPE_P (valtype)
5793 || TREE_CODE (valtype) == COMPLEX_TYPE));
/* Returns TRUE if INSN is an "LDR REG, ADDR" instruction.
   Used by the Cirrus Maverick code, which has to work around
   a hardware bug triggered by such instructions.  */
5800 arm_memory_load_p (rtx insn)
  rtx body, lhs, rhs;
5804 if (insn == NULL_RTX || GET_CODE (insn) != INSN)
5807 body = PATTERN (insn);
5809 if (GET_CODE (body) != SET)
5812 lhs = XEXP (body, 0);
5813 rhs = XEXP (body, 1);
5815 lhs = REG_OR_SUBREG_RTX (lhs);
5817 /* If the destination is not a general purpose
5818 register we do not have to worry. */
5819 if (GET_CODE (lhs) != REG
5820 || REGNO_REG_CLASS (REGNO (lhs)) != GENERAL_REGS)
5823 /* As well as loads from memory we also have to react
5824 to loads of invalid constants which will be turned
5825 into loads from the minipool. */
5826 return (GET_CODE (rhs) == MEM
5827 || GET_CODE (rhs) == SYMBOL_REF
5828 || note_invalid_constants (insn, -1, false));
5831 /* Return TRUE if INSN is a Cirrus instruction. */
5833 arm_cirrus_insn_p (rtx insn)
5835 enum attr_cirrus attr;
5837 /* get_attr cannot accept USE or CLOBBER. */
5839 || GET_CODE (insn) != INSN
5840 || GET_CODE (PATTERN (insn)) == USE
5841 || GET_CODE (PATTERN (insn)) == CLOBBER)
5844 attr = get_attr_cirrus (insn);
5846 return attr != CIRRUS_NOT;
5849 /* Cirrus reorg for invalid instruction combinations. */
5851 cirrus_reorg (rtx first)
5853 enum attr_cirrus attr;
5854 rtx body = PATTERN (first);
  /* Any branch must be followed by 2 non-Cirrus instructions.  */
5859 if (GET_CODE (first) == JUMP_INSN && GET_CODE (body) != RETURN)
5862 t = next_nonnote_insn (first);
5864 if (arm_cirrus_insn_p (t))
5867 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5871 emit_insn_after (gen_nop (), first);
5876 /* (float (blah)) is in parallel with a clobber. */
5877 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
5878 body = XVECEXP (body, 0, 0);
5880 if (GET_CODE (body) == SET)
5882 rtx lhs = XEXP (body, 0), rhs = XEXP (body, 1);
5884 /* cfldrd, cfldr64, cfstrd, cfstr64 must
	 be followed by a non-Cirrus insn.  */
5886 if (get_attr_cirrus (first) == CIRRUS_DOUBLE)
5888 if (arm_cirrus_insn_p (next_nonnote_insn (first)))
5889 emit_insn_after (gen_nop (), first);
5893 else if (arm_memory_load_p (first))
5895 unsigned int arm_regno;
5897 /* Any ldr/cfmvdlr, ldr/cfmvdhr, ldr/cfmvsr, ldr/cfmv64lr,
5898 ldr/cfmv64hr combination where the Rd field is the same
	 in both instructions must be split with a non-Cirrus
5906 /* Get Arm register number for ldr insn. */
5907 if (GET_CODE (lhs) == REG)
5908 arm_regno = REGNO (lhs);
5911 gcc_assert (GET_CODE (rhs) == REG);
5912 arm_regno = REGNO (rhs);
5916 first = next_nonnote_insn (first);
5918 if (! arm_cirrus_insn_p (first))
5921 body = PATTERN (first);
5923 /* (float (blah)) is in parallel with a clobber. */
5924 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0))
5925 body = XVECEXP (body, 0, 0);
5927 if (GET_CODE (body) == FLOAT)
5928 body = XEXP (body, 0);
5930 if (get_attr_cirrus (first) == CIRRUS_MOVE
5931 && GET_CODE (XEXP (body, 1)) == REG
5932 && arm_regno == REGNO (XEXP (body, 1)))
5933 emit_insn_after (gen_nop (), first);
5939 /* get_attr cannot accept USE or CLOBBER. */
5941 || GET_CODE (first) != INSN
5942 || GET_CODE (PATTERN (first)) == USE
5943 || GET_CODE (PATTERN (first)) == CLOBBER)
5946 attr = get_attr_cirrus (first);
5948 /* Any coprocessor compare instruction (cfcmps, cfcmpd, ...)
5949 must be followed by a non-coprocessor instruction. */
5950 if (attr == CIRRUS_COMPARE)
5954 t = next_nonnote_insn (first);
5956 if (arm_cirrus_insn_p (t))
5959 if (arm_cirrus_insn_p (next_nonnote_insn (t)))
5963 emit_insn_after (gen_nop (), first);
5969 /* Return TRUE if X references a SYMBOL_REF. */
5971 symbol_mentioned_p (rtx x)
5976 if (GET_CODE (x) == SYMBOL_REF)
5979 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
5980 are constant offsets, not symbols. */
5981 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
5984 fmt = GET_RTX_FORMAT (GET_CODE (x));
5986 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
5992 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
5993 if (symbol_mentioned_p (XVECEXP (x, i, j)))
5996 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
6003 /* Return TRUE if X references a LABEL_REF. */
6005 label_mentioned_p (rtx x)
6010 if (GET_CODE (x) == LABEL_REF)
6013 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
6014 instruction, but they are constant offsets, not symbols. */
6015 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
6018 fmt = GET_RTX_FORMAT (GET_CODE (x));
6019 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
6025 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
6026 if (label_mentioned_p (XVECEXP (x, i, j)))
6029 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
6037 tls_mentioned_p (rtx x)
6039 switch (GET_CODE (x))
6042 return tls_mentioned_p (XEXP (x, 0));
6045 if (XINT (x, 1) == UNSPEC_TLS)
6053 /* Must not copy a SET whose source operand is PC-relative. */
6056 arm_cannot_copy_insn_p (rtx insn)
6058 rtx pat = PATTERN (insn);
6060 if (GET_CODE (pat) == PARALLEL
6061 && GET_CODE (XVECEXP (pat, 0, 0)) == SET)
6063 rtx rhs = SET_SRC (XVECEXP (pat, 0, 0));
6065 if (GET_CODE (rhs) == UNSPEC
6066 && XINT (rhs, 1) == UNSPEC_PIC_BASE)
6069 if (GET_CODE (rhs) == MEM
6070 && GET_CODE (XEXP (rhs, 0)) == UNSPEC
6071 && XINT (XEXP (rhs, 0), 1) == UNSPEC_PIC_BASE)
6081 enum rtx_code code = GET_CODE (x);
6098 /* Return 1 if memory locations are adjacent. */
6100 adjacent_mem_locations (rtx a, rtx b)
6102 /* We don't guarantee to preserve the order of these memory refs. */
6103 if (volatile_refs_p (a) || volatile_refs_p (b))
6106 if ((GET_CODE (XEXP (a, 0)) == REG
6107 || (GET_CODE (XEXP (a, 0)) == PLUS
6108 && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
6109 && (GET_CODE (XEXP (b, 0)) == REG
6110 || (GET_CODE (XEXP (b, 0)) == PLUS
6111 && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
6113 HOST_WIDE_INT val0 = 0, val1 = 0;
6117 if (GET_CODE (XEXP (a, 0)) == PLUS)
6119 reg0 = XEXP (XEXP (a, 0), 0);
6120 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
6125 if (GET_CODE (XEXP (b, 0)) == PLUS)
6127 reg1 = XEXP (XEXP (b, 0), 0);
6128 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
6133 /* Don't accept any offset that will require multiple
6134 instructions to handle, since this would cause the
6135 arith_adjacentmem pattern to output an overlong sequence. */
  if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
6139 /* Don't allow an eliminable register: register elimination can make
6140 the offset too large. */
6141 if (arm_eliminable_register (reg0))
6144 val_diff = val1 - val0;
6148 /* If the target has load delay slots, then there's no benefit
6149 to using an ldm instruction unless the offset is zero and
6150 we are optimizing for size. */
6151 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
6152 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
6153 && (val_diff == 4 || val_diff == -4));
6156 return ((REGNO (reg0) == REGNO (reg1))
6157 && (val_diff == 4 || val_diff == -4));
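/* Examples (illustrative): [r4] and [r4, #4] are adjacent, as are
   [r4, #8] and [r4, #4]; [r4] and [r5, #4] are not, because the base
   registers differ, and neither are [r4] and [r4, #8], because the
   offsets differ by more than 4.  */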
6164 load_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6165 HOST_WIDE_INT *load_offset)
6167 int unsorted_regs[4];
6168 HOST_WIDE_INT unsorted_offsets[4];
6173 /* Can only handle 2, 3, or 4 insns at present,
6174 though could be easily extended if required. */
6175 gcc_assert (nops >= 2 && nops <= 4);
6177 /* Loop over the operands and check that the memory references are
6178 suitable (i.e. immediate offsets from the same base register). At
     the same time, extract the target register, and the memory
     offsets.  */
6181 for (i = 0; i < nops; i++)
6186 /* Convert a subreg of a mem into the mem itself. */
6187 if (GET_CODE (operands[nops + i]) == SUBREG)
6188 operands[nops + i] = alter_subreg (operands + (nops + i));
6190 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6192 /* Don't reorder volatile memory references; it doesn't seem worth
6193 looking for the case where the order is ok anyway. */
6194 if (MEM_VOLATILE_P (operands[nops + i]))
6197 offset = const0_rtx;
6199 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6200 || (GET_CODE (reg) == SUBREG
6201 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6202 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6203 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6205 || (GET_CODE (reg) == SUBREG
6206 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6207 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6212 base_reg = REGNO (reg);
6213 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6214 ? REGNO (operands[i])
6215 : REGNO (SUBREG_REG (operands[i])));
6220 if (base_reg != (int) REGNO (reg))
6221 /* Not addressed from the same base register. */
6224 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6225 ? REGNO (operands[i])
6226 : REGNO (SUBREG_REG (operands[i])));
6227 if (unsorted_regs[i] < unsorted_regs[order[0]])
6231 /* If it isn't an integer register, or if it overwrites the
6232 base register but isn't the last insn in the list, then
6233 we can't do this. */
6234 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14
6235 || (i != nops - 1 && unsorted_regs[i] == base_reg))
6238 unsorted_offsets[i] = INTVAL (offset);
6241 /* Not a suitable memory address. */
6245 /* All the useful information has now been extracted from the
6246 operands into unsorted_regs and unsorted_offsets; additionally,
6247 order[0] has been set to the lowest numbered register in the
6248 list. Sort the registers into order, and check that the memory
6249 offsets are ascending and adjacent. */
6251 for (i = 1; i < nops; i++)
6255 order[i] = order[i - 1];
6256 for (j = 0; j < nops; j++)
6257 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6258 && (order[i] == order[i - 1]
6259 || unsorted_regs[j] < unsorted_regs[order[i]]))
      /* Have we found a suitable register? if not, one must be used more
	 than once.  */
6264 if (order[i] == order[i - 1])
6267 /* Is the memory address adjacent and ascending? */
6268 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6276 for (i = 0; i < nops; i++)
6277 regs[i] = unsorted_regs[order[i]];
6279 *load_offset = unsorted_offsets[order[0]];
6282 if (unsorted_offsets[order[0]] == 0)
6283 return 1; /* ldmia */
6285 if (TARGET_ARM && unsorted_offsets[order[0]] == 4)
6286 return 2; /* ldmib */
6288 if (TARGET_ARM && unsorted_offsets[order[nops - 1]] == 0)
6289 return 3; /* ldmda */
6291 if (unsorted_offsets[order[nops - 1]] == -4)
6292 return 4; /* ldmdb */
6294 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
6295 if the offset isn't small enough. The reason 2 ldrs are faster
     is that these ARMs are able to do more than one cache access
6297 in a single cycle. The ARM9 and StrongARM have Harvard caches,
6298 whilst the ARM8 has a double bandwidth cache. This means that
6299 these cores can do both an instruction fetch and a data fetch in
6300 a single cycle, so the trick of calculating the address into a
6301 scratch register (one of the result regs) and then doing a load
6302 multiple actually becomes slower (and no smaller in code size).
6303 That is the transformation
6305 ldr rd1, [rbase + offset]
6306 ldr rd2, [rbase + offset + 4]
6310 add rd1, rbase, offset
6311 ldmia rd1, {rd1, rd2}
6313 produces worse code -- '3 cycles + any stalls on rd2' instead of
6314 '2 cycles + any stalls on rd2'. On ARMs with only one cache
6315 access per cycle, the first sequence could never complete in less
6316 than 6 cycles, whereas the ldm sequence would only take 5 and
     would make better use of sequential accesses if not hitting the
     cache.
6320 We cheat here and test 'arm_ld_sched' which we currently know to
6321 only be true for the ARM8, ARM9 and StrongARM. If this ever
6322 changes, then the test below needs to be reworked. */
6323 if (nops == 2 && arm_ld_sched)
  /* Can't do it without setting up the offset; only do this if it takes
     no more than one insn.  */
6328 return (const_ok_for_arm (unsorted_offsets[order[0]])
6329 || const_ok_for_arm (-unsorted_offsets[order[0]])) ? 5 : 0;
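/* Summary of the return values (illustrative, matching emit_ldm_seq
   below): 1 = ldmia, 2 = ldmib, 3 = ldmda, 4 = ldmdb, 5 = add/sub the
   offset into the first destination register and then ldmia, and 0 = no
   load-multiple is possible.  For example, loading r0 from [r4] and r1
   from [r4, #4] yields 1, i.e. "ldmia r4, {r0, r1}".  */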
6333 emit_ldm_seq (rtx *operands, int nops)
6337 HOST_WIDE_INT offset;
6341 switch (load_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6344 strcpy (buf, "ldm%(ia%)\t");
6348 strcpy (buf, "ldm%(ib%)\t");
6352 strcpy (buf, "ldm%(da%)\t");
6356 strcpy (buf, "ldm%(db%)\t");
6361 sprintf (buf, "add%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6362 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6365 sprintf (buf, "sub%%?\t%s%s, %s%s, #%ld", REGISTER_PREFIX,
6366 reg_names[regs[0]], REGISTER_PREFIX, reg_names[base_reg],
6368 output_asm_insn (buf, operands);
6370 strcpy (buf, "ldm%(ia%)\t");
6377 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6378 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6380 for (i = 1; i < nops; i++)
6381 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6382 reg_names[regs[i]]);
6384 strcat (buf, "}\t%@ phole ldm");
6386 output_asm_insn (buf, operands);
6391 store_multiple_sequence (rtx *operands, int nops, int *regs, int *base,
6392 HOST_WIDE_INT * load_offset)
6394 int unsorted_regs[4];
6395 HOST_WIDE_INT unsorted_offsets[4];
6400 /* Can only handle 2, 3, or 4 insns at present, though could be easily
6401 extended if required. */
6402 gcc_assert (nops >= 2 && nops <= 4);
6404 /* Loop over the operands and check that the memory references are
6405 suitable (i.e. immediate offsets from the same base register). At
     the same time, extract the target register, and the memory
     offsets.  */
6408 for (i = 0; i < nops; i++)
6413 /* Convert a subreg of a mem into the mem itself. */
6414 if (GET_CODE (operands[nops + i]) == SUBREG)
6415 operands[nops + i] = alter_subreg (operands + (nops + i));
6417 gcc_assert (GET_CODE (operands[nops + i]) == MEM);
6419 /* Don't reorder volatile memory references; it doesn't seem worth
6420 looking for the case where the order is ok anyway. */
6421 if (MEM_VOLATILE_P (operands[nops + i]))
6424 offset = const0_rtx;
6426 if ((GET_CODE (reg = XEXP (operands[nops + i], 0)) == REG
6427 || (GET_CODE (reg) == SUBREG
6428 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6429 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
6430 && ((GET_CODE (reg = XEXP (XEXP (operands[nops + i], 0), 0))
6432 || (GET_CODE (reg) == SUBREG
6433 && GET_CODE (reg = SUBREG_REG (reg)) == REG))
6434 && (GET_CODE (offset = XEXP (XEXP (operands[nops + i], 0), 1))
6439 base_reg = REGNO (reg);
6440 unsorted_regs[0] = (GET_CODE (operands[i]) == REG
6441 ? REGNO (operands[i])
6442 : REGNO (SUBREG_REG (operands[i])));
6447 if (base_reg != (int) REGNO (reg))
6448 /* Not addressed from the same base register. */
6451 unsorted_regs[i] = (GET_CODE (operands[i]) == REG
6452 ? REGNO (operands[i])
6453 : REGNO (SUBREG_REG (operands[i])));
6454 if (unsorted_regs[i] < unsorted_regs[order[0]])
6458 /* If it isn't an integer register, then we can't do this. */
6459 if (unsorted_regs[i] < 0 || unsorted_regs[i] > 14)
6462 unsorted_offsets[i] = INTVAL (offset);
6465 /* Not a suitable memory address. */
6469 /* All the useful information has now been extracted from the
6470 operands into unsorted_regs and unsorted_offsets; additionally,
6471 order[0] has been set to the lowest numbered register in the
6472 list. Sort the registers into order, and check that the memory
6473 offsets are ascending and adjacent. */
6475 for (i = 1; i < nops; i++)
6479 order[i] = order[i - 1];
6480 for (j = 0; j < nops; j++)
6481 if (unsorted_regs[j] > unsorted_regs[order[i - 1]]
6482 && (order[i] == order[i - 1]
6483 || unsorted_regs[j] < unsorted_regs[order[i]]))
      /* Have we found a suitable register? if not, one must be used more
	 than once.  */
6488 if (order[i] == order[i - 1])
6491 /* Is the memory address adjacent and ascending? */
6492 if (unsorted_offsets[order[i]] != unsorted_offsets[order[i - 1]] + 4)
6500 for (i = 0; i < nops; i++)
6501 regs[i] = unsorted_regs[order[i]];
6503 *load_offset = unsorted_offsets[order[0]];
6506 if (unsorted_offsets[order[0]] == 0)
6507 return 1; /* stmia */
6509 if (unsorted_offsets[order[0]] == 4)
6510 return 2; /* stmib */
6512 if (unsorted_offsets[order[nops - 1]] == 0)
6513 return 3; /* stmda */
6515 if (unsorted_offsets[order[nops - 1]] == -4)
6516 return 4; /* stmdb */
6522 emit_stm_seq (rtx *operands, int nops)
6526 HOST_WIDE_INT offset;
6530 switch (store_multiple_sequence (operands, nops, regs, &base_reg, &offset))
6533 strcpy (buf, "stm%(ia%)\t");
6537 strcpy (buf, "stm%(ib%)\t");
6541 strcpy (buf, "stm%(da%)\t");
6545 strcpy (buf, "stm%(db%)\t");
6552 sprintf (buf + strlen (buf), "%s%s, {%s%s", REGISTER_PREFIX,
6553 reg_names[base_reg], REGISTER_PREFIX, reg_names[regs[0]]);
6555 for (i = 1; i < nops; i++)
6556 sprintf (buf + strlen (buf), ", %s%s", REGISTER_PREFIX,
6557 reg_names[regs[i]]);
6559 strcat (buf, "}\t%@ phole stm");
6561 output_asm_insn (buf, operands);
6565 /* Routines for use in generating RTL. */
6568 arm_gen_load_multiple (int base_regno, int count, rtx from, int up,
6569 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6571 HOST_WIDE_INT offset = *offsetp;
6574 int sign = up ? 1 : -1;
6577 /* XScale has load-store double instructions, but they have stricter
     alignment requirements than load-store multiple, so we cannot
     use them.
6581 For XScale ldm requires 2 + NREGS cycles to complete and blocks
6582 the pipeline until completion.
     An ldr instruction takes 1-3 cycles, but does not block the
     pipeline.
6599 Best case ldr will always win. However, the more ldr instructions
6600 we issue, the less likely we are to be able to schedule them well.
6601 Using ldr instructions also increases code size.
6603 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
6604 for counts of 3 or 4 regs. */
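/* For example (illustrative): on XScale a two-word copy is emitted as
   two ldr instructions (1-3 non-blocking cycles each) rather than
   "ldmia rB, {r0, r1}", which would block the pipeline for
   2 + 2 = 4 cycles.  */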
6605 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6611 for (i = 0; i < count; i++)
6613 addr = plus_constant (from, i * 4 * sign);
6614 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6615 emit_move_insn (gen_rtx_REG (SImode, base_regno + i), mem);
6621 emit_move_insn (from, plus_constant (from, count * 4 * sign));
6631 result = gen_rtx_PARALLEL (VOIDmode,
6632 rtvec_alloc (count + (write_back ? 1 : 0)));
6635 XVECEXP (result, 0, 0)
6636 = gen_rtx_SET (VOIDmode, from, plus_constant (from, count * 4 * sign));
6641 for (j = 0; i < count; i++, j++)
6643 addr = plus_constant (from, j * 4 * sign);
6644 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6645 XVECEXP (result, 0, i)
6646 = gen_rtx_SET (VOIDmode, gen_rtx_REG (SImode, base_regno + j), mem);
6657 arm_gen_store_multiple (int base_regno, int count, rtx to, int up,
6658 int write_back, rtx basemem, HOST_WIDE_INT *offsetp)
6660 HOST_WIDE_INT offset = *offsetp;
6663 int sign = up ? 1 : -1;
6666 /* See arm_gen_load_multiple for discussion of
6667 the pros/cons of ldm/stm usage for XScale. */
6668 if (arm_tune_xscale && count <= 2 && ! optimize_size)
6674 for (i = 0; i < count; i++)
6676 addr = plus_constant (to, i * 4 * sign);
6677 mem = adjust_automodify_address (basemem, SImode, addr, offset);
6678 emit_move_insn (mem, gen_rtx_REG (SImode, base_regno + i));
6684 emit_move_insn (to, plus_constant (to, count * 4 * sign));
6694 result = gen_rtx_PARALLEL (VOIDmode,
6695 rtvec_alloc (count + (write_back ? 1 : 0)));
6698 XVECEXP (result, 0, 0)
6699 = gen_rtx_SET (VOIDmode, to,
6700 plus_constant (to, count * 4 * sign));
6705 for (j = 0; i < count; i++, j++)
6707 addr = plus_constant (to, j * 4 * sign);
6708 mem = adjust_automodify_address_nv (basemem, SImode, addr, offset);
6709 XVECEXP (result, 0, i)
6710 = gen_rtx_SET (VOIDmode, mem, gen_rtx_REG (SImode, base_regno + j));
6721 arm_gen_movmemqi (rtx *operands)
6723 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
6724 HOST_WIDE_INT srcoffset, dstoffset;
6726 rtx src, dst, srcbase, dstbase;
6727 rtx part_bytes_reg = NULL;
6730 if (GET_CODE (operands[2]) != CONST_INT
6731 || GET_CODE (operands[3]) != CONST_INT
6732 || INTVAL (operands[2]) > 64
6733 || INTVAL (operands[3]) & 3)
6736 dstbase = operands[0];
6737 srcbase = operands[1];
6739 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
6740 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
6742 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
6743 out_words_to_go = INTVAL (operands[2]) / 4;
6744 last_bytes = INTVAL (operands[2]) & 3;
6745 dstoffset = srcoffset = 0;
6747 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
6748 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
6750 for (i = 0; in_words_to_go >= 2; i+=4)
6752 if (in_words_to_go > 4)
6753 emit_insn (arm_gen_load_multiple (0, 4, src, TRUE, TRUE,
6754 srcbase, &srcoffset));
6756 emit_insn (arm_gen_load_multiple (0, in_words_to_go, src, TRUE,
6757 FALSE, srcbase, &srcoffset));
6759 if (out_words_to_go)
6761 if (out_words_to_go > 4)
6762 emit_insn (arm_gen_store_multiple (0, 4, dst, TRUE, TRUE,
6763 dstbase, &dstoffset));
6764 else if (out_words_to_go != 1)
6765 emit_insn (arm_gen_store_multiple (0, out_words_to_go,
6769 dstbase, &dstoffset));
6772 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6773 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
6774 if (last_bytes != 0)
6776 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
6782 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
6783 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
6786 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
6787 if (out_words_to_go)
6791 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6792 sreg = copy_to_reg (mem);
6794 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
6795 emit_move_insn (mem, sreg);
6798 gcc_assert (!in_words_to_go); /* Sanity check */
6803 gcc_assert (in_words_to_go > 0);
6805 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
6806 part_bytes_reg = copy_to_mode_reg (SImode, mem);
6809 gcc_assert (!last_bytes || part_bytes_reg);
6811 if (BYTES_BIG_ENDIAN && last_bytes)
6813 rtx tmp = gen_reg_rtx (SImode);
6815 /* The bytes we want are in the top end of the word. */
6816 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
6817 GEN_INT (8 * (4 - last_bytes))));
6818 part_bytes_reg = tmp;
6822 mem = adjust_automodify_address (dstbase, QImode,
6823 plus_constant (dst, last_bytes - 1),
6824 dstoffset + last_bytes - 1);
6825 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6829 tmp = gen_reg_rtx (SImode);
6830 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
6831 part_bytes_reg = tmp;
6840 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
6841 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
6845 rtx tmp = gen_reg_rtx (SImode);
6846 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
6847 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
6848 part_bytes_reg = tmp;
6855 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
6856 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
6863 /* Select a dominance comparison mode if possible for a test of the general
6864 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
6865 COND_OR == DOM_CC_X_AND_Y => (X && Y)
6866 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
6867 COND_OR == DOM_CC_X_OR_Y => (X || Y)
6868 In all cases OP will be either EQ or NE, but we don't need to know which
6869 here. If we are unable to support a dominance comparison we return
6870 CC mode. This will then fail to match for the RTL expressions that
6871 generate this call. */
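/* An illustrative example (assumed; registers are arbitrary): the test
   "a == 0 && b == 0" reaches this code as an AND of two EQ comparisons
   against zero. With COND_OR == DOM_CC_X_AND_Y and cond1 == cond2 == EQ
   we return CC_DEQmode, and the output patterns can then emit

	cmp	r0, #0
	cmpeq	r1, #0

   so that a single conditional branch tests both conditions. */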
6873 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
6875 enum rtx_code cond1, cond2;
6878 /* Currently we will probably get the wrong result if the individual
6879 comparisons are not simple. This also ensures that it is safe to
6880 reverse a comparison if necessary. */
6881 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
6883 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
6887 /* The if_then_else variant of this tests the second condition if the
6888 first passes, but is true if the first fails. Reverse the first
6889 condition to get a true "inclusive-or" expression. */
6890 if (cond_or == DOM_CC_NX_OR_Y)
6891 cond1 = reverse_condition (cond1);
6893 /* If the comparisons are not equal, and one doesn't dominate the other,
6894 then we can't do this. */
6896 && !comparison_dominates_p (cond1, cond2)
6897 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
6902 enum rtx_code temp = cond1;
6910 if (cond_or == DOM_CC_X_AND_Y)
6915 case EQ: return CC_DEQmode;
6916 case LE: return CC_DLEmode;
6917 case LEU: return CC_DLEUmode;
6918 case GE: return CC_DGEmode;
6919 case GEU: return CC_DGEUmode;
6920 default: gcc_unreachable ();
6924 if (cond_or == DOM_CC_X_AND_Y)
6940 if (cond_or == DOM_CC_X_AND_Y)
6956 if (cond_or == DOM_CC_X_AND_Y)
6972 if (cond_or == DOM_CC_X_AND_Y)
/* The remaining cases only occur when both comparisons are the
   same. */
6990 gcc_assert (cond1 == cond2);
6994 gcc_assert (cond1 == cond2);
6998 gcc_assert (cond1 == cond2);
7002 gcc_assert (cond1 == cond2);
7006 gcc_assert (cond1 == cond2);
7015 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
7017 /* All floating point compares return CCFP if it is an equality
7018 comparison, and CCFPE otherwise. */
7019 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
7039 if (TARGET_HARD_FLOAT && TARGET_MAVERICK)
7048 /* A compare with a shifted operand. Because of canonicalization, the
7049 comparison will have to be swapped when we emit the assembler. */
7050 if (GET_MODE (y) == SImode && GET_CODE (y) == REG
7051 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
7052 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
7053 || GET_CODE (x) == ROTATERT))
7056 /* This operation is performed swapped, but since we only rely on the Z
7057 flag we don't need an additional mode. */
7058 if (GET_MODE (y) == SImode && REG_P (y)
7059 && GET_CODE (x) == NEG
7060 && (op == EQ || op == NE))
7063 /* This is a special case that is used by combine to allow a
7064 comparison of a shifted byte load to be split into a zero-extend
7065 followed by a comparison of the shifted integer (only valid for
7066 equalities and unsigned inequalities). */
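/* An assumed sketch of the RTL shape involved, where the constant is
   the byte value shifted into the top bits:

	(compare (ashift:SI (subreg:SI (mem:QI ...) 0) (const_int 24))
		 (const_int 0x12000000))

   which tests the loaded byte against 0x12. */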
7067 if (GET_MODE (x) == SImode
7068 && GET_CODE (x) == ASHIFT
7069 && GET_CODE (XEXP (x, 1)) == CONST_INT && INTVAL (XEXP (x, 1)) == 24
7070 && GET_CODE (XEXP (x, 0)) == SUBREG
7071 && GET_CODE (SUBREG_REG (XEXP (x, 0))) == MEM
7072 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
7073 && (op == EQ || op == NE
7074 || op == GEU || op == GTU || op == LTU || op == LEU)
7075 && GET_CODE (y) == CONST_INT)
/* A construct for a conditional compare: if the false arm contains
   0, then both conditions must be true; otherwise either condition
   must be true. Not all conditions are possible, so CCmode is
7081 returned if it can't be done. */
7082 if (GET_CODE (x) == IF_THEN_ELSE
7083 && (XEXP (x, 2) == const0_rtx
7084 || XEXP (x, 2) == const1_rtx)
7085 && COMPARISON_P (XEXP (x, 0))
7086 && COMPARISON_P (XEXP (x, 1)))
7087 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7088 INTVAL (XEXP (x, 2)));
7090 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
7091 if (GET_CODE (x) == AND
7092 && COMPARISON_P (XEXP (x, 0))
7093 && COMPARISON_P (XEXP (x, 1)))
7094 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7097 if (GET_CODE (x) == IOR
7098 && COMPARISON_P (XEXP (x, 0))
7099 && COMPARISON_P (XEXP (x, 1)))
7100 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
7103 /* An operation (on Thumb) where we want to test for a single bit.
7104 This is done by shifting that bit up into the top bit of a
7105 scratch register; we can then branch on the sign bit. */
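/* For example (illustrative), testing bit 3 of r0 becomes

	lsl	r3, r0, #28	@ move bit 3 into the sign bit
	bmi	.Lbit_set

   so only the N flag of the shifted result is needed. */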
7107 && GET_MODE (x) == SImode
7108 && (op == EQ || op == NE)
7109 && GET_CODE (x) == ZERO_EXTRACT
7110 && XEXP (x, 1) == const1_rtx)
/* An operation that sets the condition codes as a side-effect; the
   V flag is not set correctly, so we can only use comparisons where
   this doesn't matter. (For LT and GE we can use "mi" and "pl"
   instead.) */
7117 /* ??? Does the ZERO_EXTRACT case really apply to thumb2? */
7118 if (GET_MODE (x) == SImode
7120 && (op == EQ || op == NE || op == LT || op == GE)
7121 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
7122 || GET_CODE (x) == AND || GET_CODE (x) == IOR
7123 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
7124 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
7125 || GET_CODE (x) == LSHIFTRT
7126 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
7127 || GET_CODE (x) == ROTATERT
7128 || (TARGET_32BIT && GET_CODE (x) == ZERO_EXTRACT)))
7131 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
7134 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
7135 && GET_CODE (x) == PLUS
7136 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
7142 /* X and Y are two things to compare using CODE. Emit the compare insn and
7143 return the rtx for register 0 in the proper mode. FP means this is a
7144 floating point compare: I don't think that it is needed on the arm. */
7146 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y)
7148 enum machine_mode mode = SELECT_CC_MODE (code, x, y);
7149 rtx cc_reg = gen_rtx_REG (mode, CC_REGNUM);
7151 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
7156 /* Generate a sequence of insns that will generate the correct return
address mask depending on the physical architecture that the program
is running on. */
7160 arm_gen_return_addr_mask (void)
7162 rtx reg = gen_reg_rtx (Pmode);
7164 emit_insn (gen_return_addr_mask (reg));
7169 arm_reload_in_hi (rtx *operands)
7171 rtx ref = operands[1];
7173 HOST_WIDE_INT offset = 0;
7175 if (GET_CODE (ref) == SUBREG)
7177 offset = SUBREG_BYTE (ref);
7178 ref = SUBREG_REG (ref);
7181 if (GET_CODE (ref) == REG)
7183 /* We have a pseudo which has been spilt onto the stack; there
7184 are two cases here: the first where there is a simple
7185 stack-slot replacement and a second where the stack-slot is
7186 out of range, or is used as a subreg. */
7187 if (reg_equiv_mem[REGNO (ref)])
7189 ref = reg_equiv_mem[REGNO (ref)];
7190 base = find_replacement (&XEXP (ref, 0));
7193 /* The slot is out of range, or was dressed up in a SUBREG. */
7194 base = reg_equiv_address[REGNO (ref)];
7197 base = find_replacement (&XEXP (ref, 0));
7199 /* Handle the case where the address is too complex to be offset by 1. */
7200 if (GET_CODE (base) == MINUS
7201 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
7203 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7205 emit_set_insn (base_plus, base);
7208 else if (GET_CODE (base) == PLUS)
7210 /* The addend must be CONST_INT, or we would have dealt with it above. */
7211 HOST_WIDE_INT hi, lo;
7213 offset += INTVAL (XEXP (base, 1));
7214 base = XEXP (base, 0);
7216 /* Rework the address into a legal sequence of insns. */
7217 /* Valid range for lo is -4095 -> 4095 */
7220 : -((-offset) & 0xfff));
7222 /* Corner case, if lo is the max offset then we would be out of range
7223 once we have added the additional 1 below, so bump the msb into the
7224 pre-loading insn(s). */
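/* Worked example (illustrative): for offset == 4095, lo would be 4095,
   but the second byte access below needs offset + 1 == 4096, which is
   outside the +/-4095 byte-load range. Reducing lo to 2047 moves the
   remaining 2048 into hi, which is added to the base register first. */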
7228 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7229 ^ (HOST_WIDE_INT) 0x80000000)
7230 - (HOST_WIDE_INT) 0x80000000);
7232 gcc_assert (hi + lo == offset);
7236 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7238 /* Get the base address; addsi3 knows how to handle constants
7239 that require more than one insn. */
7240 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
/* Operands[2] may overlap operands[0] (though it won't overlap
   operands[1]); that's why we asked for a DImode reg -- so we can
   use the half that does not overlap. */
7249 if (REGNO (operands[2]) == REGNO (operands[0]))
7250 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7252 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
7254 emit_insn (gen_zero_extendqisi2 (scratch,
7255 gen_rtx_MEM (QImode,
7256 plus_constant (base,
7258 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
7259 gen_rtx_MEM (QImode,
7260 plus_constant (base,
7262 if (!BYTES_BIG_ENDIAN)
7263 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
7264 gen_rtx_IOR (SImode,
7267 gen_rtx_SUBREG (SImode, operands[0], 0),
7271 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
7272 gen_rtx_IOR (SImode,
7273 gen_rtx_ASHIFT (SImode, scratch,
7275 gen_rtx_SUBREG (SImode, operands[0], 0)));
/* Handle storing a half-word to memory during reload by synthesizing it as two
7279 byte stores. Take care not to clobber the input values until after we
7280 have moved them somewhere safe. This code assumes that if the DImode
7281 scratch in operands[2] overlaps either the input value or output address
7282 in some way, then that value must die in this insn (we absolutely need
7283 two scratch registers for some corner cases). */
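/* A minimal sketch of the little-endian sequence this synthesizes
   (illustrative; a simple base address is assumed):

	strb	rVAL, [rBASE, #off]	@ low byte
	mov	rSCR, rVAL, lsr #8
	strb	rSCR, [rBASE, #off+1]	@ high byte  */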
7285 arm_reload_out_hi (rtx *operands)
7287 rtx ref = operands[0];
7288 rtx outval = operands[1];
7290 HOST_WIDE_INT offset = 0;
7292 if (GET_CODE (ref) == SUBREG)
7294 offset = SUBREG_BYTE (ref);
7295 ref = SUBREG_REG (ref);
7298 if (GET_CODE (ref) == REG)
7300 /* We have a pseudo which has been spilt onto the stack; there
7301 are two cases here: the first where there is a simple
7302 stack-slot replacement and a second where the stack-slot is
7303 out of range, or is used as a subreg. */
7304 if (reg_equiv_mem[REGNO (ref)])
7306 ref = reg_equiv_mem[REGNO (ref)];
7307 base = find_replacement (&XEXP (ref, 0));
7310 /* The slot is out of range, or was dressed up in a SUBREG. */
7311 base = reg_equiv_address[REGNO (ref)];
7314 base = find_replacement (&XEXP (ref, 0));
7316 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
7318 /* Handle the case where the address is too complex to be offset by 1. */
7319 if (GET_CODE (base) == MINUS
7320 || (GET_CODE (base) == PLUS && GET_CODE (XEXP (base, 1)) != CONST_INT))
7322 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7324 /* Be careful not to destroy OUTVAL. */
7325 if (reg_overlap_mentioned_p (base_plus, outval))
/* Updating base_plus might destroy outval; see if we can
   swap the scratch and base_plus. */
7329 if (!reg_overlap_mentioned_p (scratch, outval))
7332 scratch = base_plus;
7337 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
/* Be conservative and copy OUTVAL into the scratch now;
   this should only be necessary if outval is a subreg
   of something larger than a word. */
7342 /* XXX Might this clobber base? I can't see how it can,
7343 since scratch is known to overlap with OUTVAL, and
7344 must be wider than a word. */
7345 emit_insn (gen_movhi (scratch_hi, outval));
7346 outval = scratch_hi;
7350 emit_set_insn (base_plus, base);
7353 else if (GET_CODE (base) == PLUS)
7355 /* The addend must be CONST_INT, or we would have dealt with it above. */
7356 HOST_WIDE_INT hi, lo;
7358 offset += INTVAL (XEXP (base, 1));
7359 base = XEXP (base, 0);
7361 /* Rework the address into a legal sequence of insns. */
7362 /* Valid range for lo is -4095 -> 4095 */
7365 : -((-offset) & 0xfff));
7367 /* Corner case, if lo is the max offset then we would be out of range
7368 once we have added the additional 1 below, so bump the msb into the
7369 pre-loading insn(s). */
7373 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
7374 ^ (HOST_WIDE_INT) 0x80000000)
7375 - (HOST_WIDE_INT) 0x80000000);
7377 gcc_assert (hi + lo == offset);
7381 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
7383 /* Be careful not to destroy OUTVAL. */
7384 if (reg_overlap_mentioned_p (base_plus, outval))
/* Updating base_plus might destroy outval; see if we
   can swap the scratch and base_plus. */
7388 if (!reg_overlap_mentioned_p (scratch, outval))
7391 scratch = base_plus;
7396 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
/* Be conservative and copy outval into scratch now;
   this should only be necessary if outval is a
   subreg of something larger than a word. */
7401 /* XXX Might this clobber base? I can't see how it
can, since scratch is known to overlap with OUTVAL, and
must be wider than a word. */
7404 emit_insn (gen_movhi (scratch_hi, outval));
7405 outval = scratch_hi;
7409 /* Get the base address; addsi3 knows how to handle constants
7410 that require more than one insn. */
7411 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
7417 if (BYTES_BIG_ENDIAN)
7419 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7420 plus_constant (base, offset + 1)),
7421 gen_lowpart (QImode, outval)));
7422 emit_insn (gen_lshrsi3 (scratch,
7423 gen_rtx_SUBREG (SImode, outval, 0),
7425 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7426 gen_lowpart (QImode, scratch)));
7430 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (base, offset)),
7431 gen_lowpart (QImode, outval)));
7432 emit_insn (gen_lshrsi3 (scratch,
7433 gen_rtx_SUBREG (SImode, outval, 0),
7435 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
7436 plus_constant (base, offset + 1)),
7437 gen_lowpart (QImode, scratch)));
7441 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
7442 (padded to the size of a word) should be passed in a register. */
7445 arm_must_pass_in_stack (enum machine_mode mode, tree type)
7447 if (TARGET_AAPCS_BASED)
7448 return must_pass_in_stack_var_size (mode, type);
7450 return must_pass_in_stack_var_size_or_pad (mode, type);
7454 /* For use by FUNCTION_ARG_PADDING (MODE, TYPE).
7455 Return true if an argument passed on the stack should be padded upwards,
7456 i.e. if the least-significant byte has useful data.
7457 For legacy APCS ABIs we use the default. For AAPCS based ABIs small
7458 aggregate types are placed in the lowest memory address. */
7461 arm_pad_arg_upward (enum machine_mode mode, tree type)
7463 if (!TARGET_AAPCS_BASED)
7464 return DEFAULT_FUNCTION_ARG_PADDING(mode, type) == upward;
7466 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
7473 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
7474 For non-AAPCS, return !BYTES_BIG_ENDIAN if the least significant
7475 byte of the register has useful data, and return the opposite if the
7476 most significant byte does.
For AAPCS, small aggregates and small complex types are always padded
upwards. */
7481 arm_pad_reg_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
7482 tree type, int first ATTRIBUTE_UNUSED)
7484 if (TARGET_AAPCS_BASED
7486 && (AGGREGATE_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE)
7487 && int_size_in_bytes (type) <= 4)
7490 /* Otherwise, use default padding. */
7491 return !BYTES_BIG_ENDIAN;
7495 /* Print a symbolic form of X to the debug file, F. */
7497 arm_print_value (FILE *f, rtx x)
7499 switch (GET_CODE (x))
7502 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
7506 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
7514 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
7516 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
7517 if (i < (CONST_VECTOR_NUNITS (x) - 1))
7525 fprintf (f, "\"%s\"", XSTR (x, 0));
7529 fprintf (f, "`%s'", XSTR (x, 0));
7533 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
7537 arm_print_value (f, XEXP (x, 0));
7541 arm_print_value (f, XEXP (x, 0));
7543 arm_print_value (f, XEXP (x, 1));
7551 fprintf (f, "????");
7556 /* Routines for manipulation of the constant pool. */
7558 /* Arm instructions cannot load a large constant directly into a
7559 register; they have to come from a pc relative load. The constant
7560 must therefore be placed in the addressable range of the pc
7561 relative load. Depending on the precise pc relative load
7562 instruction the range is somewhere between 256 bytes and 4k. This
7563 means that we often have to dump a constant inside a function, and
7564 generate code to branch around it.
7566 It is important to minimize this, since the branches will slow
7567 things down and make the code larger.
7569 Normally we can hide the table after an existing unconditional
7570 branch so that there is no interruption of the flow, but in the
7571 worst case the code looks like this:
7589 We fix this by performing a scan after scheduling, which notices
7590 which instructions need to have their operands fetched from the
7591 constant table and builds the table.
7593 The algorithm starts by building a table of all the constants that
7594 need fixing up and all the natural barriers in the function (places
7595 where a constant table can be dropped without breaking the flow).
7596 For each fixup we note how far the pc-relative replacement will be
7597 able to reach and the offset of the instruction into the function.
7599 Having built the table we then group the fixes together to form
7600 tables that are as large as possible (subject to addressing
7601 constraints) and emit each table of constants after the last
7602 barrier that is within range of all the instructions in the group.
7603 If a group does not contain a barrier, then we forcibly create one
7604 by inserting a jump instruction into the flow. Once the table has
7605 been inserted, the insns are then modified to reference the
7606 relevant entry in the pool.
7608 Possible enhancements to the algorithm (not implemented) are:
7610 1) For some processors and object formats, there may be benefit in
7611 aligning the pools to the start of cache lines; this alignment
would need to be taken into account when calculating addressability
of a label. */
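/* A small illustrative example of the result: a function that needs an
   awkward 32-bit constant with no natural barrier in range ends up as

	ldr	r0, .LP0	@ pc-relative load from the pool
	...
	b	.Lskip		@ forced branch around the pool
   .LP0:
	.word	0x12345678
   .Lskip:
	...  */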
7615 /* These typedefs are located at the start of this file, so that
7616 they can be used in the prototypes there. This comment is to
7617 remind readers of that fact so that the following structures
7618 can be understood more easily.
7620 typedef struct minipool_node Mnode;
7621 typedef struct minipool_fixup Mfix; */
7623 struct minipool_node
7625 /* Doubly linked chain of entries. */
/* The maximum offset into the code at which this entry can be placed. While
7629 pushing fixes for forward references, all entries are sorted in order
7630 of increasing max_address. */
7631 HOST_WIDE_INT max_address;
7632 /* Similarly for an entry inserted for a backwards ref. */
7633 HOST_WIDE_INT min_address;
7634 /* The number of fixes referencing this entry. This can become zero
7635 if we "unpush" an entry. In this case we ignore the entry when we
7636 come to emit the code. */
7638 /* The offset from the start of the minipool. */
7639 HOST_WIDE_INT offset;
  /* The value in the table. */
  rtx value;
  /* The mode of the value. */
7643 enum machine_mode mode;
  /* The size of the value. With iWMMXt enabled
     sizes > 4 also imply an alignment of 8 bytes. */
  int fix_size;
};
7649 struct minipool_fixup
7653 HOST_WIDE_INT address;
7655 enum machine_mode mode;
7659 HOST_WIDE_INT forwards;
7660 HOST_WIDE_INT backwards;
7663 /* Fixes less than a word need padding out to a word boundary. */
7664 #define MINIPOOL_FIX_SIZE(mode) \
7665 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
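/* For example, an HImode fix (2 bytes) still occupies 4 bytes in the
   pool, while a DImode or DFmode fix occupies its full 8 bytes. */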
7667 static Mnode * minipool_vector_head;
7668 static Mnode * minipool_vector_tail;
7669 static rtx minipool_vector_label;
7670 static int minipool_pad;
7672 /* The linked list of all minipool fixes required for this function. */
7673 Mfix * minipool_fix_head;
7674 Mfix * minipool_fix_tail;
7675 /* The fix entry for the current minipool, once it has been placed. */
7676 Mfix * minipool_barrier;
7678 /* Determines if INSN is the start of a jump table. Returns the end
7679 of the TABLE or NULL_RTX. */
7681 is_jump_table (rtx insn)
7685 if (GET_CODE (insn) == JUMP_INSN
7686 && JUMP_LABEL (insn) != NULL
7687 && ((table = next_real_insn (JUMP_LABEL (insn)))
7688 == next_real_insn (insn))
7690 && GET_CODE (table) == JUMP_INSN
7691 && (GET_CODE (PATTERN (table)) == ADDR_VEC
7692 || GET_CODE (PATTERN (table)) == ADDR_DIFF_VEC))
7698 #ifndef JUMP_TABLES_IN_TEXT_SECTION
7699 #define JUMP_TABLES_IN_TEXT_SECTION 0
7702 static HOST_WIDE_INT
7703 get_jump_table_size (rtx insn)
/* ADDR_VECs only take room if read-only data goes into the text
   section. */
7707 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
7709 rtx body = PATTERN (insn);
7710 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
7712 HOST_WIDE_INT modesize;
7714 modesize = GET_MODE_SIZE (GET_MODE (body));
7715 size = modesize * XVECLEN (body, elt);
7719 /* Round up size of TBB table to a halfword boundary. */
7720 size = (size + 1) & ~(HOST_WIDE_INT)1;
7723 /* No padding necessary for TBH. */
7726 /* Add two bytes for alignment on Thumb. */
7739 /* Move a minipool fix MP from its current location to before MAX_MP.
7740 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
7741 constraints may need updating. */
7743 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
7744 HOST_WIDE_INT max_address)
7746 /* The code below assumes these are different. */
7747 gcc_assert (mp != max_mp);
7751 if (max_address < mp->max_address)
7752 mp->max_address = max_address;
7756 if (max_address > max_mp->max_address - mp->fix_size)
7757 mp->max_address = max_mp->max_address - mp->fix_size;
7759 mp->max_address = max_address;
7761 /* Unlink MP from its current position. Since max_mp is non-null,
7762 mp->prev must be non-null. */
7763 mp->prev->next = mp->next;
7764 if (mp->next != NULL)
7765 mp->next->prev = mp->prev;
7767 minipool_vector_tail = mp->prev;
7769 /* Re-insert it before MAX_MP. */
7771 mp->prev = max_mp->prev;
7774 if (mp->prev != NULL)
7775 mp->prev->next = mp;
7777 minipool_vector_head = mp;
7780 /* Save the new entry. */
/* Scan over the preceding entries and adjust their addresses as
   required. */
7785 while (mp->prev != NULL
7786 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7788 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7795 /* Add a constant to the minipool for a forward reference. Returns the
7796 node added or NULL if the constant will not fit in this pool. */
7798 add_minipool_forward_ref (Mfix *fix)
7800 /* If set, max_mp is the first pool_entry that has a lower
7801 constraint than the one we are trying to add. */
7802 Mnode * max_mp = NULL;
7803 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
/* If the minipool starts before the end of FIX->INSN then this FIX
   cannot be placed into the current pool. Furthermore, adding the
   new constant pool entry may cause the pool to start FIX_SIZE bytes
   earlier. */
if (minipool_vector_head
    && (fix->address + get_attr_length (fix->insn)
	>= minipool_vector_head->max_address - fix->fix_size))
  return NULL;
7815 /* Scan the pool to see if a constant with the same value has
7816 already been added. While we are doing this, also note the
location where we must insert the constant if it doesn't already
exist. */
7819 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7821 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7822 && fix->mode == mp->mode
7823 && (GET_CODE (fix->value) != CODE_LABEL
7824 || (CODE_LABEL_NUMBER (fix->value)
7825 == CODE_LABEL_NUMBER (mp->value)))
7826 && rtx_equal_p (fix->value, mp->value))
7828 /* More than one fix references this entry. */
7830 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
7833 /* Note the insertion point if necessary. */
7835 && mp->max_address > max_address)
/* If we are inserting an 8-byte aligned quantity and
7839 we have not already found an insertion point, then
7840 make sure that all such 8-byte aligned quantities are
7841 placed at the start of the pool. */
7842 if (ARM_DOUBLEWORD_ALIGN
7844 && fix->fix_size == 8
7845 && mp->fix_size != 8)
7848 max_address = mp->max_address;
7852 /* The value is not currently in the minipool, so we need to create
7853 a new entry for it. If MAX_MP is NULL, the entry will be put on
7854 the end of the list since the placement is less constrained than
7855 any existing entry. Otherwise, we insert the new fix before
MAX_MP and, if necessary, adjust the constraints on the other
entries. */
7859 mp->fix_size = fix->fix_size;
7860 mp->mode = fix->mode;
7861 mp->value = fix->value;
7863 /* Not yet required for a backwards ref. */
7864 mp->min_address = -65536;
7868 mp->max_address = max_address;
7870 mp->prev = minipool_vector_tail;
7872 if (mp->prev == NULL)
7874 minipool_vector_head = mp;
7875 minipool_vector_label = gen_label_rtx ();
7878 mp->prev->next = mp;
7880 minipool_vector_tail = mp;
7884 if (max_address > max_mp->max_address - mp->fix_size)
7885 mp->max_address = max_mp->max_address - mp->fix_size;
7887 mp->max_address = max_address;
7890 mp->prev = max_mp->prev;
7892 if (mp->prev != NULL)
7893 mp->prev->next = mp;
7895 minipool_vector_head = mp;
7898 /* Save the new entry. */
/* Scan over the preceding entries and adjust their addresses as
   required. */
7903 while (mp->prev != NULL
7904 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
7906 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
7914 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
7915 HOST_WIDE_INT min_address)
7917 HOST_WIDE_INT offset;
7919 /* The code below assumes these are different. */
7920 gcc_assert (mp != min_mp);
7924 if (min_address > mp->min_address)
7925 mp->min_address = min_address;
7929 /* We will adjust this below if it is too loose. */
7930 mp->min_address = min_address;
7932 /* Unlink MP from its current position. Since min_mp is non-null,
7933 mp->next must be non-null. */
7934 mp->next->prev = mp->prev;
7935 if (mp->prev != NULL)
7936 mp->prev->next = mp->next;
7938 minipool_vector_head = mp->next;
7940 /* Reinsert it after MIN_MP. */
7942 mp->next = min_mp->next;
7944 if (mp->next != NULL)
7945 mp->next->prev = mp;
7947 minipool_vector_tail = mp;
7953 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
7955 mp->offset = offset;
7956 if (mp->refcount > 0)
7957 offset += mp->fix_size;
7959 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
7960 mp->next->min_address = mp->min_address + mp->fix_size;
7966 /* Add a constant to the minipool for a backward reference. Returns the
7967 node added or NULL if the constant will not fit in this pool.
7969 Note that the code for insertion for a backwards reference can be
7970 somewhat confusing because the calculated offsets for each fix do
not take into account the size of the pool (which is still under
construction). */
7974 add_minipool_backward_ref (Mfix *fix)
7976 /* If set, min_mp is the last pool_entry that has a lower constraint
7977 than the one we are trying to add. */
7978 Mnode *min_mp = NULL;
7979 /* This can be negative, since it is only a constraint. */
7980 HOST_WIDE_INT min_address = fix->address - fix->backwards;
7983 /* If we can't reach the current pool from this insn, or if we can't
7984 insert this entry at the end of the pool without pushing other
7985 fixes out of range, then we don't try. This ensures that we
7986 can't fail later on. */
7987 if (min_address >= minipool_barrier->address
7988 || (minipool_vector_tail->min_address + fix->fix_size
7989 >= minipool_barrier->address))
7992 /* Scan the pool to see if a constant with the same value has
7993 already been added. While we are doing this, also note the
location where we must insert the constant if it doesn't already
exist. */
7996 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
7998 if (GET_CODE (fix->value) == GET_CODE (mp->value)
7999 && fix->mode == mp->mode
8000 && (GET_CODE (fix->value) != CODE_LABEL
8001 || (CODE_LABEL_NUMBER (fix->value)
8002 == CODE_LABEL_NUMBER (mp->value)))
8003 && rtx_equal_p (fix->value, mp->value)
8004 /* Check that there is enough slack to move this entry to the
8005 end of the table (this is conservative). */
8007 > (minipool_barrier->address
8008 + minipool_vector_tail->offset
8009 + minipool_vector_tail->fix_size)))
8012 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
8016 mp->min_address += fix->fix_size;
8019 /* Note the insertion point if necessary. */
8020 if (mp->min_address < min_address)
/* For now, we do not allow the insertion of nodes requiring 8-byte
   alignment anywhere but at the start of the pool. */
8024 if (ARM_DOUBLEWORD_ALIGN
8025 && fix->fix_size == 8 && mp->fix_size != 8)
8030 else if (mp->max_address
8031 < minipool_barrier->address + mp->offset + fix->fix_size)
8033 /* Inserting before this entry would push the fix beyond
8034 its maximum address (which can happen if we have
re-located a forwards fix); force the new fix to come
after it. */
8038 min_address = mp->min_address + fix->fix_size;
/* If we are inserting an 8-byte aligned quantity and
8041 we have not already found an insertion point, then
8042 make sure that all such 8-byte aligned quantities are
8043 placed at the start of the pool. */
8044 else if (ARM_DOUBLEWORD_ALIGN
8046 && fix->fix_size == 8
8047 && mp->fix_size < 8)
8050 min_address = mp->min_address + fix->fix_size;
8055 /* We need to create a new entry. */
8057 mp->fix_size = fix->fix_size;
8058 mp->mode = fix->mode;
8059 mp->value = fix->value;
8061 mp->max_address = minipool_barrier->address + 65536;
8063 mp->min_address = min_address;
8068 mp->next = minipool_vector_head;
8070 if (mp->next == NULL)
8072 minipool_vector_tail = mp;
8073 minipool_vector_label = gen_label_rtx ();
8076 mp->next->prev = mp;
8078 minipool_vector_head = mp;
8082 mp->next = min_mp->next;
8086 if (mp->next != NULL)
8087 mp->next->prev = mp;
8089 minipool_vector_tail = mp;
8092 /* Save the new entry. */
8100 /* Scan over the following entries and adjust their offsets. */
8101 while (mp->next != NULL)
8103 if (mp->next->min_address < mp->min_address + mp->fix_size)
8104 mp->next->min_address = mp->min_address + mp->fix_size;
8107 mp->next->offset = mp->offset + mp->fix_size;
8109 mp->next->offset = mp->offset;
8118 assign_minipool_offsets (Mfix *barrier)
8120 HOST_WIDE_INT offset = 0;
8123 minipool_barrier = barrier;
8125 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
8127 mp->offset = offset;
8129 if (mp->refcount > 0)
8130 offset += mp->fix_size;
/* Output the literal table. */
8136 dump_minipool (rtx scan)
8142 if (ARM_DOUBLEWORD_ALIGN)
8143 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
8144 if (mp->refcount > 0 && mp->fix_size == 8)
8152 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
8153 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
8155 scan = emit_label_after (gen_label_rtx (), scan);
8156 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
8157 scan = emit_label_after (minipool_vector_label, scan);
8159 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
8161 if (mp->refcount > 0)
8166 ";; Offset %u, min %ld, max %ld ",
8167 (unsigned) mp->offset, (unsigned long) mp->min_address,
8168 (unsigned long) mp->max_address);
8169 arm_print_value (dump_file, mp->value);
8170 fputc ('\n', dump_file);
8173 switch (mp->fix_size)
8175 #ifdef HAVE_consttable_1
8177 scan = emit_insn_after (gen_consttable_1 (mp->value), scan);
8181 #ifdef HAVE_consttable_2
8183 scan = emit_insn_after (gen_consttable_2 (mp->value), scan);
8187 #ifdef HAVE_consttable_4
8189 scan = emit_insn_after (gen_consttable_4 (mp->value), scan);
8193 #ifdef HAVE_consttable_8
8195 scan = emit_insn_after (gen_consttable_8 (mp->value), scan);
8208 minipool_vector_head = minipool_vector_tail = NULL;
8209 scan = emit_insn_after (gen_consttable_end (), scan);
8210 scan = emit_barrier_after (scan);
8213 /* Return the cost of forcibly inserting a barrier after INSN. */
8215 arm_barrier_cost (rtx insn)
8217 /* Basing the location of the pool on the loop depth is preferable,
8218 but at the moment, the basic block information seems to be
corrupted by this stage of the compilation. */
8221 rtx next = next_nonnote_insn (insn);
8223 if (next != NULL && GET_CODE (next) == CODE_LABEL)
8226 switch (GET_CODE (insn))
/* It will always be better to place the table before the label, rather
   than after it. */
8238 return base_cost - 10;
8241 return base_cost + 10;
8245 /* Find the best place in the insn stream in the range
8246 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
Create the barrier by inserting a jump and add a new fix entry for
it. */
8250 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
8252 HOST_WIDE_INT count = 0;
8254 rtx from = fix->insn;
8255 /* The instruction after which we will insert the jump. */
8256 rtx selected = NULL;
8258 /* The address at which the jump instruction will be placed. */
8259 HOST_WIDE_INT selected_address;
8261 HOST_WIDE_INT max_count = max_address - fix->address;
8262 rtx label = gen_label_rtx ();
8264 selected_cost = arm_barrier_cost (from);
8265 selected_address = fix->address;
8267 while (from && count < max_count)
/* This code shouldn't have been called if there was a natural barrier
   within range. */
8274 gcc_assert (GET_CODE (from) != BARRIER);
8276 /* Count the length of this insn. */
8277 count += get_attr_length (from);
8279 /* If there is a jump table, add its length. */
8280 tmp = is_jump_table (from);
8283 count += get_jump_table_size (tmp);
8285 /* Jump tables aren't in a basic block, so base the cost on
8286 the dispatch insn. If we select this location, we will
8287 still put the pool after the table. */
8288 new_cost = arm_barrier_cost (from);
8290 if (count < max_count
8291 && (!selected || new_cost <= selected_cost))
8294 selected_cost = new_cost;
8295 selected_address = fix->address + count;
8298 /* Continue after the dispatch table. */
8299 from = NEXT_INSN (tmp);
8303 new_cost = arm_barrier_cost (from);
8305 if (count < max_count
8306 && (!selected || new_cost <= selected_cost))
8309 selected_cost = new_cost;
8310 selected_address = fix->address + count;
8313 from = NEXT_INSN (from);
8316 /* Make sure that we found a place to insert the jump. */
8317 gcc_assert (selected);
8319 /* Create a new JUMP_INSN that branches around a barrier. */
8320 from = emit_jump_insn_after (gen_jump (label), selected);
8321 JUMP_LABEL (from) = label;
8322 barrier = emit_barrier_after (from);
8323 emit_label_after (label, barrier);
8325 /* Create a minipool barrier entry for the new barrier. */
8326 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
8327 new_fix->insn = barrier;
8328 new_fix->address = selected_address;
8329 new_fix->next = fix->next;
8330 fix->next = new_fix;
/* Record that there is a natural barrier in the insn stream at
   ADDRESS. */
8338 push_minipool_barrier (rtx insn, HOST_WIDE_INT address)
8340 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8343 fix->address = address;
8346 if (minipool_fix_head != NULL)
8347 minipool_fix_tail->next = fix;
8349 minipool_fix_head = fix;
8351 minipool_fix_tail = fix;
8354 /* Record INSN, which will need fixing up to load a value from the
8355 minipool. ADDRESS is the offset of the insn since the start of the
8356 function; LOC is a pointer to the part of the insn which requires
fixing; VALUE is the constant that must be loaded, which is of type
MODE. */
8360 push_minipool_fix (rtx insn, HOST_WIDE_INT address, rtx *loc,
8361 enum machine_mode mode, rtx value)
8363 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
8365 #ifdef AOF_ASSEMBLER
/* PIC symbol references need to be converted into offsets into the
   based area. */
8368 /* XXX This shouldn't be done here. */
8369 if (flag_pic && GET_CODE (value) == SYMBOL_REF)
8370 value = aof_pic_entry (value);
8371 #endif /* AOF_ASSEMBLER */
8374 fix->address = address;
8377 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
8379 fix->forwards = get_attr_pool_range (insn);
8380 fix->backwards = get_attr_neg_pool_range (insn);
8381 fix->minipool = NULL;
8383 /* If an insn doesn't have a range defined for it, then it isn't
8384 expecting to be reworked by this code. Better to stop now than
8385 to generate duff assembly code. */
8386 gcc_assert (fix->forwards || fix->backwards);
8388 /* If an entry requires 8-byte alignment then assume all constant pools
8389 require 4 bytes of padding. Trying to do this later on a per-pool
8390 basis is awkward because existing pool entries have to be modified. */
8391 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size == 8)
8397 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
8398 GET_MODE_NAME (mode),
8399 INSN_UID (insn), (unsigned long) address,
8400 -1 * (long)fix->backwards, (long)fix->forwards);
8401 arm_print_value (dump_file, fix->value);
8402 fprintf (dump_file, "\n");
8405 /* Add it to the chain of fixes. */
8408 if (minipool_fix_head != NULL)
8409 minipool_fix_tail->next = fix;
8411 minipool_fix_head = fix;
8413 minipool_fix_tail = fix;
8416 /* Return the cost of synthesizing a 64-bit constant VAL inline.
Returns the number of insns needed, or 99 if we don't know how to
generate it. */
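/* Worked example (illustrative): for the DImode constant
   0x0000000100000005, both halves (1 and 5) are valid immediates, so
   the cost is 1 + 1 = 2 insns. */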
8420 arm_const_double_inline_cost (rtx val)
8422 rtx lowpart, highpart;
8423 enum machine_mode mode;
8425 mode = GET_MODE (val);
8427 if (mode == VOIDmode)
8430 gcc_assert (GET_MODE_SIZE (mode) == 8);
8432 lowpart = gen_lowpart (SImode, val);
8433 highpart = gen_highpart_mode (SImode, mode, val);
8435 gcc_assert (GET_CODE (lowpart) == CONST_INT);
8436 gcc_assert (GET_CODE (highpart) == CONST_INT);
8438 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
8439 NULL_RTX, NULL_RTX, 0, 0)
8440 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
8441 NULL_RTX, NULL_RTX, 0, 0));
8444 /* Return true if it is worthwhile to split a 64-bit constant into two
8445 32-bit operations. This is the case if optimizing for size, or
8446 if we have load delay slots, or if one 32-bit part can be done with
8447 a single data operation. */
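/* For instance (illustrative), 0xff000000ff000000 splits into two
   copies of 0xff000000, each a valid ARM immediate, so two 32-bit
   moves beat a constant-pool load. */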
8449 arm_const_double_by_parts (rtx val)
8451 enum machine_mode mode = GET_MODE (val);
8454 if (optimize_size || arm_ld_sched)
8457 if (mode == VOIDmode)
8460 part = gen_highpart_mode (SImode, mode, val);
8462 gcc_assert (GET_CODE (part) == CONST_INT);
8464 if (const_ok_for_arm (INTVAL (part))
8465 || const_ok_for_arm (~INTVAL (part)))
8468 part = gen_lowpart (SImode, val);
8470 gcc_assert (GET_CODE (part) == CONST_INT);
8472 if (const_ok_for_arm (INTVAL (part))
8473 || const_ok_for_arm (~INTVAL (part)))
8479 /* Scan INSN and note any of its operands that need fixing.
8480 If DO_PUSHES is false we do not actually push any of the fixups
8481 needed. The function returns TRUE if any fixups were needed/pushed.
8482 This is used by arm_memory_load_p() which needs to know about loads
8483 of constants that will be converted into minipool loads. */
8485 note_invalid_constants (rtx insn, HOST_WIDE_INT address, int do_pushes)
8487 bool result = false;
8490 extract_insn (insn);
8492 if (!constrain_operands (1))
8493 fatal_insn_not_found (insn);
8495 if (recog_data.n_alternatives == 0)
/* Fill in recog_op_alt with information about the constraints of
   this insn. */
8500 preprocess_constraints ();
8502 for (opno = 0; opno < recog_data.n_operands; opno++)
8504 /* Things we need to fix can only occur in inputs. */
8505 if (recog_data.operand_type[opno] != OP_IN)
8508 /* If this alternative is a memory reference, then any mention
8509 of constants in this alternative is really to fool reload
8510 into allowing us to accept one there. We need to fix them up
8511 now so that we output the right code. */
8512 if (recog_op_alt[opno][which_alternative].memory_ok)
8514 rtx op = recog_data.operand[opno];
8516 if (CONSTANT_P (op))
8519 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
8520 recog_data.operand_mode[opno], op);
8523 else if (GET_CODE (op) == MEM
8524 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
8525 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
8529 rtx cop = avoid_constant_pool_reference (op);
8531 /* Casting the address of something to a mode narrower
8532 than a word can cause avoid_constant_pool_reference()
8533 to return the pool reference itself. That's no good to
us here. Let's just hope that we can use the
8535 constant pool value directly. */
8537 cop = get_pool_constant (XEXP (op, 0));
8539 push_minipool_fix (insn, address,
8540 recog_data.operand_loc[opno],
8541 recog_data.operand_mode[opno], cop);
8552 /* Gcc puts the pool in the wrong place for ARM, since we can only
8553 load addresses a limited distance around the pc. We do some
8554 special munging to move the constant pool values to the correct
8555 point in the code. */
8560 HOST_WIDE_INT address = 0;
8563 minipool_fix_head = minipool_fix_tail = NULL;
8565 /* The first insn must always be a note, or the code below won't
8566 scan it properly. */
8567 insn = get_insns ();
8568 gcc_assert (GET_CODE (insn) == NOTE);
8571 /* Scan all the insns and record the operands that will need fixing. */
8572 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
8574 if (TARGET_CIRRUS_FIX_INVALID_INSNS
8575 && (arm_cirrus_insn_p (insn)
8576 || GET_CODE (insn) == JUMP_INSN
8577 || arm_memory_load_p (insn)))
8578 cirrus_reorg (insn);
8580 if (GET_CODE (insn) == BARRIER)
8581 push_minipool_barrier (insn, address);
8582 else if (INSN_P (insn))
8586 note_invalid_constants (insn, address, true);
8587 address += get_attr_length (insn);
8589 /* If the insn is a vector jump, add the size of the table
8590 and skip the table. */
8591 if ((table = is_jump_table (insn)) != NULL)
8593 address += get_jump_table_size (table);
8599 fix = minipool_fix_head;
8601 /* Now scan the fixups and perform the required changes. */
8606 Mfix * last_added_fix;
8607 Mfix * last_barrier = NULL;
8610 /* Skip any further barriers before the next fix. */
8611 while (fix && GET_CODE (fix->insn) == BARRIER)
8614 /* No more fixes. */
8618 last_added_fix = NULL;
8620 for (ftmp = fix; ftmp; ftmp = ftmp->next)
8622 if (GET_CODE (ftmp->insn) == BARRIER)
8624 if (ftmp->address >= minipool_vector_head->max_address)
8627 last_barrier = ftmp;
8629 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
8632 last_added_fix = ftmp; /* Keep track of the last fix added. */
8635 /* If we found a barrier, drop back to that; any fixes that we
8636 could have reached but come after the barrier will now go in
8637 the next mini-pool. */
8638 if (last_barrier != NULL)
/* Reduce the refcount for those fixes that won't go into this
   pool after all. */
8642 for (fdel = last_barrier->next;
8643 fdel && fdel != ftmp;
8646 fdel->minipool->refcount--;
8647 fdel->minipool = NULL;
8650 ftmp = last_barrier;
/* ftmp is the first fix that we can't fit into this pool and
   there are no natural barriers that we could use. Insert a
8656 new barrier in the code somewhere between the previous
8657 fix and this one, and arrange to jump around it. */
8658 HOST_WIDE_INT max_address;
8660 /* The last item on the list of fixes must be a barrier, so
8661 we can never run off the end of the list of fixes without
8662 last_barrier being set. */
8665 max_address = minipool_vector_head->max_address;
8666 /* Check that there isn't another fix that is in range that
8667 we couldn't fit into this pool because the pool was
8668 already too large: we need to put the pool before such an
8669 instruction. The pool itself may come just after the
8670 fix because create_fix_barrier also allows space for a
8671 jump instruction. */
8672 if (ftmp->address < max_address)
8673 max_address = ftmp->address + 1;
8675 last_barrier = create_fix_barrier (last_added_fix, max_address);
8678 assign_minipool_offsets (last_barrier);
8682 if (GET_CODE (ftmp->insn) != BARRIER
8683 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
8690 /* Scan over the fixes we have identified for this pool, fixing them
8691 up and adding the constants to the pool itself. */
8692 for (this_fix = fix; this_fix && ftmp != this_fix;
8693 this_fix = this_fix->next)
8694 if (GET_CODE (this_fix->insn) != BARRIER)
8697 = plus_constant (gen_rtx_LABEL_REF (VOIDmode,
8698 minipool_vector_label),
8699 this_fix->minipool->offset);
8700 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
8703 dump_minipool (last_barrier->insn);
8707 /* From now on we must synthesize any constants that we can't handle
8708 directly. This can happen if the RTL gets split during final
8709 instruction generation. */
8710 after_arm_reorg = 1;
8712 /* Free the minipool memory. */
8713 obstack_free (&minipool_obstack, minipool_startobj);
8716 /* Routines to output assembly language. */
/* If the rtx is the correct value, then return the string of the number.
   In this way we can ensure that valid double constants are generated even
   when cross-compiling. */
8722 fp_immediate_constant (rtx x)
8727 if (!fp_consts_inited)
8730 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
8731 for (i = 0; i < 8; i++)
8732 if (REAL_VALUES_EQUAL (r, values_fp[i]))
8733 return strings_fp[i];
8738 /* As for fp_immediate_constant, but value is passed directly, not in rtx. */
8740 fp_const_from_val (REAL_VALUE_TYPE *r)
8744 if (!fp_consts_inited)
8747 for (i = 0; i < 8; i++)
8748 if (REAL_VALUES_EQUAL (*r, values_fp[i]))
8749 return strings_fp[i];
8754 /* Output the operands of a LDM/STM instruction to STREAM.
8755 MASK is the ARM register set mask of which only bits 0-15 are important.
8756 REG is the base register, either the frame pointer or the stack pointer,
8757 INSTR is the possibly suffixed load or store instruction.
8758 RFE is nonzero if the instruction should also copy spsr to cpsr. */
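/* For example (illustrative), a mask containing r4, r5 and the PC with
   the stack pointer as base might print as

	ldmfd	sp!, {r4, r5, pc}^

   where the trailing '^' is added when RFE is nonzero. */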
8761 print_multi_reg (FILE *stream, const char *instr, unsigned reg,
8762 unsigned long mask, int rfe)
8765 bool not_first = FALSE;
8767 gcc_assert (!rfe || (mask & (1 << PC_REGNUM)));
8768 fputc ('\t', stream);
8769 asm_fprintf (stream, instr, reg);
8770 fputc ('{', stream);
8772 for (i = 0; i <= LAST_ARM_REGNUM; i++)
8773 if (mask & (1 << i))
8776 fprintf (stream, ", ");
8778 asm_fprintf (stream, "%r", i);
8783 fprintf (stream, "}^\n");
8785 fprintf (stream, "}\n");
8789 /* Output a FLDMD instruction to STREAM.
BASE is the register containing the address.
8791 REG and COUNT specify the register range.
8792 Extra registers may be added to avoid hardware bugs.
8794 We output FLDMD even for ARMv5 VFP implementations. Although
8795 FLDMD is technically not supported until ARMv6, it is believed
8796 that all VFP implementations support its use in this context. */
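/* For example (illustrative), BASE == SP, REG == 8 and COUNT == 2 print

	fldmfdd	sp!, {d8, d9}

   except that on pre-ARMv6 parts the workaround below widens the range
   by one register. */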
8799 vfp_output_fldmd (FILE * stream, unsigned int base, int reg, int count)
8803 /* Workaround ARM10 VFPr1 bug. */
8804 if (count == 2 && !arm_arch6)
8811 fputc ('\t', stream);
8812 asm_fprintf (stream, "fldmfdd\t%r!, {", base);
8814 for (i = reg; i < reg + count; i++)
8817 fputs (", ", stream);
8818 asm_fprintf (stream, "d%d", i);
8820 fputs ("}\n", stream);
8825 /* Output the assembly for a store multiple. */
8828 vfp_output_fstmd (rtx * operands)
8835 strcpy (pattern, "fstmfdd\t%m0!, {%P1");
8836 p = strlen (pattern);
8838 gcc_assert (GET_CODE (operands[1]) == REG);
8840 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
8841 for (i = 1; i < XVECLEN (operands[2], 0); i++)
8843 p += sprintf (&pattern[p], ", d%d", base + i);
8845 strcpy (&pattern[p], "}");
8847 output_asm_insn (pattern, operands);
/* Emit RTL to save a block of VFP register pairs to the stack. Returns the
8853 number of bytes pushed. */
8856 vfp_emit_fstmd (int base_reg, int count)
8863 /* Workaround ARM10 VFPr1 bug. Data corruption can occur when exactly two
8864 register pairs are stored by a store multiple insn. We avoid this
8865 by pushing an extra pair. */
8866 if (count == 2 && !arm_arch6)
8868 if (base_reg == LAST_VFP_REGNUM - 3)
8873 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
8874 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
8876 reg = gen_rtx_REG (DFmode, base_reg);
8880 = gen_rtx_SET (VOIDmode,
8881 gen_frame_mem (BLKmode,
8882 gen_rtx_PRE_DEC (BLKmode,
8883 stack_pointer_rtx)),
8884 gen_rtx_UNSPEC (BLKmode,
8888 tmp = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8889 plus_constant (stack_pointer_rtx, -(count * 8)));
8890 RTX_FRAME_RELATED_P (tmp) = 1;
8891 XVECEXP (dwarf, 0, 0) = tmp;
8893 tmp = gen_rtx_SET (VOIDmode,
8894 gen_frame_mem (DFmode, stack_pointer_rtx),
8896 RTX_FRAME_RELATED_P (tmp) = 1;
8897 XVECEXP (dwarf, 0, 1) = tmp;
8899 for (i = 1; i < count; i++)
8901 reg = gen_rtx_REG (DFmode, base_reg);
8903 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
8905 tmp = gen_rtx_SET (VOIDmode,
8906 gen_frame_mem (DFmode,
8907 plus_constant (stack_pointer_rtx,
8910 RTX_FRAME_RELATED_P (tmp) = 1;
8911 XVECEXP (dwarf, 0, i + 1) = tmp;
8914 par = emit_insn (par);
8915 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
8917 RTX_FRAME_RELATED_P (par) = 1;
8923 /* Output a 'call' insn. */
8925 output_call (rtx *operands)
8927 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
8929 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
8930 if (REGNO (operands[0]) == LR_REGNUM)
8932 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
8933 output_asm_insn ("mov%?\t%0, %|lr", operands);
8936 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8938 if (TARGET_INTERWORK || arm_arch4t)
8939 output_asm_insn ("bx%?\t%0", operands);
8941 output_asm_insn ("mov%?\t%|pc, %0", operands);
8946 /* Output a 'call' insn that is a reference in memory. */
8948 output_call_mem (rtx *operands)
8950 if (TARGET_INTERWORK && !arm_arch5)
8952 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8953 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8954 output_asm_insn ("bx%?\t%|ip", operands);
8956 else if (regno_use_in (LR_REGNUM, operands[0]))
8958 /* LR is used in the memory address. We load the address in the
8959 first instruction. It's safe to use IP as the target of the
8960 load since the call will kill it anyway. */
8961 output_asm_insn ("ldr%?\t%|ip, %0", operands);
8963 output_asm_insn ("blx%?\t%|ip", operands);
8966 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8968 output_asm_insn ("bx%?\t%|ip", operands);
8970 output_asm_insn ("mov%?\t%|pc, %|ip", operands);
8975 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
8976 output_asm_insn ("ldr%?\t%|pc, %0", operands);
/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register pair. */
8987 output_mov_long_double_fpa_from_arm (rtx *operands)
8989 int arm_reg0 = REGNO (operands[1]);
8992 gcc_assert (arm_reg0 != IP_REGNUM);
8994 ops[0] = gen_rtx_REG (SImode, arm_reg0);
8995 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
8996 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
8998 output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
8999 output_asm_insn ("ldf%?e\t%0, [%|sp], #12", operands);
9004 /* Output a move from an fpa register to arm registers.
OPERANDS[0] is the first register of an arm register pair.
9006 OPERANDS[1] is an fpa register. */
9008 output_mov_long_double_arm_from_fpa (rtx *operands)
9010 int arm_reg0 = REGNO (operands[0]);
9013 gcc_assert (arm_reg0 != IP_REGNUM);
9015 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9016 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9017 ops[2] = gen_rtx_REG (SImode, 2 + arm_reg0);
9019 output_asm_insn ("stf%?e\t%1, [%|sp, #-12]!", operands);
9020 output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1, %2}", ops);
/* Output a move from arm registers to arm registers of a long double.
9025 OPERANDS[0] is the destination.
9026 OPERANDS[1] is the source. */
9028 output_mov_long_double_arm_from_arm (rtx *operands)
9030 /* We have to be careful here because the two might overlap. */
9031 int dest_start = REGNO (operands[0]);
9032 int src_start = REGNO (operands[1]);
9036 if (dest_start < src_start)
9038 for (i = 0; i < 3; i++)
9040 ops[0] = gen_rtx_REG (SImode, dest_start + i);
9041 ops[1] = gen_rtx_REG (SImode, src_start + i);
9042 output_asm_insn ("mov%?\t%0, %1", ops);
9047 for (i = 2; i >= 0; i--)
9049 ops[0] = gen_rtx_REG (SImode, dest_start + i);
9050 ops[1] = gen_rtx_REG (SImode, src_start + i);
9051 output_asm_insn ("mov%?\t%0, %1", ops);
/* Output a move from arm registers to an fpa register.
   OPERANDS[0] is an fpa register.
   OPERANDS[1] is the first register of an arm register pair. */
9063 output_mov_double_fpa_from_arm (rtx *operands)
9065 int arm_reg0 = REGNO (operands[1]);
9068 gcc_assert (arm_reg0 != IP_REGNUM);
9070 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9071 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9072 output_asm_insn ("stm%(fd%)\t%|sp!, {%0, %1}", ops);
9073 output_asm_insn ("ldf%?d\t%0, [%|sp], #8", operands);
9077 /* Output a move from an fpa register to arm registers.
OPERANDS[0] is the first register of an arm register pair.
9079 OPERANDS[1] is an fpa register. */
9081 output_mov_double_arm_from_fpa (rtx *operands)
9083 int arm_reg0 = REGNO (operands[0]);
9086 gcc_assert (arm_reg0 != IP_REGNUM);
9088 ops[0] = gen_rtx_REG (SImode, arm_reg0);
9089 ops[1] = gen_rtx_REG (SImode, 1 + arm_reg0);
9090 output_asm_insn ("stf%?d\t%1, [%|sp, #-8]!", operands);
9091 output_asm_insn ("ldm%(fd%)\t%|sp!, {%0, %1}", ops);
9095 /* Output a move between double words.
9096 It must be REG<-REG, REG<-CONST_DOUBLE, REG<-CONST_INT, REG<-MEM
9097 or MEM<-REG and all MEMs must be offsettable addresses. */
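/* For example (illustrative), a DImode load whose address is a plain
   register becomes "ldmia rB, {rD, rD+1}", while a post-increment
   store becomes "stmia rB!, {rD, rD+1}". */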
9099 output_move_double (rtx *operands)
9101 enum rtx_code code0 = GET_CODE (operands[0]);
9102 enum rtx_code code1 = GET_CODE (operands[1]);
9107 int reg0 = REGNO (operands[0]);
9109 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9111 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
9113 switch (GET_CODE (XEXP (operands[1], 0)))
9116 output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
9120 gcc_assert (TARGET_LDRD);
9121 output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
9126 output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
9128 output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
9132 output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
9136 gcc_assert (TARGET_LDRD);
9137 output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
9142 otherops[0] = operands[0];
9143 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
9144 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
9146 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
9148 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
9150 /* Registers overlap so split out the increment. */
9151 output_asm_insn ("add%?\t%1, %1, %2", otherops);
9152 output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
/* IWMMXT allows offsets larger than ldrd can handle;
   fix these up with a pair of ldr instructions. */
9158 if (GET_CODE (otherops[2]) == CONST_INT
9159 && (INTVAL(otherops[2]) <= -256
9160 || INTVAL(otherops[2]) >= 256))
9162 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
9163 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9164 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9167 output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
/* IWMMXT allows offsets larger than ldrd can handle;
   fix these up with a pair of ldr instructions.  */
9174 if (GET_CODE (otherops[2]) == CONST_INT
9175 && (INTVAL(otherops[2]) <= -256
9176 || INTVAL(otherops[2]) >= 256))
9178 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
9179 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9180 otherops[0] = operands[0];
9181 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
9184 /* We only allow constant increments, so this is safe. */
9185 output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
9191 output_asm_insn ("adr%?\t%0, %1", operands);
9192 output_asm_insn ("ldm%(ia%)\t%0, %M0", operands);
9195 /* ??? This needs checking for thumb2. */
9197 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
9198 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
9200 otherops[0] = operands[0];
9201 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
9202 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
9204 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
9206 if (GET_CODE (otherops[2]) == CONST_INT)
9208 switch ((int) INTVAL (otherops[2]))
9211 output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
9216 output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
9221 output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
9226 && (GET_CODE (otherops[2]) == REG
9227 || (GET_CODE (otherops[2]) == CONST_INT
9228 && INTVAL (otherops[2]) > -256
9229 && INTVAL (otherops[2]) < 256)))
9231 if (reg_overlap_mentioned_p (otherops[0],
9234 /* Swap base and index registers over to
9235 avoid a conflict. */
9236 otherops[1] = XEXP (XEXP (operands[1], 0), 1);
9237 otherops[2] = XEXP (XEXP (operands[1], 0), 0);
9239 /* If both registers conflict, it will usually
9240 have been fixed by a splitter. */
9241 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
9243 output_asm_insn ("add%?\t%1, %1, %2", otherops);
9244 output_asm_insn ("ldr%(d%)\t%0, [%1]",
9248 output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
9252 if (GET_CODE (otherops[2]) == CONST_INT)
9254 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
9255 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
9257 output_asm_insn ("add%?\t%0, %1, %2", otherops);
9260 output_asm_insn ("add%?\t%0, %1, %2", otherops);
9263 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
9265 return "ldm%(ia%)\t%0, %M0";
9269 otherops[1] = adjust_address (operands[1], SImode, 4);
9270 /* Take care of overlapping base/data reg. */
9271 if (reg_mentioned_p (operands[0], operands[1]))
9273 output_asm_insn ("ldr%?\t%0, %1", otherops);
9274 output_asm_insn ("ldr%?\t%0, %1", operands);
9278 output_asm_insn ("ldr%?\t%0, %1", operands);
9279 output_asm_insn ("ldr%?\t%0, %1", otherops);
9286 /* Constraints should ensure this. */
9287 gcc_assert (code0 == MEM && code1 == REG);
9288 gcc_assert (REGNO (operands[1]) != IP_REGNUM);
9290 switch (GET_CODE (XEXP (operands[0], 0)))
9293 output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
9297 gcc_assert (TARGET_LDRD);
9298 output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
9303 output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
9305 output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
9309 output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
9313 gcc_assert (TARGET_LDRD);
9314 output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
9319 otherops[0] = operands[1];
9320 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
9321 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
/* IWMMXT allows offsets larger than strd can handle;
   fix these up with a pair of str instructions.  */
9325 if (GET_CODE (otherops[2]) == CONST_INT
9326 && (INTVAL(otherops[2]) <= -256
9327 || INTVAL(otherops[2]) >= 256))
9330 reg1 = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
9331 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
9333 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
9335 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9340 output_asm_insn ("ldr%?\t%0, [%1, #4]", otherops);
9341 otherops[0] = operands[1];
9342 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
9345 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
9346 output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
9348 output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
9352 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
9353 if (GET_CODE (otherops[2]) == CONST_INT)
9355 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
9358 output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
9364 output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
9370 output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
9375 && (GET_CODE (otherops[2]) == REG
9376 || (GET_CODE (otherops[2]) == CONST_INT
9377 && INTVAL (otherops[2]) > -256
9378 && INTVAL (otherops[2]) < 256)))
9380 otherops[0] = operands[1];
9381 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
9382 output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
9388 otherops[0] = adjust_address (operands[0], SImode, 4);
9389 otherops[1] = gen_rtx_REG (SImode, 1 + REGNO (operands[1]));
9390 output_asm_insn ("str%?\t%1, %0", operands);
9391 output_asm_insn ("str%?\t%1, %0", otherops);
9398 /* Output a VFP load or store instruction. */
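/* For example (illustrative only; the exact syntax is produced by the
   sprintf below): a DFmode load into d7 from a POST_INC address held
   in r0 emits "fldmiad r0!, {d7}", while a plain offsettable address
   emits something like "fldd d7, [r0, #8]".  */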
9401 output_move_vfp (rtx *operands)
9403 rtx reg, mem, addr, ops[2];
9404 int load = REG_P (operands[0]);
9405 int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
9406 int integer_p = GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT;
9407 const char *template;
9410 reg = operands[!load];
9411 mem = operands[load];
9413 gcc_assert (REG_P (reg));
9414 gcc_assert (IS_VFP_REGNUM (REGNO (reg)));
9415 gcc_assert (GET_MODE (reg) == SFmode
9416 || GET_MODE (reg) == DFmode
9417 || GET_MODE (reg) == SImode
9418 || GET_MODE (reg) == DImode);
9419 gcc_assert (MEM_P (mem));
9421 addr = XEXP (mem, 0);
9423 switch (GET_CODE (addr))
9426 template = "f%smdb%c%%?\t%%0!, {%%%s1}%s";
9427 ops[0] = XEXP (addr, 0);
9432 template = "f%smia%c%%?\t%%0!, {%%%s1}%s";
9433 ops[0] = XEXP (addr, 0);
9438 template = "f%s%c%%?\t%%%s0, %%1%s";
9444 sprintf (buff, template,
9448 integer_p ? "\t%@ int" : "");
9449 output_asm_insn (buff, ops);
9454 /* Output an ADD r, s, #n where n may be too big for one instruction.
9455 If adding zero to one register, output nothing. */
9457 output_add_immediate (rtx *operands)
9459 HOST_WIDE_INT n = INTVAL (operands[2]);
9461 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
9464 output_multi_immediate (operands,
9465 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
9468 output_multi_immediate (operands,
9469 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
9476 /* Output a multiple immediate operation.
9477 OPERANDS is the vector of operands referred to in the output patterns.
9478 INSTR1 is the output pattern to use for the first constant.
9479 INSTR2 is the output pattern to use for subsequent constants.
9480 IMMED_OP is the index of the constant slot in OPERANDS.
9481 N is the constant value. */
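/* For example (illustrative): splitting N == 0x10001 with the "add"
   patterns used by output_add_immediate produces two instructions,

	add	r0, r1, #1
	add	r0, r0, #65536

   each immediate being an 8-bit value rotated by an even amount, as
   the ARM instruction encoding requires.  */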
9483 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
9484 int immed_op, HOST_WIDE_INT n)
9486 #if HOST_BITS_PER_WIDE_INT > 32
9492 /* Quick and easy output. */
9493 operands[immed_op] = const0_rtx;
9494 output_asm_insn (instr1, operands);
9499 const char * instr = instr1;
9501 /* Note that n is never zero here (which would give no output). */
9502 for (i = 0; i < 32; i += 2)
9506 operands[immed_op] = GEN_INT (n & (255 << i));
9507 output_asm_insn (instr, operands);
9517 /* Return the name of a shifter operation. */
arm_shift_nmem (enum rtx_code code)
9524 return ARM_LSL_NAME;
9540 /* Return the appropriate ARM instruction for the operation code.
9541 The returned result should not be overwritten. OP is the rtx of the
operation.  SHIFT_FIRST_ARG is TRUE if the first argument of the
operator was shifted.  */
9545 arithmetic_instr (rtx op, int shift_first_arg)
9547 switch (GET_CODE (op))
9553 return shift_first_arg ? "rsb" : "sub";
return arm_shift_nmem (GET_CODE (op));
/* Ensure valid constant shifts and return the appropriate shift mnemonic
   for the operation code.  The returned result should not be overwritten.
   OP is the rtx code of the shift.
   On exit, *AMOUNTP will be -1 if the shift is by a register, or the
   constant shift amount otherwise.  */
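/* For example (illustrative): for (ashift (reg) (const_int 4)) this
   returns ARM_LSL_NAME and sets *AMOUNTP to 4; for (mult (reg)
   (const_int 8)) it also returns ARM_LSL_NAME, with *AMOUNTP set to
   3 via int_log2.  */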
9581 shift_op (rtx op, HOST_WIDE_INT *amountp)
9584 enum rtx_code code = GET_CODE (op);
9586 switch (GET_CODE (XEXP (op, 1)))
9594 *amountp = INTVAL (XEXP (op, 1));
9604 gcc_assert (*amountp != -1);
9605 *amountp = 32 - *amountp;
mnem = arm_shift_nmem (code);
9618 /* We never have to worry about the amount being other than a
9619 power of 2, since this case can never be reloaded from a reg. */
9620 gcc_assert (*amountp != -1);
9621 *amountp = int_log2 (*amountp);
9622 return ARM_LSL_NAME;
9630 /* This is not 100% correct, but follows from the desire to merge
9631 multiplication by a power of 2 with the recognizer for a
shift.  >=32 is not a valid shift for "lsl", so we must try to
9633 output a shift that produces the correct arithmetical result.
9634 Using lsr #32 is identical except for the fact that the carry bit
9635 is not set correctly if we set the flags; but we never use the
9636 carry bit from such an operation, so we can ignore that. */
9637 if (code == ROTATERT)
9638 /* Rotate is just modulo 32. */
9640 else if (*amountp != (*amountp & 31))
9647 /* Shifts of 0 are no-ops. */
9655 /* Obtain the shift from the POWER of two. */
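/* For example (illustrative): int_log2 (1) == 0, int_log2 (8) == 3
   and int_log2 ((HOST_WIDE_INT) 1 << 31) == 31.  */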
9657 static HOST_WIDE_INT
9658 int_log2 (HOST_WIDE_INT power)
9660 HOST_WIDE_INT shift = 0;
9662 while ((((HOST_WIDE_INT) 1 << shift) & power) == 0)
9664 gcc_assert (shift <= 31);
9671 /* Output a .ascii pseudo-op, keeping track of lengths. This is
9672 because /bin/as is horribly restrictive. The judgement about
9673 whether or not each character is 'printable' (and can be output as
9674 is) or not (and must be printed with an octal escape) must be made
9675 with reference to the *host* character set -- the situation is
9676 similar to that discussed in the comments above pp_c_char in
9677 c-pretty-print.c. */
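/* For example (illustrative): given the four bytes 'h', 'i', '"' and
   '\n', this emits

	.ascii "hi\"\012"

   starting a fresh .ascii directive whenever MAX_ASCII_LEN characters
   have been written on the current one.  */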
9679 #define MAX_ASCII_LEN 51
9682 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
9687 fputs ("\t.ascii\t\"", stream);
9689 for (i = 0; i < len; i++)
9693 if (len_so_far >= MAX_ASCII_LEN)
9695 fputs ("\"\n\t.ascii\t\"", stream);
9701 if (c == '\\' || c == '\"')
9703 putc ('\\', stream);
9711 fprintf (stream, "\\%03o", c);
9716 fputs ("\"\n", stream);
9719 /* Compute the register save mask for registers 0 through 12
9720 inclusive. This code is used by arm_compute_save_reg_mask. */
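/* For example (illustrative): a normal function whose RTL uses the
   call-saved registers r4 and r7 yields (1 << 4) | (1 << 7) == 0x90
   here, whereas a non-leaf IRQ handler must also save the
   call-clobbered registers r0 - r3 and ip, since the functions it
   calls may clobber them.  */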
9722 static unsigned long
9723 arm_compute_save_reg0_reg12_mask (void)
9725 unsigned long func_type = arm_current_func_type ();
9726 unsigned long save_reg_mask = 0;
9729 if (IS_INTERRUPT (func_type))
9731 unsigned int max_reg;
9732 /* Interrupt functions must not corrupt any registers,
even call-clobbered ones.  If this is a leaf function
9734 we can just examine the registers used by the RTL, but
9735 otherwise we have to assume that whatever function is
9736 called might clobber anything, and so we have to save
9737 all the call-clobbered registers as well. */
9738 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
/* FIQ handlers have registers r8 - r12 banked, so
   we only need to check r0 - r7.  Normal ISRs only
   bank r14 and r15, so we must check up to r12.
   r13 is the stack pointer which is always preserved,
   so we do not need to consider it here.  */
9748 for (reg = 0; reg <= max_reg; reg++)
9749 if (regs_ever_live[reg]
9750 || (! current_function_is_leaf && call_used_regs [reg]))
9751 save_reg_mask |= (1 << reg);
9753 /* Also save the pic base register if necessary. */
9755 && !TARGET_SINGLE_PIC_BASE
9756 && arm_pic_register != INVALID_REGNUM
9757 && current_function_uses_pic_offset_table)
9758 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9762 /* In arm mode we handle r11 (FP) as a special case. */
9763 unsigned last_reg = TARGET_ARM ? 10 : 11;
9765 /* In the normal case we only need to save those registers
9766 which are call saved and which are used by this function. */
9767 for (reg = 0; reg <= last_reg; reg++)
9768 if (regs_ever_live[reg] && ! call_used_regs [reg])
9769 save_reg_mask |= (1 << reg);
9771 /* Handle the frame pointer as a special case. */
9772 if (TARGET_THUMB2 && frame_pointer_needed)
9773 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9774 else if (! TARGET_APCS_FRAME
9775 && ! frame_pointer_needed
9776 && regs_ever_live[HARD_FRAME_POINTER_REGNUM]
9777 && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
9778 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
9780 /* If we aren't loading the PIC register,
9781 don't stack it even though it may be live. */
9783 && !TARGET_SINGLE_PIC_BASE
9784 && arm_pic_register != INVALID_REGNUM
9785 && (regs_ever_live[PIC_OFFSET_TABLE_REGNUM]
9786 || current_function_uses_pic_offset_table))
9787 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9789 /* The prologue will copy SP into R0, so save it. */
9790 if (IS_STACKALIGN (func_type))
9794 /* Save registers so the exception handler can modify them. */
9795 if (current_function_calls_eh_return)
9801 reg = EH_RETURN_DATA_REGNO (i);
9802 if (reg == INVALID_REGNUM)
9804 save_reg_mask |= 1 << reg;
9808 return save_reg_mask;
9812 /* Compute a bit mask of which registers need to be
9813 saved on the stack for the current function. */
9815 static unsigned long
9816 arm_compute_save_reg_mask (void)
9818 unsigned int save_reg_mask = 0;
9819 unsigned long func_type = arm_current_func_type ();
9822 if (IS_NAKED (func_type))
9823 /* This should never really happen. */
9826 /* If we are creating a stack frame, then we must save the frame pointer,
9827 IP (which will hold the old stack pointer), LR and the PC. */
9828 if (frame_pointer_needed && TARGET_ARM)
9830 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
9835 /* Volatile functions do not return, so there
9836 is no need to save any other registers. */
9837 if (IS_VOLATILE (func_type))
9838 return save_reg_mask;
9840 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
9842 /* Decide if we need to save the link register.
9843 Interrupt routines have their own banked link register,
9844 so they never need to save it.
9845 Otherwise if we do not use the link register we do not need to save
9846 it. If we are pushing other registers onto the stack however, we
9847 can save an instruction in the epilogue by pushing the link register
9848 now and then popping it back into the PC. This incurs extra memory
9849 accesses though, so we only do it when optimizing for size, and only
9850 if we know that we will not need a fancy return sequence. */
9851 if (regs_ever_live [LR_REGNUM]
9854 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
9855 && !current_function_calls_eh_return))
9856 save_reg_mask |= 1 << LR_REGNUM;
9858 if (cfun->machine->lr_save_eliminated)
9859 save_reg_mask &= ~ (1 << LR_REGNUM);
9861 if (TARGET_REALLY_IWMMXT
9862 && ((bit_count (save_reg_mask)
9863 + ARM_NUM_INTS (current_function_pretend_args_size)) % 2) != 0)
9865 /* The total number of registers that are going to be pushed
9866 onto the stack is odd. We need to ensure that the stack
9867 is 64-bit aligned before we start to save iWMMXt registers,
9868 and also before we start to create locals. (A local variable
9869 might be a double or long long which we will load/store using
9870 an iWMMXt instruction). Therefore we need to push another
9871 ARM register, so that the stack will be 64-bit aligned. We
try to avoid using the arg registers (r0 - r3) as they might be
used to pass values in a tail call.  */
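/* For example (illustrative): if only { r4, r5, lr } were to be
   pushed (three registers, leaving the stack at 4 mod 8), the loop
   below adds r6 to the mask so that the push is 16 bytes and the
   stack stays 64-bit aligned.  */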
9874 for (reg = 4; reg <= 12; reg++)
9875 if ((save_reg_mask & (1 << reg)) == 0)
9879 save_reg_mask |= (1 << reg);
9882 cfun->machine->sibcall_blocked = 1;
9883 save_reg_mask |= (1 << 3);
9887 /* We may need to push an additional register for use initializing the
9888 PIC base register. */
9889 if (TARGET_THUMB2 && IS_NESTED (func_type) && flag_pic
9890 && (save_reg_mask & THUMB2_WORK_REGS) == 0)
9892 reg = thumb_find_work_register (1 << 4);
9893 if (!call_used_regs[reg])
9894 save_reg_mask |= (1 << reg);
9897 return save_reg_mask;
9901 /* Compute a bit mask of which registers need to be
9902 saved on the stack for the current function. */
9903 static unsigned long
9904 thumb1_compute_save_reg_mask (void)
9910 for (reg = 0; reg < 12; reg ++)
9911 if (regs_ever_live[reg] && !call_used_regs[reg])
9915 && !TARGET_SINGLE_PIC_BASE
9916 && arm_pic_register != INVALID_REGNUM
9917 && current_function_uses_pic_offset_table)
9918 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
9920 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
9921 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
9922 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
9924 /* LR will also be pushed if any lo regs are pushed. */
9925 if (mask & 0xff || thumb_force_lr_save ())
9926 mask |= (1 << LR_REGNUM);
9928 /* Make sure we have a low work register if we need one.
9929 We will need one if we are going to push a high register,
9930 but we are not currently intending to push a low register. */
9931 if ((mask & 0xff) == 0
9932 && ((mask & 0x0f00) || TARGET_BACKTRACE))
9934 /* Use thumb_find_work_register to choose which register
9935 we will use. If the register is live then we will
9936 have to push it. Use LAST_LO_REGNUM as our fallback
9937 choice for the register to select. */
9938 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
9940 if (! call_used_regs[reg])
9948 /* Return the number of bytes required to save VFP registers. */
9950 arm_get_vfp_saved_size (void)
9957 /* Space for saved VFP registers. */
9958 if (TARGET_HARD_FLOAT && TARGET_VFP)
9961 for (regno = FIRST_VFP_REGNUM;
9962 regno < LAST_VFP_REGNUM;
9965 if ((!regs_ever_live[regno] || call_used_regs[regno])
9966 && (!regs_ever_live[regno + 1] || call_used_regs[regno + 1]))
9970 /* Workaround ARM10 VFPr1 bug. */
9971 if (count == 2 && !arm_arch6)
9982 if (count == 2 && !arm_arch6)
9991 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
9992 everything bar the final return instruction. */
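/* For example (illustrative): a function that pushed { r4, lr } in
   its prologue normally returns with the single instruction

	ldmfd	sp!, {r4, pc}

   whereas an interworking return pops into LR and finishes with
   "bx lr".  */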
9994 output_return_instruction (rtx operand, int really_return, int reverse)
9996 char conditional[10];
9999 unsigned long live_regs_mask;
10000 unsigned long func_type;
10001 arm_stack_offsets *offsets;
10003 func_type = arm_current_func_type ();
10005 if (IS_NAKED (func_type))
10008 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
10010 /* If this function was declared non-returning, and we have
10011 found a tail call, then we have to trust that the called
10012 function won't return. */
10017 /* Otherwise, trap an attempted return by aborting. */
10019 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
10021 assemble_external_libcall (ops[1]);
10022 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
10028 gcc_assert (!current_function_calls_alloca || really_return);
10030 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
10032 return_used_this_function = 1;
10034 live_regs_mask = arm_compute_save_reg_mask ();
10036 if (live_regs_mask)
10038 const char * return_reg;
10040 /* If we do not have any special requirements for function exit
10041 (e.g. interworking) then we can load the return address
10042 directly into the PC. Otherwise we must load it into LR. */
10044 && (IS_INTERRUPT (func_type) || !TARGET_INTERWORK))
10045 return_reg = reg_names[PC_REGNUM];
10047 return_reg = reg_names[LR_REGNUM];
10049 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
10051 /* There are three possible reasons for the IP register
10052 being saved. 1) a stack frame was created, in which case
10053 IP contains the old stack pointer, or 2) an ISR routine
10054 corrupted it, or 3) it was saved to align the stack on
iWMMXt.  In case 1, restore IP into SP, otherwise just
restore IP.  */
10057 if (frame_pointer_needed)
10059 live_regs_mask &= ~ (1 << IP_REGNUM);
10060 live_regs_mask |= (1 << SP_REGNUM);
10063 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
10066 /* On some ARM architectures it is faster to use LDR rather than
10067 LDM to load a single register. On other architectures, the
10068 cost is the same. In 26 bit mode, or for exception handlers,
we have to use LDM to load the PC so that the CPSR is also
restored.  */
10071 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
10072 if (live_regs_mask == (1U << reg))
10075 if (reg <= LAST_ARM_REGNUM
10076 && (reg != LR_REGNUM
10078 || ! IS_INTERRUPT (func_type)))
10080 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
10081 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
10088 /* Generate the load multiple instruction to restore the
10089 registers. Note we can get here, even if
10090 frame_pointer_needed is true, but only if sp already
10091 points to the base of the saved core registers. */
10092 if (live_regs_mask & (1 << SP_REGNUM))
10094 unsigned HOST_WIDE_INT stack_adjust;
10096 offsets = arm_get_frame_offsets ();
10097 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
10098 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
10100 if (stack_adjust && arm_arch5 && TARGET_ARM)
10101 sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
10104 /* If we can't use ldmib (SA110 bug),
10105 then try to pop r3 instead. */
10107 live_regs_mask |= 1 << 3;
10108 sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
10112 sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
10114 p = instr + strlen (instr);
10116 for (reg = 0; reg <= SP_REGNUM; reg++)
10117 if (live_regs_mask & (1 << reg))
10119 int l = strlen (reg_names[reg]);
10125 memcpy (p, ", ", 2);
10129 memcpy (p, "%|", 2);
10130 memcpy (p + 2, reg_names[reg], l);
10134 if (live_regs_mask & (1 << LR_REGNUM))
10136 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
10137 /* If returning from an interrupt, restore the CPSR. */
10138 if (IS_INTERRUPT (func_type))
10145 output_asm_insn (instr, & operand);
10147 /* See if we need to generate an extra instruction to
10148 perform the actual function return. */
10150 && func_type != ARM_FT_INTERWORKED
10151 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
10153 /* The return has already been handled
10154 by loading the LR into the PC. */
10161 switch ((int) ARM_FUNC_TYPE (func_type))
10165 /* ??? This is wrong for unified assembly syntax. */
10166 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
10169 case ARM_FT_INTERWORKED:
10170 sprintf (instr, "bx%s\t%%|lr", conditional);
10173 case ARM_FT_EXCEPTION:
10174 /* ??? This is wrong for unified assembly syntax. */
10175 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
10179 /* Use bx if it's available. */
10180 if (arm_arch5 || arm_arch4t)
10181 sprintf (instr, "bx%s\t%%|lr", conditional);
10183 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
10187 output_asm_insn (instr, & operand);
10193 /* Write the function name into the code section, directly preceding
10194 the function prologue.
   Code will be output similar to this:

     t0
	 .ascii "arm_poke_function_name", 0
	 .align
     t1
	 .word 0xff000000 + (t1 - t0)
     arm_poke_function_name
	 mov     ip, sp
	 stmfd   sp!, {fp, ip, lr, pc}
	 sub     fp, ip, #4
10207 When performing a stack backtrace, code can inspect the value
10208 of 'pc' stored at 'fp' + 0. If the trace function then looks
10209 at location pc - 12 and the top 8 bits are set, then we know
10210 that there is a function name embedded immediately preceding this
location, whose length is ((pc[-3]) & 0x00ffffff).
10213 We assume that pc is declared as a pointer to an unsigned long.
10215 It is of no benefit to output the function name if we are assembling
a leaf function.  These function types will not contain a stack
backtrace structure, therefore it is not possible to determine the
function name.  */
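/* An illustrative (hypothetical) consumer, assuming "pc" was declared
   as a pointer to an unsigned long, as described above:

	unsigned long marker = pc[-3];
	if ((marker & 0xff000000) == 0xff000000)
	  {
	    unsigned long len = marker & 0x00ffffff;
	    const char *name = (const char *) (pc - 3) - len;
	    ...
	  }

   Note that LEN includes the trailing NUL and any alignment padding.  */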
10220 arm_poke_function_name (FILE *stream, const char *name)
10222 unsigned long alignlength;
10223 unsigned long length;
10226 length = strlen (name) + 1;
10227 alignlength = ROUND_UP_WORD (length);
10229 ASM_OUTPUT_ASCII (stream, name, length);
10230 ASM_OUTPUT_ALIGN (stream, 2);
10231 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
10232 assemble_aligned_integer (UNITS_PER_WORD, x);
10235 /* Place some comments into the assembler stream
10236 describing the current function. */
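/* For example (illustrative), the output for a small function might
   look like:

	@ args = 0, pretend = 0, frame = 8
	@ frame_needed = 1, uses_anonymous_args = 0  */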
10238 arm_output_function_prologue (FILE *f, HOST_WIDE_INT frame_size)
10240 unsigned long func_type;
10244 thumb1_output_function_prologue (f, frame_size);
10248 /* Sanity check. */
10249 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
10251 func_type = arm_current_func_type ();
10253 switch ((int) ARM_FUNC_TYPE (func_type))
10256 case ARM_FT_NORMAL:
10258 case ARM_FT_INTERWORKED:
10259 asm_fprintf (f, "\t%@ Function supports interworking.\n");
10262 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
10265 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
10267 case ARM_FT_EXCEPTION:
10268 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
10272 if (IS_NAKED (func_type))
10273 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
10275 if (IS_VOLATILE (func_type))
10276 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
10278 if (IS_NESTED (func_type))
10279 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
10280 if (IS_STACKALIGN (func_type))
10281 asm_fprintf (f, "\t%@ Stack Align: May be called with mis-aligned SP.\n");
10283 asm_fprintf (f, "\t%@ args = %d, pretend = %d, frame = %wd\n",
10284 current_function_args_size,
10285 current_function_pretend_args_size, frame_size);
10287 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
10288 frame_pointer_needed,
10289 cfun->machine->uses_anonymous_args);
10291 if (cfun->machine->lr_save_eliminated)
10292 asm_fprintf (f, "\t%@ link register save eliminated.\n");
10294 if (current_function_calls_eh_return)
10295 asm_fprintf (f, "\t@ Calls __builtin_eh_return.\n");
10297 #ifdef AOF_ASSEMBLER
10299 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, PIC_OFFSET_TABLE_REGNUM);
10302 return_used_this_function = 0;
10306 arm_output_epilogue (rtx sibling)
10309 unsigned long saved_regs_mask;
10310 unsigned long func_type;
10311 /* Floats_offset is the offset from the "virtual" frame. In an APCS
10312 frame that is $fp + 4 for a non-variadic function. */
10313 int floats_offset = 0;
10315 FILE * f = asm_out_file;
10316 unsigned int lrm_count = 0;
10317 int really_return = (sibling == NULL);
10319 arm_stack_offsets *offsets;
10321 /* If we have already generated the return instruction
10322 then it is futile to generate anything else. */
10323 if (use_return_insn (FALSE, sibling) && return_used_this_function)
10326 func_type = arm_current_func_type ();
10328 if (IS_NAKED (func_type))
10329 /* Naked functions don't have epilogues. */
10332 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
10336 /* A volatile function should never return. Call abort. */
10337 op = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)" : "abort");
10338 assemble_external_libcall (op);
10339 output_asm_insn ("bl\t%a0", &op);
10344 /* If we are throwing an exception, then we really must be doing a
10345 return, so we can't tail-call. */
10346 gcc_assert (!current_function_calls_eh_return || really_return);
10348 offsets = arm_get_frame_offsets ();
10349 saved_regs_mask = arm_compute_save_reg_mask ();
10352 lrm_count = bit_count (saved_regs_mask);
10354 floats_offset = offsets->saved_args;
10355 /* Compute how far away the floats will be. */
10356 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
10357 if (saved_regs_mask & (1 << reg))
10358 floats_offset += 4;
10360 if (frame_pointer_needed && TARGET_ARM)
10362 /* This variable is for the Virtual Frame Pointer, not VFP regs. */
10363 int vfp_offset = offsets->frame;
10365 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10367 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10368 if (regs_ever_live[reg] && !call_used_regs[reg])
10370 floats_offset += 12;
10371 asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
10372 reg, FP_REGNUM, floats_offset - vfp_offset);
10377 start_reg = LAST_FPA_REGNUM;
10379 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
10381 if (regs_ever_live[reg] && !call_used_regs[reg])
10383 floats_offset += 12;
10385 /* We can't unstack more than four registers at once. */
10386 if (start_reg - reg == 3)
10388 asm_fprintf (f, "\tlfm\t%r, 4, [%r, #-%d]\n",
10389 reg, FP_REGNUM, floats_offset - vfp_offset);
10390 start_reg = reg - 1;
10395 if (reg != start_reg)
10396 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
10397 reg + 1, start_reg - reg,
10398 FP_REGNUM, floats_offset - vfp_offset);
10399 start_reg = reg - 1;
10403 /* Just in case the last register checked also needs unstacking. */
10404 if (reg != start_reg)
10405 asm_fprintf (f, "\tlfm\t%r, %d, [%r, #-%d]\n",
10406 reg + 1, start_reg - reg,
10407 FP_REGNUM, floats_offset - vfp_offset);
10410 if (TARGET_HARD_FLOAT && TARGET_VFP)
10414 /* The fldmd insns do not have base+offset addressing
10415 modes, so we use IP to hold the address. */
10416 saved_size = arm_get_vfp_saved_size ();
10418 if (saved_size > 0)
10420 floats_offset += saved_size;
10421 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", IP_REGNUM,
10422 FP_REGNUM, floats_offset - vfp_offset);
10424 start_reg = FIRST_VFP_REGNUM;
10425 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10427 if ((!regs_ever_live[reg] || call_used_regs[reg])
10428 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10430 if (start_reg != reg)
10431 vfp_output_fldmd (f, IP_REGNUM,
10432 (start_reg - FIRST_VFP_REGNUM) / 2,
10433 (reg - start_reg) / 2);
10434 start_reg = reg + 2;
10437 if (start_reg != reg)
10438 vfp_output_fldmd (f, IP_REGNUM,
10439 (start_reg - FIRST_VFP_REGNUM) / 2,
10440 (reg - start_reg) / 2);
10445 /* The frame pointer is guaranteed to be non-double-word aligned.
10446 This is because it is set to (old_stack_pointer - 4) and the
10447 old_stack_pointer was double word aligned. Thus the offset to
10448 the iWMMXt registers to be loaded must also be non-double-word
10449 sized, so that the resultant address *is* double-word aligned.
10450 We can ignore floats_offset since that was already included in
10451 the live_regs_mask. */
10452 lrm_count += (lrm_count % 2 ? 2 : 1);
10454 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
10455 if (regs_ever_live[reg] && !call_used_regs[reg])
10457 asm_fprintf (f, "\twldrd\t%r, [%r, #-%d]\n",
10458 reg, FP_REGNUM, lrm_count * 4);
10463 /* saved_regs_mask should contain the IP, which at the time of stack
10464 frame generation actually contains the old stack pointer. So a
10465 quick way to unwind the stack is just pop the IP register directly
10466 into the stack pointer. */
10467 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
10468 saved_regs_mask &= ~ (1 << IP_REGNUM);
10469 saved_regs_mask |= (1 << SP_REGNUM);
10471 /* There are two registers left in saved_regs_mask - LR and PC. We
10472 only need to restore the LR register (the return address), but to
10473 save time we can load it directly into the PC, unless we need a
10474 special function exit sequence, or we are not really returning. */
10476 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
10477 && !current_function_calls_eh_return)
10478 /* Delete the LR from the register mask, so that the LR on
10479 the stack is loaded into the PC in the register mask. */
10480 saved_regs_mask &= ~ (1 << LR_REGNUM);
10482 saved_regs_mask &= ~ (1 << PC_REGNUM);
10484 /* We must use SP as the base register, because SP is one of the
10485 registers being restored. If an interrupt or page fault
10486 happens in the ldm instruction, the SP might or might not
10487 have been restored. That would be bad, as then SP will no
10488 longer indicate the safe area of stack, and we can get stack
10489 corruption. Using SP as the base register means that it will
10490 be reset correctly to the original value, should an interrupt
10491 occur. If the stack pointer already points at the right
10492 place, then omit the subtraction. */
10493 if (offsets->outgoing_args != (1 + (int) bit_count (saved_regs_mask))
10494 || current_function_calls_alloca)
10495 asm_fprintf (f, "\tsub\t%r, %r, #%d\n", SP_REGNUM, FP_REGNUM,
10496 4 * bit_count (saved_regs_mask));
10497 print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask, 0);
10499 if (IS_INTERRUPT (func_type))
10500 /* Interrupt handlers will have pushed the
10501 IP onto the stack, so restore it now. */
10502 print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, 1 << IP_REGNUM, 0);
10506 HOST_WIDE_INT amount;
10508 /* Restore stack pointer if necessary. */
10509 if (frame_pointer_needed)
10511 /* For Thumb-2 restore sp from the frame pointer.
Operand restrictions mean we have to increment FP, then copy
to SP.  */
10514 amount = offsets->locals_base - offsets->saved_regs;
10515 operands[0] = hard_frame_pointer_rtx;
10519 operands[0] = stack_pointer_rtx;
10520 amount = offsets->outgoing_args - offsets->saved_regs;
10525 operands[1] = operands[0];
10526 operands[2] = GEN_INT (amount);
10527 output_add_immediate (operands);
10529 if (frame_pointer_needed)
10530 asm_fprintf (f, "\tmov\t%r, %r\n",
10531 SP_REGNUM, HARD_FRAME_POINTER_REGNUM);
10533 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
10535 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10536 if (regs_ever_live[reg] && !call_used_regs[reg])
10537 asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
10542 start_reg = FIRST_FPA_REGNUM;
10544 for (reg = FIRST_FPA_REGNUM; reg <= LAST_FPA_REGNUM; reg++)
10546 if (regs_ever_live[reg] && !call_used_regs[reg])
10548 if (reg - start_reg == 3)
10550 asm_fprintf (f, "\tlfmfd\t%r, 4, [%r]!\n",
10551 start_reg, SP_REGNUM);
10552 start_reg = reg + 1;
10557 if (reg != start_reg)
10558 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10559 start_reg, reg - start_reg,
10562 start_reg = reg + 1;
10566 /* Just in case the last register checked also needs unstacking. */
10567 if (reg != start_reg)
10568 asm_fprintf (f, "\tlfmfd\t%r, %d, [%r]!\n",
10569 start_reg, reg - start_reg, SP_REGNUM);
10572 if (TARGET_HARD_FLOAT && TARGET_VFP)
10574 start_reg = FIRST_VFP_REGNUM;
10575 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
10577 if ((!regs_ever_live[reg] || call_used_regs[reg])
10578 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
10580 if (start_reg != reg)
10581 vfp_output_fldmd (f, SP_REGNUM,
10582 (start_reg - FIRST_VFP_REGNUM) / 2,
10583 (reg - start_reg) / 2);
10584 start_reg = reg + 2;
10587 if (start_reg != reg)
10588 vfp_output_fldmd (f, SP_REGNUM,
10589 (start_reg - FIRST_VFP_REGNUM) / 2,
10590 (reg - start_reg) / 2);
10593 for (reg = FIRST_IWMMXT_REGNUM; reg <= LAST_IWMMXT_REGNUM; reg++)
10594 if (regs_ever_live[reg] && !call_used_regs[reg])
10595 asm_fprintf (f, "\twldrd\t%r, [%r], #8\n", reg, SP_REGNUM);
10597 /* If we can, restore the LR into the PC. */
10598 if (ARM_FUNC_TYPE (func_type) != ARM_FT_INTERWORKED
10599 && (TARGET_ARM || ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
10600 && !IS_STACKALIGN (func_type)
10602 && current_function_pretend_args_size == 0
10603 && saved_regs_mask & (1 << LR_REGNUM)
10604 && !current_function_calls_eh_return)
10606 saved_regs_mask &= ~ (1 << LR_REGNUM);
10607 saved_regs_mask |= (1 << PC_REGNUM);
10608 rfe = IS_INTERRUPT (func_type);
/* Load the registers off the stack.  If we only have one register
   to load, use the LDR instruction - it is faster.  For Thumb-2,
   always use pop and the assembler will pick the best instruction.  */
10616 if (TARGET_ARM && saved_regs_mask == (1 << LR_REGNUM)
10617 && !IS_INTERRUPT(func_type))
10619 asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
10621 else if (saved_regs_mask)
10623 if (saved_regs_mask & (1 << SP_REGNUM))
10624 /* Note - write back to the stack register is not enabled
10625 (i.e. "ldmfd sp!..."). We know that the stack pointer is
10626 in the list of registers and if we add writeback the
10627 instruction becomes UNPREDICTABLE. */
10628 print_multi_reg (f, "ldmfd\t%r, ", SP_REGNUM, saved_regs_mask,
10630 else if (TARGET_ARM)
10631 print_multi_reg (f, "ldmfd\t%r!, ", SP_REGNUM, saved_regs_mask,
10634 print_multi_reg (f, "pop\t", SP_REGNUM, saved_regs_mask, 0);
10637 if (current_function_pretend_args_size)
10639 /* Unwind the pre-pushed regs. */
10640 operands[0] = operands[1] = stack_pointer_rtx;
10641 operands[2] = GEN_INT (current_function_pretend_args_size);
10642 output_add_immediate (operands);
10646 /* We may have already restored PC directly from the stack. */
10647 if (!really_return || saved_regs_mask & (1 << PC_REGNUM))
10650 /* Stack adjustment for exception handler. */
10651 if (current_function_calls_eh_return)
10652 asm_fprintf (f, "\tadd\t%r, %r, %r\n", SP_REGNUM, SP_REGNUM,
10653 ARM_EH_STACKADJ_REGNUM);
10655 /* Generate the return instruction. */
10656 switch ((int) ARM_FUNC_TYPE (func_type))
10660 asm_fprintf (f, "\tsubs\t%r, %r, #4\n", PC_REGNUM, LR_REGNUM);
10663 case ARM_FT_EXCEPTION:
10664 asm_fprintf (f, "\tmovs\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10667 case ARM_FT_INTERWORKED:
10668 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10672 if (IS_STACKALIGN (func_type))
10674 /* See comment in arm_expand_prologue. */
10675 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, 0);
10677 if (arm_arch5 || arm_arch4t)
10678 asm_fprintf (f, "\tbx\t%r\n", LR_REGNUM);
10680 asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, LR_REGNUM);
10688 arm_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
10689 HOST_WIDE_INT frame_size ATTRIBUTE_UNUSED)
10691 arm_stack_offsets *offsets;
10697 /* Emit any call-via-reg trampolines that are needed for v4t support
10698 of call_reg and call_value_reg type insns. */
10699 for (regno = 0; regno < LR_REGNUM; regno++)
10701 rtx label = cfun->machine->call_via[regno];
10705 switch_to_section (function_section (current_function_decl));
10706 targetm.asm_out.internal_label (asm_out_file, "L",
10707 CODE_LABEL_NUMBER (label));
10708 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
10712 /* ??? Probably not safe to set this here, since it assumes that a
10713 function will be emitted as assembly immediately after we generate
10714 RTL for it. This does not happen for inline functions. */
10715 return_used_this_function = 0;
10717 else /* TARGET_32BIT */
10719 /* We need to take into account any stack-frame rounding. */
10720 offsets = arm_get_frame_offsets ();
10722 gcc_assert (!use_return_insn (FALSE, NULL)
10723 || !return_used_this_function
10724 || offsets->saved_regs == offsets->outgoing_args
10725 || frame_pointer_needed);
10727 /* Reset the ARM-specific per-function variables. */
10728 after_arm_reorg = 0;
10732 /* Generate and emit an insn that we will recognize as a push_multi.
10733 Unfortunately, since this insn does not reflect very well the actual
10734 semantics of the operation, we need to annotate the insn for the benefit
10735 of DWARF2 frame unwind information. */
10737 emit_multi_reg_push (unsigned long mask)
10740 int num_dwarf_regs;
10744 int dwarf_par_index;
10747 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10748 if (mask & (1 << i))
10751 gcc_assert (num_regs && num_regs <= 16);
10753 /* We don't record the PC in the dwarf frame information. */
10754 num_dwarf_regs = num_regs;
10755 if (mask & (1 << PC_REGNUM))
10758 /* For the body of the insn we are going to generate an UNSPEC in
10759 parallel with several USEs. This allows the insn to be recognized
10760 by the push_multi pattern in the arm.md file. The insn looks
   something like this:

     (parallel [
	 (set (mem:BLK (pre_dec:BLK (reg:SI sp)))
	      (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
	 (use (reg:SI 11 fp))
	 (use (reg:SI 12 ip))
	 (use (reg:SI 14 lr))
	 (use (reg:SI 15 pc))
      ])
10772 For the frame note however, we try to be more explicit and actually
10773 show each register being stored into the stack frame, plus a (single)
10774 decrement of the stack pointer. We do it this way in order to be
10775 friendly to the stack unwinding code, which only wants to see a single
10776 stack decrement per instruction. The RTL we generate for the note looks
   something like this:

     (sequence [
	 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
	 (set (mem:SI (reg:SI sp)) (reg:SI r4))
	 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI fp))
	 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI ip))
	 (set (mem:SI (plus:SI (reg:SI sp) (const_int 12))) (reg:SI lr))
      ])
This sequence is used both by the code to support stack unwinding for
exception handlers and the code to generate dwarf2 frame debugging.  */
10790 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
10791 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
10792 dwarf_par_index = 1;
10794 for (i = 0; i <= LAST_ARM_REGNUM; i++)
10796 if (mask & (1 << i))
10798 reg = gen_rtx_REG (SImode, i);
10800 XVECEXP (par, 0, 0)
10801 = gen_rtx_SET (VOIDmode,
10802 gen_frame_mem (BLKmode,
10803 gen_rtx_PRE_DEC (BLKmode,
10804 stack_pointer_rtx)),
10805 gen_rtx_UNSPEC (BLKmode,
10806 gen_rtvec (1, reg),
10807 UNSPEC_PUSH_MULT));
10809 if (i != PC_REGNUM)
10811 tmp = gen_rtx_SET (VOIDmode,
10812 gen_frame_mem (SImode, stack_pointer_rtx),
10814 RTX_FRAME_RELATED_P (tmp) = 1;
10815 XVECEXP (dwarf, 0, dwarf_par_index) = tmp;
10823 for (j = 1, i++; j < num_regs; i++)
10825 if (mask & (1 << i))
10827 reg = gen_rtx_REG (SImode, i);
10829 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
10831 if (i != PC_REGNUM)
10834 = gen_rtx_SET (VOIDmode,
10835 gen_frame_mem (SImode,
10836 plus_constant (stack_pointer_rtx,
10839 RTX_FRAME_RELATED_P (tmp) = 1;
10840 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
10847 par = emit_insn (par);
10849 tmp = gen_rtx_SET (VOIDmode,
10851 plus_constant (stack_pointer_rtx, -4 * num_regs));
10852 RTX_FRAME_RELATED_P (tmp) = 1;
10853 XVECEXP (dwarf, 0, 0) = tmp;
10855 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10860 /* Calculate the size of the return value that is passed in registers. */
10862 arm_size_return_regs (void)
10864 enum machine_mode mode;
10866 if (current_function_return_rtx != 0)
10867 mode = GET_MODE (current_function_return_rtx);
10869 mode = DECL_MODE (DECL_RESULT (current_function_decl));
10871 return GET_MODE_SIZE (mode);
10875 emit_sfm (int base_reg, int count)
10882 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
10883 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
10885 reg = gen_rtx_REG (XFmode, base_reg++);
10887 XVECEXP (par, 0, 0)
10888 = gen_rtx_SET (VOIDmode,
10889 gen_frame_mem (BLKmode,
10890 gen_rtx_PRE_DEC (BLKmode,
10891 stack_pointer_rtx)),
10892 gen_rtx_UNSPEC (BLKmode,
10893 gen_rtvec (1, reg),
10894 UNSPEC_PUSH_MULT));
10895 tmp = gen_rtx_SET (VOIDmode,
10896 gen_frame_mem (XFmode, stack_pointer_rtx), reg);
10897 RTX_FRAME_RELATED_P (tmp) = 1;
10898 XVECEXP (dwarf, 0, 1) = tmp;
10900 for (i = 1; i < count; i++)
10902 reg = gen_rtx_REG (XFmode, base_reg++);
10903 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
10905 tmp = gen_rtx_SET (VOIDmode,
10906 gen_frame_mem (XFmode,
10907 plus_constant (stack_pointer_rtx,
10910 RTX_FRAME_RELATED_P (tmp) = 1;
10911 XVECEXP (dwarf, 0, i + 1) = tmp;
10914 tmp = gen_rtx_SET (VOIDmode,
10916 plus_constant (stack_pointer_rtx, -12 * count));
10918 RTX_FRAME_RELATED_P (tmp) = 1;
10919 XVECEXP (dwarf, 0, 0) = tmp;
10921 par = emit_insn (par);
10922 REG_NOTES (par) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
10928 /* Return true if the current function needs to save/restore LR. */
10931 thumb_force_lr_save (void)
10933 return !cfun->machine->lr_save_eliminated
10934 && (!leaf_function_p ()
10935 || thumb_far_jump_used_p ()
10936 || regs_ever_live [LR_REGNUM]);
10940 /* Compute the distance from register FROM to register TO.
10941 These can be the arg pointer (26), the soft frame pointer (25),
10942 the stack pointer (13) or the hard frame pointer (11).
10943 In thumb mode r7 is used as the soft frame pointer, if needed.
   Typical stack layout looks like this:

       old stack pointer -> |    |
			     ----
			    |    | \
			    |    |   saved arguments for
			    |    |   vararg functions
			    |    | /
			      --
   hard FP & arg pointer -> |    | \
			    |    |   stack
			    |    |   frame
			    |    | /
			      --
			    |    | \
			    |    |   call saved
			    |    |   registers
      soft frame pointer -> |    | /
			      --
			    |    | \
			    |    |   local
			    |    |   variables
     locals base pointer -> |    | /
			      --
			    |    | \
			    |    |   outgoing
			    |    |   arguments
   current stack pointer -> |    | /
			      --
10974 For a given function some or all of these stack components
10975 may not be needed, giving rise to the possibility of
10976 eliminating some of the registers.
10978 The values returned by this function must reflect the behavior
10979 of arm_expand_prologue() and arm_compute_save_reg_mask().
10981 The sign of the number returned reflects the direction of stack
10982 growth, so the values are positive for all eliminations except
10983 from the soft frame pointer to the hard frame pointer.
SFP may point just inside the local variables block to ensure correct
alignment.  */
10989 /* Calculate stack offsets. These are used to calculate register elimination
10990 offsets and in prologue/epilogue code. */
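/* A worked example (illustrative, assuming doubleword stack alignment
   and no caller interworking slot): an ARM function with no pretend
   args that saves { r4, r5, r6, r7, lr } (20 bytes) and has 8 bytes
   of locals gets saved_args == 0, saved_regs == 20, soft_frame == 24
   (padded up for doubleword alignment), locals_base == 32 and, with
   no outgoing arguments, outgoing_args == 32.  */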
10992 static arm_stack_offsets *
10993 arm_get_frame_offsets (void)
10995 struct arm_stack_offsets *offsets;
10996 unsigned long func_type;
10999 HOST_WIDE_INT frame_size;
11001 offsets = &cfun->machine->stack_offsets;
11003 /* We need to know if we are a leaf function. Unfortunately, it
11004 is possible to be called after start_sequence has been called,
11005 which causes get_insns to return the insns for the sequence,
11006 not the function, which will cause leaf_function_p to return
the incorrect result.

To work around this, we cache the computed frame size.  This
works because we will only be calling RTL expanders that need
to know about leaf functions once reload has completed, and the
11010 frame size cannot be changed after that time, so we can safely
11011 use the cached value. */
11013 if (reload_completed)
/* Initially this is the size of the local variables.  It will be
   translated into an offset once we have determined the size of
   preceding data.  */
11018 frame_size = ROUND_UP_WORD (get_frame_size ());
11020 leaf = leaf_function_p ();
11022 /* Space for variadic functions. */
11023 offsets->saved_args = current_function_pretend_args_size;
11025 /* In Thumb mode this is incorrect, but never used. */
11026 offsets->frame = offsets->saved_args + (frame_pointer_needed ? 4 : 0);
11030 unsigned int regno;
11032 saved = bit_count (arm_compute_save_reg_mask ()) * 4;
11034 /* We know that SP will be doubleword aligned on entry, and we must
11035 preserve that condition at any subroutine call. We also require the
11036 soft frame pointer to be doubleword aligned. */
11038 if (TARGET_REALLY_IWMMXT)
11040 /* Check for the call-saved iWMMXt registers. */
11041 for (regno = FIRST_IWMMXT_REGNUM;
11042 regno <= LAST_IWMMXT_REGNUM;
11044 if (regs_ever_live [regno] && ! call_used_regs [regno])
11048 func_type = arm_current_func_type ();
11049 if (! IS_VOLATILE (func_type))
11051 /* Space for saved FPA registers. */
11052 for (regno = FIRST_FPA_REGNUM; regno <= LAST_FPA_REGNUM; regno++)
11053 if (regs_ever_live[regno] && ! call_used_regs[regno])
11056 /* Space for saved VFP registers. */
11057 if (TARGET_HARD_FLOAT && TARGET_VFP)
11058 saved += arm_get_vfp_saved_size ();
11061 else /* TARGET_THUMB1 */
11063 saved = bit_count (thumb1_compute_save_reg_mask ()) * 4;
11064 if (TARGET_BACKTRACE)
11068 /* Saved registers include the stack frame. */
11069 offsets->saved_regs = offsets->saved_args + saved;
11070 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
/* A leaf function does not need any stack alignment if it has nothing
   on the stack.  */
11073 if (leaf && frame_size == 0)
11075 offsets->outgoing_args = offsets->soft_frame;
11079 /* Ensure SFP has the correct alignment. */
11080 if (ARM_DOUBLEWORD_ALIGN
11081 && (offsets->soft_frame & 7))
11082 offsets->soft_frame += 4;
11084 offsets->locals_base = offsets->soft_frame + frame_size;
11085 offsets->outgoing_args = (offsets->locals_base
11086 + current_function_outgoing_args_size);
11088 if (ARM_DOUBLEWORD_ALIGN)
11090 /* Ensure SP remains doubleword aligned. */
11091 if (offsets->outgoing_args & 7)
11092 offsets->outgoing_args += 4;
11093 gcc_assert (!(offsets->outgoing_args & 7));
11100 /* Calculate the relative offsets for the different stack pointers. Positive
11101 offsets are in the direction of stack growth. */
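/* For example (illustrative): eliminating FRAME_POINTER_REGNUM to
   STACK_POINTER_REGNUM below returns offsets->outgoing_args
   - offsets->soft_frame; with the offsets from the worked example
   above, that is 32 - 24 == 8.  */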
11104 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
11106 arm_stack_offsets *offsets;
11108 offsets = arm_get_frame_offsets ();
11110 /* OK, now we have enough information to compute the distances.
11111 There must be an entry in these switch tables for each pair
11112 of registers in ELIMINABLE_REGS, even if some of the entries
11113 seem to be redundant or useless. */
11116 case ARG_POINTER_REGNUM:
11119 case THUMB_HARD_FRAME_POINTER_REGNUM:
11122 case FRAME_POINTER_REGNUM:
11123 /* This is the reverse of the soft frame pointer
11124 to hard frame pointer elimination below. */
11125 return offsets->soft_frame - offsets->saved_args;
11127 case ARM_HARD_FRAME_POINTER_REGNUM:
11128 /* If there is no stack frame then the hard
11129 frame pointer and the arg pointer coincide. */
11130 if (offsets->frame == offsets->saved_regs)
11132 /* FIXME: Not sure about this. Maybe we should always return 0 ? */
11133 return (frame_pointer_needed
11134 && cfun->static_chain_decl != NULL
11135 && ! cfun->machine->uses_anonymous_args) ? 4 : 0;
11137 case STACK_POINTER_REGNUM:
11138 /* If nothing has been pushed on the stack at all
11139 then this will return -4. This *is* correct! */
11140 return offsets->outgoing_args - (offsets->saved_args + 4);
11143 gcc_unreachable ();
11145 gcc_unreachable ();
11147 case FRAME_POINTER_REGNUM:
11150 case THUMB_HARD_FRAME_POINTER_REGNUM:
11153 case ARM_HARD_FRAME_POINTER_REGNUM:
11154 /* The hard frame pointer points to the top entry in the
11155 stack frame. The soft frame pointer to the bottom entry
11156 in the stack frame. If there is no stack frame at all,
11157 then they are identical. */
11159 return offsets->frame - offsets->soft_frame;
11161 case STACK_POINTER_REGNUM:
11162 return offsets->outgoing_args - offsets->soft_frame;
11165 gcc_unreachable ();
11167 gcc_unreachable ();
11170 /* You cannot eliminate from the stack pointer.
11171 In theory you could eliminate from the hard frame
11172 pointer to the stack pointer, but this will never
11173 happen, since if a stack frame is not needed the
11174 hard frame pointer will never be used. */
11175 gcc_unreachable ();
11180 /* Emit RTL to save coprocessor registers on function entry. Returns the
11181 number of bytes pushed. */
11184 arm_save_coproc_regs(void)
11186 int saved_size = 0;
11188 unsigned start_reg;
11191 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
11192 if (regs_ever_live[reg] && ! call_used_regs [reg])
11194 insn = gen_rtx_PRE_DEC (V2SImode, stack_pointer_rtx);
11195 insn = gen_rtx_MEM (V2SImode, insn);
11196 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
11197 RTX_FRAME_RELATED_P (insn) = 1;
/* Save any floating point call-saved registers used by this
   function.  */
11203 if (arm_fpu_arch == FPUTYPE_FPA_EMU2)
11205 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
11206 if (regs_ever_live[reg] && !call_used_regs[reg])
11208 insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
11209 insn = gen_rtx_MEM (XFmode, insn);
11210 insn = emit_set_insn (insn, gen_rtx_REG (XFmode, reg));
11211 RTX_FRAME_RELATED_P (insn) = 1;
11217 start_reg = LAST_FPA_REGNUM;
11219 for (reg = LAST_FPA_REGNUM; reg >= FIRST_FPA_REGNUM; reg--)
11221 if (regs_ever_live[reg] && !call_used_regs[reg])
11223 if (start_reg - reg == 3)
11225 insn = emit_sfm (reg, 4);
11226 RTX_FRAME_RELATED_P (insn) = 1;
11228 start_reg = reg - 1;
11233 if (start_reg != reg)
11235 insn = emit_sfm (reg + 1, start_reg - reg);
11236 RTX_FRAME_RELATED_P (insn) = 1;
11237 saved_size += (start_reg - reg) * 12;
11239 start_reg = reg - 1;
11243 if (start_reg != reg)
11245 insn = emit_sfm (reg + 1, start_reg - reg);
11246 saved_size += (start_reg - reg) * 12;
11247 RTX_FRAME_RELATED_P (insn) = 1;
11250 if (TARGET_HARD_FLOAT && TARGET_VFP)
11252 start_reg = FIRST_VFP_REGNUM;
11254 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
11256 if ((!regs_ever_live[reg] || call_used_regs[reg])
11257 && (!regs_ever_live[reg + 1] || call_used_regs[reg + 1]))
11259 if (start_reg != reg)
11260 saved_size += vfp_emit_fstmd (start_reg,
11261 (reg - start_reg) / 2);
11262 start_reg = reg + 2;
11265 if (start_reg != reg)
11266 saved_size += vfp_emit_fstmd (start_reg,
11267 (reg - start_reg) / 2);
11273 /* Set the Thumb frame pointer from the stack pointer. */
11276 thumb_set_frame_pointer (arm_stack_offsets *offsets)
11278 HOST_WIDE_INT amount;
11281 amount = offsets->outgoing_args - offsets->locals_base;
11283 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
11284 stack_pointer_rtx, GEN_INT (amount)));
11287 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
11288 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
11289 hard_frame_pointer_rtx,
11290 stack_pointer_rtx));
11291 dwarf = gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
11292 plus_constant (stack_pointer_rtx, amount));
11293 RTX_FRAME_RELATED_P (dwarf) = 1;
11294 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
11298 RTX_FRAME_RELATED_P (insn) = 1;
/* Generate the prologue instructions for entry into an ARM or Thumb-2
   function.  */
11304 arm_expand_prologue (void)
11309 unsigned long live_regs_mask;
11310 unsigned long func_type;
11312 int saved_pretend_args = 0;
11313 int saved_regs = 0;
11314 unsigned HOST_WIDE_INT args_to_push;
11315 arm_stack_offsets *offsets;
11317 func_type = arm_current_func_type ();
11319 /* Naked functions don't have prologues. */
11320 if (IS_NAKED (func_type))
11323 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
11324 args_to_push = current_function_pretend_args_size;
/* Compute which registers we will have to save onto the stack.  */
11327 live_regs_mask = arm_compute_save_reg_mask ();
11329 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
11331 if (IS_STACKALIGN (func_type))
/* Handle a word-aligned stack pointer.  We generate the following:

	mov r0, sp
	bic r1, r0, #7
	mov sp, r1
	<save and restore r0 in normal prologue/epilogue>
	mov sp, r0
11345 The unwinder doesn't need to know about the stack realignment.
11346 Just tell it we saved SP in r0. */
11347 gcc_assert (TARGET_THUMB2 && !arm_arch_notm && args_to_push == 0);
11349 r0 = gen_rtx_REG (SImode, 0);
11350 r1 = gen_rtx_REG (SImode, 1);
11351 dwarf = gen_rtx_UNSPEC (SImode, NULL_RTVEC, UNSPEC_STACK_ALIGN);
11352 dwarf = gen_rtx_SET (VOIDmode, r0, dwarf);
11353 insn = gen_movsi (r0, stack_pointer_rtx);
11354 RTX_FRAME_RELATED_P (insn) = 1;
11355 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
11356 dwarf, REG_NOTES (insn));
11358 emit_insn (gen_andsi3 (r1, r0, GEN_INT (~(HOST_WIDE_INT)7)));
11359 emit_insn (gen_movsi (stack_pointer_rtx, r1));
11362 if (frame_pointer_needed && TARGET_ARM)
11364 if (IS_INTERRUPT (func_type))
11366 /* Interrupt functions must not corrupt any registers.
11367 Creating a frame pointer however, corrupts the IP
11368 register, so we must push it first. */
11369 insn = emit_multi_reg_push (1 << IP_REGNUM);
11371 /* Do not set RTX_FRAME_RELATED_P on this insn.
11372 The dwarf stack unwinding code only wants to see one
11373 stack decrement per function, and this is not it. If
11374 this instruction is labeled as being part of the frame
11375 creation sequence then dwarf2out_frame_debug_expr will
11376 die when it encounters the assignment of IP to FP
11377 later on, since the use of SP here establishes SP as
11378 the CFA register and not IP.
11380 Anyway this instruction is not really part of the stack
11381 frame creation although it is part of the prologue. */
11383 else if (IS_NESTED (func_type))
11385 /* The Static chain register is the same as the IP register
11386 used as a scratch register during stack frame creation.
To get around this, we need to find somewhere to store IP
whilst the frame is being created.  We try the following
places in order:

  1. The last argument register.
  2. A slot on the stack above the frame.  (This only
     works if the function is not a varargs function).
  3. Register r3, after pushing the argument registers
     onto the stack.
11397 Note - we only need to tell the dwarf2 backend about the SP
11398 adjustment in the second variant; the static chain register
11399 doesn't need to be unwound, as it doesn't contain a value
11400 inherited from the caller. */
11402 if (regs_ever_live[3] == 0)
11403 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
11404 else if (args_to_push == 0)
11408 insn = gen_rtx_PRE_DEC (SImode, stack_pointer_rtx);
11409 insn = emit_set_insn (gen_frame_mem (SImode, insn), ip_rtx);
11412 /* Just tell the dwarf backend that we adjusted SP. */
	      dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
				   plus_constant (stack_pointer_rtx,
						  -4));
11416 RTX_FRAME_RELATED_P (insn) = 1;
11417 REG_NOTES (insn) = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR,
11418 dwarf, REG_NOTES (insn));
11422 /* Store the args on the stack. */
11423 if (cfun->machine->uses_anonymous_args)
11424 insn = emit_multi_reg_push
11425 ((0xf0 >> (args_to_push / 4)) & 0xf);
	      else
		insn = emit_insn
		  (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11429 GEN_INT (- args_to_push)));
11431 RTX_FRAME_RELATED_P (insn) = 1;
11433 saved_pretend_args = 1;
11434 fp_offset = args_to_push;
11437 /* Now reuse r3 to preserve IP. */
11438 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
11442 insn = emit_set_insn (ip_rtx,
11443 plus_constant (stack_pointer_rtx, fp_offset));
11444 RTX_FRAME_RELATED_P (insn) = 1;
11449 /* Push the argument registers, or reserve space for them. */
11450 if (cfun->machine->uses_anonymous_args)
11451 insn = emit_multi_reg_push
11452 ((0xf0 >> (args_to_push / 4)) & 0xf);
      else
	insn = emit_insn
	  (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
11456 GEN_INT (- args_to_push)));
11457 RTX_FRAME_RELATED_P (insn) = 1;
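  /* A worked example of the mask computed above: with args_to_push == 8
     the anonymous arguments occupy two registers, so
     (0xf0 >> (8 / 4)) & 0xf == 0xc, i.e. a push of {r2, r3}.  (Purely
     illustrative; the actual mask depends on args_to_push.)  */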
11460 /* If this is an interrupt service routine, and the link register
11461 is going to be pushed, and we are not creating a stack frame,
11462 (which would involve an extra push of IP and a pop in the epilogue)
11463 subtracting four from LR now will mean that the function return
11464 can be done with a single instruction. */
11465 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
11466 && (live_regs_mask & (1 << LR_REGNUM)) != 0
      && ! frame_pointer_needed
      && TARGET_ARM)
    {
11470 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
11472 emit_set_insn (lr, plus_constant (lr, -4));
11475 if (live_regs_mask)
11477 insn = emit_multi_reg_push (live_regs_mask);
11478 saved_regs += bit_count (live_regs_mask) * 4;
11479 RTX_FRAME_RELATED_P (insn) = 1;
11482 if (! IS_VOLATILE (func_type))
11483 saved_regs += arm_save_coproc_regs ();
11485 if (frame_pointer_needed && TARGET_ARM)
11487 /* Create the new frame pointer. */
11489 insn = GEN_INT (-(4 + args_to_push + fp_offset));
11490 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
11491 RTX_FRAME_RELATED_P (insn) = 1;
11493 if (IS_NESTED (func_type))
11495 /* Recover the static chain register. */
11496 if (regs_ever_live [3] == 0
11497 || saved_pretend_args)
11498 insn = gen_rtx_REG (SImode, 3);
11499 else /* if (current_function_pretend_args_size == 0) */
11501 insn = plus_constant (hard_frame_pointer_rtx, 4);
11502 insn = gen_frame_mem (SImode, insn);
11504 emit_set_insn (ip_rtx, insn);
11505 /* Add a USE to stop propagate_one_insn() from barfing. */
11506 emit_insn (gen_prologue_use (ip_rtx));
11511 offsets = arm_get_frame_offsets ();
11512 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
11514 /* This add can produce multiple insns for a large constant, so we
11515 need to get tricky. */
11516 rtx last = get_last_insn ();
11518 amount = GEN_INT (offsets->saved_args + saved_regs
11519 - offsets->outgoing_args);
      insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
				    amount));
      do
	{
	  last = last ? NEXT_INSN (last) : get_insns ();
	  RTX_FRAME_RELATED_P (last) = 1;
	}
      while (last != insn);
11530 /* If the frame pointer is needed, emit a special barrier that
11531 will prevent the scheduler from moving stores to the frame
11532 before the stack adjustment. */
11533 if (frame_pointer_needed)
11534 insn = emit_insn (gen_stack_tie (stack_pointer_rtx,
11535 hard_frame_pointer_rtx));
11539 if (frame_pointer_needed && TARGET_THUMB2)
11540 thumb_set_frame_pointer (offsets);
11542 if (flag_pic && arm_pic_register != INVALID_REGNUM)
11544 unsigned long mask;
11546 mask = live_regs_mask;
11547 mask &= THUMB2_WORK_REGS;
11548 if (!IS_NESTED (func_type))
11549 mask |= (1 << IP_REGNUM);
11550 arm_load_pic_register (mask);
11553 /* If we are profiling, make sure no instructions are scheduled before
11554 the call to mcount. Similarly if the user has requested no
11555 scheduling in the prolog. Similarly if we want non-call exceptions
11556 using the EABI unwinder, to prevent faulting instructions from being
11557 swapped with a stack adjustment. */
11558 if (current_function_profile || !TARGET_SCHED_PROLOG
11559 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
11560 emit_insn (gen_blockage ());
11562 /* If the link register is being kept alive, with the return address in it,
11563 then make sure that it does not get reused by the ce2 pass. */
11564 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
11566 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
11567 cfun->machine->lr_save_eliminated = 1;
/* Print condition code to STREAM.  Helper function for arm_print_operand.  */
static void
arm_print_condition (FILE *stream)
{
11575 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
      /* Branch conversion is not implemented for Thumb-2.  */
      if (TARGET_THUMB)
	{
	  output_operand_lossage ("predicated Thumb instruction");
	  return;
	}
11583 if (current_insn_predicate != NULL)
11585 output_operand_lossage
11586 ("predicated instruction in conditional sequence");
11590 fputs (arm_condition_codes[arm_current_cc], stream);
11592 else if (current_insn_predicate)
11594 enum arm_cond_code code;
      if (TARGET_THUMB)
	{
	  output_operand_lossage ("predicated Thumb instruction");
	  return;
	}
11602 code = get_arm_condition_code (current_insn_predicate);
11603 fputs (arm_condition_codes[code], stream);
/* If CODE is 'd', then the X is a condition operand and the instruction
   should only be executed if the condition is true.
   If CODE is 'D', then the X is a condition operand and the instruction
   should only be executed if the condition is false: however, if the mode
   of the comparison is CCFPEmode, then always execute the instruction -- we
   do this because in these circumstances !GE does not necessarily imply LT;
   in these cases the instruction pattern will take care to make sure that
   an instruction containing %d will follow, thereby undoing the effects of
   doing this instruction unconditionally.
   If CODE is 'N' then X is a floating point operand that must be negated
   before output.
   If CODE is 'B' then output a bitwise inverted value of X (a const int).
   If X is a REG and CODE is `M', output a ldm/stm style multi-reg.  */
void
arm_print_operand (FILE *stream, rtx x, int code)
{
  switch (code)
    {
    case '@':
11627 fputs (ASM_COMMENT_START, stream);
11631 fputs (user_label_prefix, stream);
11635 fputs (REGISTER_PREFIX, stream);
11639 arm_print_condition (stream);
11643 /* Nothing in unified syntax, otherwise the current condition code. */
11644 if (!TARGET_UNIFIED_ASM)
11645 arm_print_condition (stream);
11649 /* The current condition code in unified syntax, otherwise nothing. */
11650 if (TARGET_UNIFIED_ASM)
11651 arm_print_condition (stream);
11655 /* The current condition code for a condition code setting instruction.
11656 Preceded by 's' in unified syntax, otherwise followed by 's'. */
      if (TARGET_UNIFIED_ASM)
	{
	  fputc ('s', stream);
	  arm_print_condition (stream);
	}
      else
	{
	  arm_print_condition (stream);
	  fputc ('s', stream);
	}
      break;
11670 /* If the instruction is conditionally executed then print
11671 the current condition code, otherwise print 's'. */
11672 gcc_assert (TARGET_THUMB2 && TARGET_UNIFIED_ASM);
      if (current_insn_predicate)
	arm_print_condition (stream);
      else
	fputc ('s', stream);
      break;
11682 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
11683 r = REAL_VALUE_NEGATE (r);
11684 fprintf (stream, "%s", fp_const_from_val (&r));
11689 if (GET_CODE (x) == CONST_INT)
11692 val = ARM_SIGN_EXTEND (~INTVAL (x));
11693 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
11697 putc ('~', stream);
11698 output_addr_const (stream, x);
11703 /* The low 16 bits of an immediate constant. */
      fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
11708 fprintf (stream, "%s", arithmetic_instr (x, 1));
11711 /* Truncate Cirrus shift counts. */
11713 if (GET_CODE (x) == CONST_INT)
11715 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0x3f);
11718 arm_print_operand (stream, x, 0);
11722 fprintf (stream, "%s", arithmetic_instr (x, 0));
11730 if (!shift_operator (x, SImode))
11732 output_operand_lossage ("invalid shift operand");
11736 shift = shift_op (x, &val);
11740 fprintf (stream, ", %s ", shift);
11742 arm_print_operand (stream, XEXP (x, 1), 0);
11744 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
11749 /* An explanation of the 'Q', 'R' and 'H' register operands:
11751 In a pair of registers containing a DI or DF value the 'Q'
11752 operand returns the register number of the register containing
11753 the least significant part of the value. The 'R' operand returns
11754 the register number of the register containing the most
11755 significant part of the value.
11757 The 'H' operand returns the higher of the two register numbers.
11758 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
11759 same as the 'Q' operand, since the most significant part of the
11760 value is held in the lower number register. The reverse is true
11761 on systems where WORDS_BIG_ENDIAN is false.
11763 The purpose of these operands is to distinguish between cases
11764 where the endian-ness of the values is important (for example
11765 when they are added together), and cases where the endian-ness
11766 is irrelevant, but the order of register operations is important.
11767 For example when loading a value from memory into a register
11768 pair, the endian-ness does not matter. Provided that the value
11769 from the lower memory address is put into the lower numbered
11770 register, and the value from the higher address is put into the
11771 higher numbered register, the load will work regardless of whether
11772 the value being loaded is big-wordian or little-wordian. The
11773 order of the two register loads can matter however, if the address
11774 of the memory location is actually held in one of the registers
11775 being overwritten by the load. */
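  /* For example (illustrative), a DImode value in {r0, r1} on a
     little-endian target has %Q == r0 (least significant half),
     %R == r1 (most significant half) and %H == r1; when
     WORDS_BIG_ENDIAN is true, %Q and %R swap while %H is still r1.  */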
11777 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11779 output_operand_lossage ("invalid operand for code '%c'", code);
11783 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
11787 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11789 output_operand_lossage ("invalid operand for code '%c'", code);
11793 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
11797 if (GET_CODE (x) != REG || REGNO (x) > LAST_ARM_REGNUM)
11799 output_operand_lossage ("invalid operand for code '%c'", code);
11803 asm_fprintf (stream, "%r", REGNO (x) + 1);
11807 asm_fprintf (stream, "%r",
11808 GET_CODE (XEXP (x, 0)) == REG
11809 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
11813 asm_fprintf (stream, "{%r-%r}",
11815 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
11819 /* CONST_TRUE_RTX means always -- that's the default. */
      if (x == const_true_rtx)
	return;
11823 if (!COMPARISON_P (x))
11825 output_operand_lossage ("invalid operand for code '%c'", code);
11829 fputs (arm_condition_codes[get_arm_condition_code (x)],
11834 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
11835 want to do that. */
      if (x == const_true_rtx)
	{
	  output_operand_lossage ("instruction never executed");
	  return;
	}
11841 if (!COMPARISON_P (x))
11843 output_operand_lossage ("invalid operand for code '%c'", code);
11847 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
11848 (get_arm_condition_code (x))],
11852 /* Cirrus registers can be accessed in a variety of ways:
11853 single floating point (f)
11854 double floating point (d)
11856 64bit integer (dx). */
11857 case 'W': /* Cirrus register in F mode. */
11858 case 'X': /* Cirrus register in D mode. */
11859 case 'Y': /* Cirrus register in FX mode. */
11860 case 'Z': /* Cirrus register in DX mode. */
11861 gcc_assert (GET_CODE (x) == REG
11862 && REGNO_REG_CLASS (REGNO (x)) == CIRRUS_REGS);
11864 fprintf (stream, "mv%s%s",
11866 : code == 'X' ? "d"
11867 : code == 'Y' ? "fx" : "dx", reg_names[REGNO (x)] + 2);
11871 /* Print cirrus register in the mode specified by the register's mode. */
11874 int mode = GET_MODE (x);
11876 if (GET_CODE (x) != REG || REGNO_REG_CLASS (REGNO (x)) != CIRRUS_REGS)
11878 output_operand_lossage ("invalid operand for code '%c'", code);
11882 fprintf (stream, "mv%s%s",
11883 mode == DFmode ? "d"
11884 : mode == SImode ? "fx"
11885 : mode == DImode ? "dx"
11886 : "f", reg_names[REGNO (x)] + 2);
11892 if (GET_CODE (x) != REG
11893 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
11894 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
11895 /* Bad value for wCG register number. */
11897 output_operand_lossage ("invalid operand for code '%c'", code);
11902 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
11905 /* Print an iWMMXt control register name. */
11907 if (GET_CODE (x) != CONST_INT
11909 || INTVAL (x) >= 16)
11910 /* Bad value for wC register number. */
11912 output_operand_lossage ("invalid operand for code '%c'", code);
11918 static const char * wc_reg_names [16] =
11920 "wCID", "wCon", "wCSSF", "wCASF",
11921 "wC4", "wC5", "wC6", "wC7",
11922 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
11923 "wC12", "wC13", "wC14", "wC15"
	fprintf (stream, "%s", wc_reg_names[INTVAL (x)]);
11930 /* Print a VFP double precision register name. */
11933 int mode = GET_MODE (x);
11936 if (mode != DImode && mode != DFmode)
11938 output_operand_lossage ("invalid operand for code '%c'", code);
11942 if (GET_CODE (x) != REG
11943 || !IS_VFP_REGNUM (REGNO (x)))
11945 output_operand_lossage ("invalid operand for code '%c'", code);
      num = REGNO (x) - FIRST_VFP_REGNUM;
11952 output_operand_lossage ("invalid operand for code '%c'", code);
11956 fprintf (stream, "d%d", num >> 1);
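	/* E.g. a DFmode value in s10/s11 (offset 10 from
	   FIRST_VFP_REGNUM) prints as "d5".  (Illustrative.)  */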
11963 output_operand_lossage ("missing operand");
11967 switch (GET_CODE (x))
11970 asm_fprintf (stream, "%r", REGNO (x));
11974 output_memory_reference_mode = GET_MODE (x);
11975 output_address (XEXP (x, 0));
11979 fprintf (stream, "#%s", fp_immediate_constant (x));
11983 gcc_assert (GET_CODE (x) != NEG);
11984 fputc ('#', stream);
11985 output_addr_const (stream, x);
11991 #ifndef AOF_ASSEMBLER
11992 /* Target hook for assembling integer objects. The ARM version needs to
11993 handle word-sized values specially. */
static bool
arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
11997 if (size == UNITS_PER_WORD && aligned_p)
11999 fputs ("\t.word\t", asm_out_file);
12000 output_addr_const (asm_out_file, x);
12002 /* Mark symbols as position independent. We only do this in the
12003 .text segment, not in the .data segment. */
12004 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
12005 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
12007 if (GET_CODE (x) == SYMBOL_REF
12008 && (CONSTANT_POOL_ADDRESS_P (x)
12009 || SYMBOL_REF_LOCAL_P (x)))
12010 fputs ("(GOTOFF)", asm_out_file);
12011 else if (GET_CODE (x) == LABEL_REF)
12012 fputs ("(GOTOFF)", asm_out_file);
12014 fputs ("(GOT)", asm_out_file);
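      /* The net effect when generating PIC is assembly along the lines
	 of (illustrative):

	     .word	local_sym(GOTOFF)
	     .word	global_sym(GOT)

	 for local and global symbols respectively.  */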
      fputc ('\n', asm_out_file);
      return true;
12020 if (arm_vector_mode_supported_p (GET_MODE (x)))
12024 gcc_assert (GET_CODE (x) == CONST_VECTOR);
12026 units = CONST_VECTOR_NUNITS (x);
12028 switch (GET_MODE (x))
12030 case V2SImode: size = 4; break;
12031 case V4HImode: size = 2; break;
12032 case V8QImode: size = 1; break;
12034 gcc_unreachable ();
12037 for (i = 0; i < units; i++)
12041 elt = CONST_VECTOR_ELT (x, i);
	  assemble_integer
	    (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
12049 return default_assemble_integer (x, size, aligned_p);
static void
arm_elf_asm_cdtor (rtx symbol, int priority, bool is_ctor)
{
12057 if (!TARGET_AAPCS_BASED)
      (is_ctor ?
       default_named_section_asm_out_constructor
       : default_named_section_asm_out_destructor) (symbol, priority);
      return;
    }
12065 /* Put these in the .init_array section, using a special relocation. */
12066 if (priority != DEFAULT_INIT_PRIORITY)
12069 sprintf (buf, "%s.%.5u",
	       is_ctor ? ".init_array" : ".fini_array",
	       priority);
12072 s = get_section (buf, SECTION_WRITE, NULL_TREE);
12079 switch_to_section (s);
12080 assemble_align (POINTER_SIZE);
12081 fputs ("\t.word\t", asm_out_file);
12082 output_addr_const (asm_out_file, symbol);
12083 fputs ("(target1)\n", asm_out_file);
/* Add a function to the list of static constructors.  */

static void
arm_elf_asm_constructor (rtx symbol, int priority)
{
12091 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/true);
/* Add a function to the list of static destructors.  */

static void
arm_elf_asm_destructor (rtx symbol, int priority)
{
12099 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/false);
12103 /* A finite state machine takes care of noticing whether or not instructions
12104 can be conditionally executed, and thus decrease execution time and code
12105 size by deleting branch instructions. The fsm is controlled by
12106 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
/* The states of the fsm controlling condition codes are:
12109 0: normal, do nothing special
12110 1: make ASM_OUTPUT_OPCODE not output this instruction
12111 2: make ASM_OUTPUT_OPCODE not output this instruction
12112 3: make instructions conditional
12113 4: make instructions conditional
12115 State transitions (state->state by whom under condition):
12116 0 -> 1 final_prescan_insn if the `target' is a label
12117 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
12118 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
12119 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
12120 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
12121 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
12122 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
12123 (the target insn is arm_target_insn).
12125 If the jump clobbers the conditions then we use states 2 and 4.
12127 A similar thing can be done with conditional return insns.
12129 XXX In case the `target' is an unconditional branch, this conditionalising
12130 of the instructions always reduces code size, but not always execution
12131 time. But then, I want to reduce the code size to somewhere near what
12132 /bin/cc produces. */
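/* As a concrete (illustrative) example of the transformation, the ARM
   sequence

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
   .L1:

   is rewritten by this fsm into

	cmp	r0, #0
	addne	r1, r1, #1

   when every skipped insn can be conditionalized.  */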
12134 /* In addition to this, state is maintained for Thumb-2 COND_EXEC
12135 instructions. When a COND_EXEC instruction is seen the subsequent
12136 instructions are scanned so that multiple conditional instructions can be
12137 combined into a single IT block. arm_condexec_count and arm_condexec_mask
12138 specify the length and true/false mask for the IT block. These will be
12139 decremented/zeroed by arm_asm_output_opcode as the insns are output. */
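/* For example (illustrative), a block of three conditional insns where
   the first two execute when EQ holds and the third when NE holds gives
   arm_condexec_masklen == 3 and arm_condexec_mask == 0x3, so
   thumb2_asm_output_opcode emits "itte eq" before the first insn.  */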
12141 /* Returns the index of the ARM condition code string in
12142 `arm_condition_codes'. COMPARISON should be an rtx like
12143 `(eq (...) (...))'. */
12144 static enum arm_cond_code
12145 get_arm_condition_code (rtx comparison)
12147 enum machine_mode mode = GET_MODE (XEXP (comparison, 0));
12149 enum rtx_code comp_code = GET_CODE (comparison);
12151 if (GET_MODE_CLASS (mode) != MODE_CC)
12152 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
			   XEXP (comparison, 1));

  switch (mode)
    {
12157 case CC_DNEmode: code = ARM_NE; goto dominance;
12158 case CC_DEQmode: code = ARM_EQ; goto dominance;
12159 case CC_DGEmode: code = ARM_GE; goto dominance;
12160 case CC_DGTmode: code = ARM_GT; goto dominance;
12161 case CC_DLEmode: code = ARM_LE; goto dominance;
12162 case CC_DLTmode: code = ARM_LT; goto dominance;
12163 case CC_DGEUmode: code = ARM_CS; goto dominance;
12164 case CC_DGTUmode: code = ARM_HI; goto dominance;
12165 case CC_DLEUmode: code = ARM_LS; goto dominance;
    case CC_DLTUmode: code = ARM_CC;

    dominance:
      gcc_assert (comp_code == EQ || comp_code == NE);

      if (comp_code == EQ)
	return ARM_INVERSE_CONDITION_CODE (code);
      return code;
12178 case NE: return ARM_NE;
12179 case EQ: return ARM_EQ;
12180 case GE: return ARM_PL;
12181 case LT: return ARM_MI;
12182 default: gcc_unreachable ();
12188 case NE: return ARM_NE;
12189 case EQ: return ARM_EQ;
12190 default: gcc_unreachable ();
12196 case NE: return ARM_MI;
12197 case EQ: return ARM_PL;
12198 default: gcc_unreachable ();
      /* These encodings assume that AC=1 in the FPA system control
	 byte.  This allows us to handle all cases except UNEQ and
	 LTGT.  */
12208 case GE: return ARM_GE;
12209 case GT: return ARM_GT;
12210 case LE: return ARM_LS;
12211 case LT: return ARM_MI;
12212 case NE: return ARM_NE;
12213 case EQ: return ARM_EQ;
12214 case ORDERED: return ARM_VC;
12215 case UNORDERED: return ARM_VS;
12216 case UNLT: return ARM_LT;
12217 case UNLE: return ARM_LE;
12218 case UNGT: return ARM_HI;
12219 case UNGE: return ARM_PL;
12220 /* UNEQ and LTGT do not have a representation. */
12221 case UNEQ: /* Fall through. */
12222 case LTGT: /* Fall through. */
12223 default: gcc_unreachable ();
12229 case NE: return ARM_NE;
12230 case EQ: return ARM_EQ;
12231 case GE: return ARM_LE;
12232 case GT: return ARM_LT;
12233 case LE: return ARM_GE;
12234 case LT: return ARM_GT;
12235 case GEU: return ARM_LS;
12236 case GTU: return ARM_CC;
12237 case LEU: return ARM_CS;
12238 case LTU: return ARM_HI;
12239 default: gcc_unreachable ();
12245 case LTU: return ARM_CS;
12246 case GEU: return ARM_CC;
12247 default: gcc_unreachable ();
12253 case NE: return ARM_NE;
12254 case EQ: return ARM_EQ;
12255 case GE: return ARM_GE;
12256 case GT: return ARM_GT;
12257 case LE: return ARM_LE;
12258 case LT: return ARM_LT;
12259 case GEU: return ARM_CS;
12260 case GTU: return ARM_HI;
12261 case LEU: return ARM_LS;
12262 case LTU: return ARM_CC;
12263 default: gcc_unreachable ();
12266 default: gcc_unreachable ();
/* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
   instructions.  */
void
thumb2_final_prescan_insn (rtx insn)
{
  rtx first_insn = insn;
  rtx body = PATTERN (insn);
  rtx predicate;
  enum arm_cond_code code;
  int n;
  int mask;
12282 /* Remove the previous insn from the count of insns to be output. */
12283 if (arm_condexec_count)
12284 arm_condexec_count--;
12286 /* Nothing to do if we are already inside a conditional block. */
  if (arm_condexec_count)
    return;

  if (GET_CODE (body) != COND_EXEC)
    return;
12293 /* Conditional jumps are implemented directly. */
  if (GET_CODE (insn) == JUMP_INSN)
    return;
12297 predicate = COND_EXEC_TEST (body);
12298 arm_current_cc = get_arm_condition_code (predicate);
12300 n = get_attr_ce_count (insn);
12301 arm_condexec_count = 1;
12302 arm_condexec_mask = (1 << n) - 1;
12303 arm_condexec_masklen = n;
12304 /* See if subsequent instructions can be combined into the same block. */
12307 insn = next_nonnote_insn (insn);
12309 /* Jumping into the middle of an IT block is illegal, so a label or
12310 barrier terminates the block. */
      if (GET_CODE (insn) != INSN && GET_CODE (insn) != JUMP_INSN)
	break;
12314 body = PATTERN (insn);
12315 /* USE and CLOBBER aren't really insns, so just skip them. */
12316 if (GET_CODE (body) == USE
	  || GET_CODE (body) == CLOBBER)
	continue;
12320 /* ??? Recognize conditional jumps, and combine them with IT blocks. */
      if (GET_CODE (body) != COND_EXEC)
	break;
12323 /* Allow up to 4 conditionally executed instructions in a block. */
12324 n = get_attr_ce_count (insn);
      if (arm_condexec_masklen + n > 4)
	break;
12328 predicate = COND_EXEC_TEST (body);
12329 code = get_arm_condition_code (predicate);
12330 mask = (1 << n) - 1;
12331 if (arm_current_cc == code)
12332 arm_condexec_mask |= (mask << arm_condexec_masklen);
      else if (arm_current_cc != ARM_INVERSE_CONDITION_CODE (code))
	break;
12336 arm_condexec_count++;
12337 arm_condexec_masklen += n;
12339 /* A jump must be the last instruction in a conditional block. */
      if (GET_CODE (insn) == JUMP_INSN)
	break;
12343 /* Restore recog_data (getting the attributes of other insns can
12344 destroy this array, but final.c assumes that it remains intact
12345 across this call). */
12346 extract_constrain_insn_cached (first_insn);
void
arm_final_prescan_insn (rtx insn)
{
12352 /* BODY will hold the body of INSN. */
12353 rtx body = PATTERN (insn);
12355 /* This will be 1 if trying to repeat the trick, and things need to be
     reversed if it appears to fail.  */
  int reverse = 0;
  /* If JUMP_CLOBBERS is non-zero then the condition codes are clobbered
     when a branch is taken, even if the rtl suggests otherwise.  It also
     means that we have to grub around within the jump expression to find
     out what the conditions are when the jump isn't taken.  */
12363 int jump_clobbers = 0;
12365 /* If we start with a return insn, we only succeed if we find another one. */
12366 int seeking_return = 0;
12368 /* START_INSN will hold the insn from where we start looking. This is the
12369 first insn after the following code_label if REVERSE is true. */
12370 rtx start_insn = insn;
12372 /* If in state 4, check if the target branch is reached, in order to
12373 change back to state 0. */
12374 if (arm_ccfsm_state == 4)
12376 if (insn == arm_target_insn)
12378 arm_target_insn = NULL;
12379 arm_ccfsm_state = 0;
12384 /* If in state 3, it is possible to repeat the trick, if this insn is an
12385 unconditional branch to a label, and immediately following this branch
12386 is the previous target label which is only used once, and the label this
12387 branch jumps to is not too far off. */
12388 if (arm_ccfsm_state == 3)
12390 if (simplejump_p (insn))
12392 start_insn = next_nonnote_insn (start_insn);
12393 if (GET_CODE (start_insn) == BARRIER)
12395 /* XXX Isn't this always a barrier? */
12396 start_insn = next_nonnote_insn (start_insn);
12398 if (GET_CODE (start_insn) == CODE_LABEL
12399 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
12400 && LABEL_NUSES (start_insn) == 1)
12405 else if (GET_CODE (body) == RETURN)
12407 start_insn = next_nonnote_insn (start_insn);
12408 if (GET_CODE (start_insn) == BARRIER)
12409 start_insn = next_nonnote_insn (start_insn);
12410 if (GET_CODE (start_insn) == CODE_LABEL
12411 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
12412 && LABEL_NUSES (start_insn) == 1)
12415 seeking_return = 1;
12424 gcc_assert (!arm_ccfsm_state || reverse);
12425 if (GET_CODE (insn) != JUMP_INSN)
  /* This jump might be paralleled with a clobber of the condition codes;
     the jump should always come first.  */
12430 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
12431 body = XVECEXP (body, 0, 0);
12434 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
12435 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
      int insns_skipped;
      int fail = FALSE, succeed = FALSE;
12439 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
12440 int then_not_else = TRUE;
12441 rtx this_insn = start_insn, label = 0;
12443 /* If the jump cannot be done with one instruction, we cannot
12444 conditionally execute the instruction in the inverse case. */
12445 if (get_attr_conds (insn) == CONDS_JUMP_CLOB)
12451 /* Register the insn jumped to. */
12454 if (!seeking_return)
12455 label = XEXP (SET_SRC (body), 0);
12457 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
12458 label = XEXP (XEXP (SET_SRC (body), 1), 0);
12459 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
12461 label = XEXP (XEXP (SET_SRC (body), 2), 0);
12462 then_not_else = FALSE;
12464 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == RETURN)
12465 seeking_return = 1;
12466 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == RETURN)
12468 seeking_return = 1;
12469 then_not_else = FALSE;
12472 gcc_unreachable ();
12474 /* See how many insns this branch skips, and what kind of insns. If all
12475 insns are okay, and the label or unconditional branch to the same
12476 label is not too far away, succeed. */
12477 for (insns_skipped = 0;
12478 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
12482 this_insn = next_nonnote_insn (this_insn);
12486 switch (GET_CODE (this_insn))
12489 /* Succeed if it is the target label, otherwise fail since
12490 control falls in from somewhere else. */
12491 if (this_insn == label)
12495 arm_ccfsm_state = 2;
12496 this_insn = next_nonnote_insn (this_insn);
12499 arm_ccfsm_state = 1;
	      /* Succeed if the following insn is the target label.
		 Otherwise fail.
		 If return insns are used then the last insn in a function
		 will be a barrier.  */
12511 this_insn = next_nonnote_insn (this_insn);
12512 if (this_insn && this_insn == label)
12516 arm_ccfsm_state = 2;
12517 this_insn = next_nonnote_insn (this_insn);
12520 arm_ccfsm_state = 1;
12528 /* The AAPCS says that conditional calls should not be
12529 used since they make interworking inefficient (the
12530 linker can't transform BL<cond> into BLX). That's
12531 only a problem if the machine has BLX. */
	      /* Succeed if the following insn is the target label, or
		 if the following two insns are a barrier and the
		 target label.  */
12541 this_insn = next_nonnote_insn (this_insn);
12542 if (this_insn && GET_CODE (this_insn) == BARRIER)
12543 this_insn = next_nonnote_insn (this_insn);
12545 if (this_insn && this_insn == label
12546 && insns_skipped < max_insns_skipped)
12550 arm_ccfsm_state = 2;
12551 this_insn = next_nonnote_insn (this_insn);
12554 arm_ccfsm_state = 1;
	      /* If this is an unconditional branch to the same label, succeed.
		 If it is to another label, do nothing.  If it is conditional,
		 fail.  */
	      /* XXX Probably, the tests for SET and the PC are
		 unnecessary.  */

	      scanbody = PATTERN (this_insn);
12569 if (GET_CODE (scanbody) == SET
12570 && GET_CODE (SET_DEST (scanbody)) == PC)
12572 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
12573 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
12575 arm_ccfsm_state = 2;
12578 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
12581 /* Fail if a conditional return is undesirable (e.g. on a
12582 StrongARM), but still allow this if optimizing for size. */
12583 else if (GET_CODE (scanbody) == RETURN
12584 && !use_return_insn (TRUE, NULL)
12587 else if (GET_CODE (scanbody) == RETURN
12590 arm_ccfsm_state = 2;
12593 else if (GET_CODE (scanbody) == PARALLEL)
12595 switch (get_attr_conds (this_insn))
12605 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
	      /* Instructions using or affecting the condition codes make it
		 fail.  */
12612 scanbody = PATTERN (this_insn);
12613 if (!(GET_CODE (scanbody) == SET
12614 || GET_CODE (scanbody) == PARALLEL)
12615 || get_attr_conds (this_insn) != CONDS_NOCOND)
12618 /* A conditional cirrus instruction must be followed by
12619 a non Cirrus instruction. However, since we
12620 conditionalize instructions in this function and by
12621 the time we get here we can't add instructions
12622 (nops), because shorten_branches() has already been
12623 called, we will disable conditionalizing Cirrus
12624 instructions to be safe. */
12625 if (GET_CODE (scanbody) != USE
12626 && GET_CODE (scanbody) != CLOBBER
12627 && get_attr_cirrus (this_insn) != CIRRUS_NOT)
12637 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
12638 arm_target_label = CODE_LABEL_NUMBER (label);
12641 gcc_assert (seeking_return || arm_ccfsm_state == 2);
12643 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
12645 this_insn = next_nonnote_insn (this_insn);
12646 gcc_assert (!this_insn
12647 || (GET_CODE (this_insn) != BARRIER
12648 && GET_CODE (this_insn) != CODE_LABEL));
	      /* Oh, dear!  We ran off the end.  Give up.  */
12653 extract_constrain_insn_cached (insn);
12654 arm_ccfsm_state = 0;
12655 arm_target_insn = NULL;
12658 arm_target_insn = this_insn;
12662 gcc_assert (!reverse);
	      arm_current_cc
		= get_arm_condition_code (XEXP (XEXP (XEXP (SET_SRC (body),
							    0), 0), 1));
12666 if (GET_CODE (XEXP (XEXP (SET_SRC (body), 0), 0)) == AND)
12667 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12668 if (GET_CODE (XEXP (SET_SRC (body), 0)) == NE)
12669 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
12673 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
	    arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body),
							   0));
12680 if (reverse || then_not_else)
12681 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
  /* Restore recog_data (getting the attributes of other insns can
     destroy this array, but final.c assumes that it remains intact
     across this call).  */
12687 extract_constrain_insn_cached (insn);
/* Output IT instructions.  */
void
thumb2_asm_output_opcode (FILE * stream)
{
  char buff[5];
  int n;
12698 if (arm_condexec_mask)
12700 for (n = 0; n < arm_condexec_masklen; n++)
12701 buff[n] = (arm_condexec_mask & (1 << n)) ? 't' : 'e';
      buff[n] = 0;
      asm_fprintf (stream, "i%s\t%s\n\t", buff,
12704 arm_condition_codes[arm_current_cc]);
12705 arm_condexec_mask = 0;
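      /* E.g. with arm_current_cc == ARM_EQ, arm_condexec_masklen == 2
	 and arm_condexec_mask == 0x1 this emits "ite	eq", leaving
	 final's own opcode output to start on the following line.
	 (Illustrative.)  */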
/* Returns true if REGNO is a valid register
   for holding a quantity of type MODE.  */
int
arm_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
{
12714 if (GET_MODE_CLASS (mode) == MODE_CC)
12715 return (regno == CC_REGNUM
12716 || (TARGET_HARD_FLOAT && TARGET_VFP
12717 && regno == VFPCC_REGNUM));
12720 /* For the Thumb we only allow values bigger than SImode in
12721 registers 0 - 6, so that there is always a second low
12722 register available to hold the upper part of the value.
       We probably ought to ensure that the register is the
12724 start of an even numbered register pair. */
12725 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
12727 if (TARGET_HARD_FLOAT && TARGET_MAVERICK
12728 && IS_CIRRUS_REGNUM (regno))
12729 /* We have outlawed SI values in Cirrus registers because they
12730 reside in the lower 32 bits, but SF values reside in the
12731 upper 32 bits. This causes gcc all sorts of grief. We can't
12732 even split the registers into pairs because Cirrus SI values
       get sign extended to 64 bits -- aldyh.  */
12734 return (GET_MODE_CLASS (mode) == MODE_FLOAT) || (mode == DImode);
12736 if (TARGET_HARD_FLOAT && TARGET_VFP
12737 && IS_VFP_REGNUM (regno))
12739 if (mode == SFmode || mode == SImode)
12742 /* DFmode values are only valid in even register pairs. */
12743 if (mode == DFmode)
12744 return ((regno - FIRST_VFP_REGNUM) & 1) == 0;
12748 if (TARGET_REALLY_IWMMXT)
12750 if (IS_IWMMXT_GR_REGNUM (regno))
12751 return mode == SImode;
12753 if (IS_IWMMXT_REGNUM (regno))
12754 return VALID_IWMMXT_REG_MODE (mode);
12757 /* We allow any value to be stored in the general registers.
     Restrict doubleword quantities to even register pairs so that we can
     use ldrd.  */
12760 if (regno <= LAST_ARM_REGNUM)
12761 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
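  /* So with LDRD available a DImode value may live in {r0, r1} or
     {r2, r3} but not in {r1, r2}, letting a single ldrd/strd access it.
     (Illustrative.)  */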
12763 if (regno == FRAME_POINTER_REGNUM
12764 || regno == ARG_POINTER_REGNUM)
12765 /* We only allow integers in the fake hard registers. */
12766 return GET_MODE_CLASS (mode) == MODE_INT;
12768 /* The only registers left are the FPA registers
12769 which we only allow to hold FP values. */
12770 return (TARGET_HARD_FLOAT && TARGET_FPA
12771 && GET_MODE_CLASS (mode) == MODE_FLOAT
12772 && regno >= FIRST_FPA_REGNUM
12773 && regno <= LAST_FPA_REGNUM);
/* For efficiency and historical reasons LO_REGS, HI_REGS and CC_REGS are
   not used in arm mode.  */
enum reg_class
arm_regno_class (int regno)
{
12783 if (regno == STACK_POINTER_REGNUM)
12785 if (regno == CC_REGNUM)
12792 if (TARGET_THUMB2 && regno < 8)
12795 if ( regno <= LAST_ARM_REGNUM
12796 || regno == FRAME_POINTER_REGNUM
12797 || regno == ARG_POINTER_REGNUM)
12798 return TARGET_THUMB2 ? HI_REGS : GENERAL_REGS;
12800 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
12801 return TARGET_THUMB2 ? CC_REG : NO_REGS;
12803 if (IS_CIRRUS_REGNUM (regno))
12804 return CIRRUS_REGS;
12806 if (IS_VFP_REGNUM (regno))
12809 if (IS_IWMMXT_REGNUM (regno))
12810 return IWMMXT_REGS;
12812 if (IS_IWMMXT_GR_REGNUM (regno))
12813 return IWMMXT_GR_REGS;
/* Handle a special case when computing the offset
   of an argument from the frame pointer.  */
int
arm_debugger_arg_offset (int value, rtx addr)
{
  rtx insn;
12825 /* We are only interested if dbxout_parms() failed to compute the offset. */
12829 /* We can only cope with the case where the address is held in a register. */
12830 if (GET_CODE (addr) != REG)
12833 /* If we are using the frame pointer to point at the argument, then
12834 an offset of 0 is correct. */
12835 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
12838 /* If we are using the stack pointer to point at the
12839 argument, then an offset of 0 is correct. */
12840 /* ??? Check this is consistent with thumb2 frame layout. */
12841 if ((TARGET_THUMB || !frame_pointer_needed)
12842 && REGNO (addr) == SP_REGNUM)
12845 /* Oh dear. The argument is pointed to by a register rather
12846 than being held in a register, or being stored at a known
12847 offset from the frame pointer. Since GDB only understands
12848 those two kinds of argument we must translate the address
12849 held in the register into an offset from the frame pointer.
12850 We do this by searching through the insns for the function
12851 looking to see where this register gets its value. If the
12852 register is initialized from the frame pointer plus an offset
12853 then we are in luck and we can continue, otherwise we give up.
12855 This code is exercised by producing debugging information
12856 for a function with arguments like this:
12858 double func (double a, double b, int c, double d) {return d;}
12860 Without this code the stab for parameter 'd' will be set to
12861 an offset of 0 from the frame pointer, rather than 8. */
  /* The if() statement says:

     If the insn is a normal instruction
     and if the insn is setting the value in a register
     and if the register being set is the register holding the address of the argument
     and if the address is computed by an addition
     that involves adding to a register
     which is the frame pointer
     a constant integer

     then...  */
12875 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
12877 if ( GET_CODE (insn) == INSN
12878 && GET_CODE (PATTERN (insn)) == SET
12879 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
12880 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
12881 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 0)) == REG
12882 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
12883 && GET_CODE (XEXP (XEXP (PATTERN (insn), 1), 1)) == CONST_INT
12886 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
  if (value == 0)
    {
      warning (0, "unable to compute real location of stacked parameter");
      value = 8; /* XXX magic hack */
    }

  return value;
#define def_mbuiltin(MASK, NAME, TYPE, CODE)				\
  do									\
    {									\
      if ((MASK) & insn_flags)						\
	add_builtin_function ((NAME), (TYPE), (CODE),			\
			      BUILT_IN_MD, NULL, NULL_TREE);		\
    }									\
  while (0)
12911 struct builtin_description
12913 const unsigned int mask;
12914 const enum insn_code icode;
12915 const char * const name;
12916 const enum arm_builtins code;
12917 const enum rtx_code comparison;
12918 const unsigned int flag;
12921 static const struct builtin_description bdesc_2arg[] =
12923 #define IWMMXT_BUILTIN(code, string, builtin) \
12924 { FL_IWMMXT, CODE_FOR_##code, "__builtin_arm_" string, \
12925 ARM_BUILTIN_##builtin, 0, 0 },
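/* For instance, IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB) expands to
     { FL_IWMMXT, CODE_FOR_addv8qi3, "__builtin_arm_waddb",
       ARM_BUILTIN_WADDB, 0, 0 },
   tying the builtin directly to the named insn pattern.  */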
12927 IWMMXT_BUILTIN (addv8qi3, "waddb", WADDB)
12928 IWMMXT_BUILTIN (addv4hi3, "waddh", WADDH)
12929 IWMMXT_BUILTIN (addv2si3, "waddw", WADDW)
12930 IWMMXT_BUILTIN (subv8qi3, "wsubb", WSUBB)
12931 IWMMXT_BUILTIN (subv4hi3, "wsubh", WSUBH)
12932 IWMMXT_BUILTIN (subv2si3, "wsubw", WSUBW)
12933 IWMMXT_BUILTIN (ssaddv8qi3, "waddbss", WADDSSB)
12934 IWMMXT_BUILTIN (ssaddv4hi3, "waddhss", WADDSSH)
12935 IWMMXT_BUILTIN (ssaddv2si3, "waddwss", WADDSSW)
12936 IWMMXT_BUILTIN (sssubv8qi3, "wsubbss", WSUBSSB)
12937 IWMMXT_BUILTIN (sssubv4hi3, "wsubhss", WSUBSSH)
12938 IWMMXT_BUILTIN (sssubv2si3, "wsubwss", WSUBSSW)
12939 IWMMXT_BUILTIN (usaddv8qi3, "waddbus", WADDUSB)
12940 IWMMXT_BUILTIN (usaddv4hi3, "waddhus", WADDUSH)
12941 IWMMXT_BUILTIN (usaddv2si3, "waddwus", WADDUSW)
12942 IWMMXT_BUILTIN (ussubv8qi3, "wsubbus", WSUBUSB)
12943 IWMMXT_BUILTIN (ussubv4hi3, "wsubhus", WSUBUSH)
12944 IWMMXT_BUILTIN (ussubv2si3, "wsubwus", WSUBUSW)
12945 IWMMXT_BUILTIN (mulv4hi3, "wmulul", WMULUL)
12946 IWMMXT_BUILTIN (smulv4hi3_highpart, "wmulsm", WMULSM)
12947 IWMMXT_BUILTIN (umulv4hi3_highpart, "wmulum", WMULUM)
12948 IWMMXT_BUILTIN (eqv8qi3, "wcmpeqb", WCMPEQB)
12949 IWMMXT_BUILTIN (eqv4hi3, "wcmpeqh", WCMPEQH)
12950 IWMMXT_BUILTIN (eqv2si3, "wcmpeqw", WCMPEQW)
12951 IWMMXT_BUILTIN (gtuv8qi3, "wcmpgtub", WCMPGTUB)
12952 IWMMXT_BUILTIN (gtuv4hi3, "wcmpgtuh", WCMPGTUH)
12953 IWMMXT_BUILTIN (gtuv2si3, "wcmpgtuw", WCMPGTUW)
12954 IWMMXT_BUILTIN (gtv8qi3, "wcmpgtsb", WCMPGTSB)
12955 IWMMXT_BUILTIN (gtv4hi3, "wcmpgtsh", WCMPGTSH)
12956 IWMMXT_BUILTIN (gtv2si3, "wcmpgtsw", WCMPGTSW)
12957 IWMMXT_BUILTIN (umaxv8qi3, "wmaxub", WMAXUB)
12958 IWMMXT_BUILTIN (smaxv8qi3, "wmaxsb", WMAXSB)
12959 IWMMXT_BUILTIN (umaxv4hi3, "wmaxuh", WMAXUH)
12960 IWMMXT_BUILTIN (smaxv4hi3, "wmaxsh", WMAXSH)
12961 IWMMXT_BUILTIN (umaxv2si3, "wmaxuw", WMAXUW)
12962 IWMMXT_BUILTIN (smaxv2si3, "wmaxsw", WMAXSW)
12963 IWMMXT_BUILTIN (uminv8qi3, "wminub", WMINUB)
12964 IWMMXT_BUILTIN (sminv8qi3, "wminsb", WMINSB)
12965 IWMMXT_BUILTIN (uminv4hi3, "wminuh", WMINUH)
12966 IWMMXT_BUILTIN (sminv4hi3, "wminsh", WMINSH)
12967 IWMMXT_BUILTIN (uminv2si3, "wminuw", WMINUW)
12968 IWMMXT_BUILTIN (sminv2si3, "wminsw", WMINSW)
12969 IWMMXT_BUILTIN (iwmmxt_anddi3, "wand", WAND)
12970 IWMMXT_BUILTIN (iwmmxt_nanddi3, "wandn", WANDN)
12971 IWMMXT_BUILTIN (iwmmxt_iordi3, "wor", WOR)
12972 IWMMXT_BUILTIN (iwmmxt_xordi3, "wxor", WXOR)
12973 IWMMXT_BUILTIN (iwmmxt_uavgv8qi3, "wavg2b", WAVG2B)
12974 IWMMXT_BUILTIN (iwmmxt_uavgv4hi3, "wavg2h", WAVG2H)
12975 IWMMXT_BUILTIN (iwmmxt_uavgrndv8qi3, "wavg2br", WAVG2BR)
12976 IWMMXT_BUILTIN (iwmmxt_uavgrndv4hi3, "wavg2hr", WAVG2HR)
12977 IWMMXT_BUILTIN (iwmmxt_wunpckilb, "wunpckilb", WUNPCKILB)
12978 IWMMXT_BUILTIN (iwmmxt_wunpckilh, "wunpckilh", WUNPCKILH)
12979 IWMMXT_BUILTIN (iwmmxt_wunpckilw, "wunpckilw", WUNPCKILW)
12980 IWMMXT_BUILTIN (iwmmxt_wunpckihb, "wunpckihb", WUNPCKIHB)
12981 IWMMXT_BUILTIN (iwmmxt_wunpckihh, "wunpckihh", WUNPCKIHH)
12982 IWMMXT_BUILTIN (iwmmxt_wunpckihw, "wunpckihw", WUNPCKIHW)
12983 IWMMXT_BUILTIN (iwmmxt_wmadds, "wmadds", WMADDS)
12984 IWMMXT_BUILTIN (iwmmxt_wmaddu, "wmaddu", WMADDU)
12986 #define IWMMXT_BUILTIN2(code, builtin) \
12987 { FL_IWMMXT, CODE_FOR_##code, NULL, ARM_BUILTIN_##builtin, 0, 0 },
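/* These entries have no user-visible name; the expander reaches them
   through the named shift/rotate builtins registered further down
   (e.g. __builtin_arm_wsllh), picking the _di or immediate variant to
   suit the operand.  (A summary of intent, not a definition.)  */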
12989 IWMMXT_BUILTIN2 (iwmmxt_wpackhss, WPACKHSS)
12990 IWMMXT_BUILTIN2 (iwmmxt_wpackwss, WPACKWSS)
12991 IWMMXT_BUILTIN2 (iwmmxt_wpackdss, WPACKDSS)
12992 IWMMXT_BUILTIN2 (iwmmxt_wpackhus, WPACKHUS)
12993 IWMMXT_BUILTIN2 (iwmmxt_wpackwus, WPACKWUS)
12994 IWMMXT_BUILTIN2 (iwmmxt_wpackdus, WPACKDUS)
12995 IWMMXT_BUILTIN2 (ashlv4hi3_di, WSLLH)
12996 IWMMXT_BUILTIN2 (ashlv4hi3, WSLLHI)
12997 IWMMXT_BUILTIN2 (ashlv2si3_di, WSLLW)
12998 IWMMXT_BUILTIN2 (ashlv2si3, WSLLWI)
12999 IWMMXT_BUILTIN2 (ashldi3_di, WSLLD)
13000 IWMMXT_BUILTIN2 (ashldi3_iwmmxt, WSLLDI)
13001 IWMMXT_BUILTIN2 (lshrv4hi3_di, WSRLH)
13002 IWMMXT_BUILTIN2 (lshrv4hi3, WSRLHI)
13003 IWMMXT_BUILTIN2 (lshrv2si3_di, WSRLW)
13004 IWMMXT_BUILTIN2 (lshrv2si3, WSRLWI)
13005 IWMMXT_BUILTIN2 (lshrdi3_di, WSRLD)
13006 IWMMXT_BUILTIN2 (lshrdi3_iwmmxt, WSRLDI)
13007 IWMMXT_BUILTIN2 (ashrv4hi3_di, WSRAH)
13008 IWMMXT_BUILTIN2 (ashrv4hi3, WSRAHI)
13009 IWMMXT_BUILTIN2 (ashrv2si3_di, WSRAW)
13010 IWMMXT_BUILTIN2 (ashrv2si3, WSRAWI)
13011 IWMMXT_BUILTIN2 (ashrdi3_di, WSRAD)
13012 IWMMXT_BUILTIN2 (ashrdi3_iwmmxt, WSRADI)
13013 IWMMXT_BUILTIN2 (rorv4hi3_di, WRORH)
13014 IWMMXT_BUILTIN2 (rorv4hi3, WRORHI)
13015 IWMMXT_BUILTIN2 (rorv2si3_di, WRORW)
13016 IWMMXT_BUILTIN2 (rorv2si3, WRORWI)
13017 IWMMXT_BUILTIN2 (rordi3_di, WRORD)
13018 IWMMXT_BUILTIN2 (rordi3, WRORDI)
13019 IWMMXT_BUILTIN2 (iwmmxt_wmacuz, WMACUZ)
13020 IWMMXT_BUILTIN2 (iwmmxt_wmacsz, WMACSZ)
13023 static const struct builtin_description bdesc_1arg[] =
13025 IWMMXT_BUILTIN (iwmmxt_tmovmskb, "tmovmskb", TMOVMSKB)
13026 IWMMXT_BUILTIN (iwmmxt_tmovmskh, "tmovmskh", TMOVMSKH)
13027 IWMMXT_BUILTIN (iwmmxt_tmovmskw, "tmovmskw", TMOVMSKW)
13028 IWMMXT_BUILTIN (iwmmxt_waccb, "waccb", WACCB)
13029 IWMMXT_BUILTIN (iwmmxt_wacch, "wacch", WACCH)
13030 IWMMXT_BUILTIN (iwmmxt_waccw, "waccw", WACCW)
13031 IWMMXT_BUILTIN (iwmmxt_wunpckehub, "wunpckehub", WUNPCKEHUB)
13032 IWMMXT_BUILTIN (iwmmxt_wunpckehuh, "wunpckehuh", WUNPCKEHUH)
13033 IWMMXT_BUILTIN (iwmmxt_wunpckehuw, "wunpckehuw", WUNPCKEHUW)
13034 IWMMXT_BUILTIN (iwmmxt_wunpckehsb, "wunpckehsb", WUNPCKEHSB)
13035 IWMMXT_BUILTIN (iwmmxt_wunpckehsh, "wunpckehsh", WUNPCKEHSH)
13036 IWMMXT_BUILTIN (iwmmxt_wunpckehsw, "wunpckehsw", WUNPCKEHSW)
13037 IWMMXT_BUILTIN (iwmmxt_wunpckelub, "wunpckelub", WUNPCKELUB)
13038 IWMMXT_BUILTIN (iwmmxt_wunpckeluh, "wunpckeluh", WUNPCKELUH)
13039 IWMMXT_BUILTIN (iwmmxt_wunpckeluw, "wunpckeluw", WUNPCKELUW)
13040 IWMMXT_BUILTIN (iwmmxt_wunpckelsb, "wunpckelsb", WUNPCKELSB)
13041 IWMMXT_BUILTIN (iwmmxt_wunpckelsh, "wunpckelsh", WUNPCKELSH)
13042 IWMMXT_BUILTIN (iwmmxt_wunpckelsw, "wunpckelsw", WUNPCKELSW)
/* Set up all the iWMMXt builtins.  This is
   not called if TARGET_IWMMXT is zero.  */

static void
arm_init_iwmmxt_builtins (void)
{
  size_t i;
  const struct builtin_description * d;
13053 tree endlink = void_list_node;
13055 tree V2SI_type_node = build_vector_type_for_mode (intSI_type_node, V2SImode);
13056 tree V4HI_type_node = build_vector_type_for_mode (intHI_type_node, V4HImode);
13057 tree V8QI_type_node = build_vector_type_for_mode (intQI_type_node, V8QImode);
13060 = build_function_type (integer_type_node,
13061 tree_cons (NULL_TREE, integer_type_node, endlink));
13062 tree v8qi_ftype_v8qi_v8qi_int
13063 = build_function_type (V8QI_type_node,
13064 tree_cons (NULL_TREE, V8QI_type_node,
13065 tree_cons (NULL_TREE, V8QI_type_node,
13066 tree_cons (NULL_TREE,
13069 tree v4hi_ftype_v4hi_int
13070 = build_function_type (V4HI_type_node,
13071 tree_cons (NULL_TREE, V4HI_type_node,
13072 tree_cons (NULL_TREE, integer_type_node,
13074 tree v2si_ftype_v2si_int
13075 = build_function_type (V2SI_type_node,
13076 tree_cons (NULL_TREE, V2SI_type_node,
13077 tree_cons (NULL_TREE, integer_type_node,
13079 tree v2si_ftype_di_di
13080 = build_function_type (V2SI_type_node,
13081 tree_cons (NULL_TREE, long_long_integer_type_node,
13082 tree_cons (NULL_TREE, long_long_integer_type_node,
13084 tree di_ftype_di_int
13085 = build_function_type (long_long_integer_type_node,
13086 tree_cons (NULL_TREE, long_long_integer_type_node,
13087 tree_cons (NULL_TREE, integer_type_node,
13089 tree di_ftype_di_int_int
13090 = build_function_type (long_long_integer_type_node,
13091 tree_cons (NULL_TREE, long_long_integer_type_node,
13092 tree_cons (NULL_TREE, integer_type_node,
13093 tree_cons (NULL_TREE,
13096 tree int_ftype_v8qi
13097 = build_function_type (integer_type_node,
13098 tree_cons (NULL_TREE, V8QI_type_node,
13100 tree int_ftype_v4hi
13101 = build_function_type (integer_type_node,
13102 tree_cons (NULL_TREE, V4HI_type_node,
13104 tree int_ftype_v2si
13105 = build_function_type (integer_type_node,
13106 tree_cons (NULL_TREE, V2SI_type_node,
13108 tree int_ftype_v8qi_int
13109 = build_function_type (integer_type_node,
13110 tree_cons (NULL_TREE, V8QI_type_node,
13111 tree_cons (NULL_TREE, integer_type_node,
13113 tree int_ftype_v4hi_int
13114 = build_function_type (integer_type_node,
13115 tree_cons (NULL_TREE, V4HI_type_node,
13116 tree_cons (NULL_TREE, integer_type_node,
13118 tree int_ftype_v2si_int
13119 = build_function_type (integer_type_node,
13120 tree_cons (NULL_TREE, V2SI_type_node,
13121 tree_cons (NULL_TREE, integer_type_node,
13123 tree v8qi_ftype_v8qi_int_int
13124 = build_function_type (V8QI_type_node,
13125 tree_cons (NULL_TREE, V8QI_type_node,
13126 tree_cons (NULL_TREE, integer_type_node,
13127 tree_cons (NULL_TREE,
13130 tree v4hi_ftype_v4hi_int_int
13131 = build_function_type (V4HI_type_node,
13132 tree_cons (NULL_TREE, V4HI_type_node,
13133 tree_cons (NULL_TREE, integer_type_node,
13134 tree_cons (NULL_TREE,
13137 tree v2si_ftype_v2si_int_int
13138 = build_function_type (V2SI_type_node,
13139 tree_cons (NULL_TREE, V2SI_type_node,
13140 tree_cons (NULL_TREE, integer_type_node,
13141 tree_cons (NULL_TREE,
13144 /* Miscellaneous. */
13145 tree v8qi_ftype_v4hi_v4hi
13146 = build_function_type (V8QI_type_node,
13147 tree_cons (NULL_TREE, V4HI_type_node,
13148 tree_cons (NULL_TREE, V4HI_type_node,
13150 tree v4hi_ftype_v2si_v2si
13151 = build_function_type (V4HI_type_node,
13152 tree_cons (NULL_TREE, V2SI_type_node,
13153 tree_cons (NULL_TREE, V2SI_type_node,
13155 tree v2si_ftype_v4hi_v4hi
13156 = build_function_type (V2SI_type_node,
13157 tree_cons (NULL_TREE, V4HI_type_node,
13158 tree_cons (NULL_TREE, V4HI_type_node,
13160 tree v2si_ftype_v8qi_v8qi
13161 = build_function_type (V2SI_type_node,
13162 tree_cons (NULL_TREE, V8QI_type_node,
13163 tree_cons (NULL_TREE, V8QI_type_node,
13165 tree v4hi_ftype_v4hi_di
13166 = build_function_type (V4HI_type_node,
13167 tree_cons (NULL_TREE, V4HI_type_node,
13168 tree_cons (NULL_TREE,
13169 long_long_integer_type_node,
13171 tree v2si_ftype_v2si_di
13172 = build_function_type (V2SI_type_node,
13173 tree_cons (NULL_TREE, V2SI_type_node,
13174 tree_cons (NULL_TREE,
13175 long_long_integer_type_node,
13177 tree void_ftype_int_int
13178 = build_function_type (void_type_node,
13179 tree_cons (NULL_TREE, integer_type_node,
13180 tree_cons (NULL_TREE, integer_type_node,
13183 = build_function_type (long_long_unsigned_type_node, endlink);
13185 = build_function_type (long_long_integer_type_node,
13186 tree_cons (NULL_TREE, V8QI_type_node,
13189 = build_function_type (long_long_integer_type_node,
13190 tree_cons (NULL_TREE, V4HI_type_node,
13193 = build_function_type (long_long_integer_type_node,
13194 tree_cons (NULL_TREE, V2SI_type_node,
13196 tree v2si_ftype_v4hi
13197 = build_function_type (V2SI_type_node,
13198 tree_cons (NULL_TREE, V4HI_type_node,
13200 tree v4hi_ftype_v8qi
13201 = build_function_type (V4HI_type_node,
13202 tree_cons (NULL_TREE, V8QI_type_node,
13205 tree di_ftype_di_v4hi_v4hi
13206 = build_function_type (long_long_unsigned_type_node,
13207 tree_cons (NULL_TREE,
13208 long_long_unsigned_type_node,
13209 tree_cons (NULL_TREE, V4HI_type_node,
13210 tree_cons (NULL_TREE,
13214 tree di_ftype_v4hi_v4hi
13215 = build_function_type (long_long_unsigned_type_node,
13216 tree_cons (NULL_TREE, V4HI_type_node,
13217 tree_cons (NULL_TREE, V4HI_type_node,
13220 /* Normal vector binops. */
13221 tree v8qi_ftype_v8qi_v8qi
13222 = build_function_type (V8QI_type_node,
13223 tree_cons (NULL_TREE, V8QI_type_node,
13224 tree_cons (NULL_TREE, V8QI_type_node,
13226 tree v4hi_ftype_v4hi_v4hi
13227 = build_function_type (V4HI_type_node,
13228 tree_cons (NULL_TREE, V4HI_type_node,
13229 tree_cons (NULL_TREE, V4HI_type_node,
13231 tree v2si_ftype_v2si_v2si
13232 = build_function_type (V2SI_type_node,
13233 tree_cons (NULL_TREE, V2SI_type_node,
13234 tree_cons (NULL_TREE, V2SI_type_node,
13236 tree di_ftype_di_di
13237 = build_function_type (long_long_unsigned_type_node,
13238 tree_cons (NULL_TREE, long_long_unsigned_type_node,
			   tree_cons (NULL_TREE,
				      long_long_unsigned_type_node,
				      endlink)));
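  /* Read the _ftype_ names as C prototypes: v8qi_ftype_v8qi_v8qi is
     "V8QI f (V8QI, V8QI)", di_ftype_di_int is
     "long long f (long long, int)", and so on.  */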
13243 /* Add all builtins that are more or less simple operations on two
13245 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13247 /* Use one of the operands; the target can have a different mode for
13248 mask-generating compares. */
13249 enum machine_mode mode;
13255 mode = insn_data[d->icode].operand[1].mode;
13260 type = v8qi_ftype_v8qi_v8qi;
13263 type = v4hi_ftype_v4hi_v4hi;
13266 type = v2si_ftype_v2si_v2si;
13269 type = di_ftype_di_di;
13273 gcc_unreachable ();
13276 def_mbuiltin (d->mask, d->name, type, d->code);
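  /* Illustrative use of the builtins registered by this loop, assuming
     the __v8qi typedef from mmintrin.h:

       __v8qi sum = __builtin_arm_waddb (a, b);

     which maps onto the WADDB instruction.  */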
13279 /* Add the remaining MMX insns with somewhat more complicated types. */
13280 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wzero", di_ftype_void, ARM_BUILTIN_WZERO);
13281 def_mbuiltin (FL_IWMMXT, "__builtin_arm_setwcx", void_ftype_int_int, ARM_BUILTIN_SETWCX);
13282 def_mbuiltin (FL_IWMMXT, "__builtin_arm_getwcx", int_ftype_int, ARM_BUILTIN_GETWCX);
13284 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSLLH);
13285 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllw", v2si_ftype_v2si_di, ARM_BUILTIN_WSLLW);
13286 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslld", di_ftype_di_di, ARM_BUILTIN_WSLLD);
13287 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSLLHI);
13288 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsllwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSLLWI);
13289 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wslldi", di_ftype_di_int, ARM_BUILTIN_WSLLDI);
13291 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRLH);
13292 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRLW);
13293 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrld", di_ftype_di_di, ARM_BUILTIN_WSRLD);
13294 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRLHI);
13295 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrlwi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRLWI);
13296 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrldi", di_ftype_di_int, ARM_BUILTIN_WSRLDI);
13298 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrah", v4hi_ftype_v4hi_di, ARM_BUILTIN_WSRAH);
13299 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsraw", v2si_ftype_v2si_di, ARM_BUILTIN_WSRAW);
13300 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrad", di_ftype_di_di, ARM_BUILTIN_WSRAD);
13301 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrahi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSRAHI);
13302 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsrawi", v2si_ftype_v2si_int, ARM_BUILTIN_WSRAWI);
13303 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsradi", di_ftype_di_int, ARM_BUILTIN_WSRADI);
13305 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorh", v4hi_ftype_v4hi_di, ARM_BUILTIN_WRORH);
13306 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorw", v2si_ftype_v2si_di, ARM_BUILTIN_WRORW);
13307 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrord", di_ftype_di_di, ARM_BUILTIN_WRORD);
13308 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorhi", v4hi_ftype_v4hi_int, ARM_BUILTIN_WRORHI);
13309 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrorwi", v2si_ftype_v2si_int, ARM_BUILTIN_WRORWI);
13310 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wrordi", di_ftype_di_int, ARM_BUILTIN_WRORDI);
13312 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wshufh", v4hi_ftype_v4hi_int, ARM_BUILTIN_WSHUFH);
13314 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadb", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADB);
13315 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadh", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADH);
13316 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadbz", v2si_ftype_v8qi_v8qi, ARM_BUILTIN_WSADBZ);
13317 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wsadhz", v2si_ftype_v4hi_v4hi, ARM_BUILTIN_WSADHZ);
13319 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsb", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMSB);
13320 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMSH);
13321 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmsw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMSW);
13322 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmub", int_ftype_v8qi_int, ARM_BUILTIN_TEXTRMUB);
13323 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuh", int_ftype_v4hi_int, ARM_BUILTIN_TEXTRMUH);
13324 def_mbuiltin (FL_IWMMXT, "__builtin_arm_textrmuw", int_ftype_v2si_int, ARM_BUILTIN_TEXTRMUW);
13325 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrb", v8qi_ftype_v8qi_int_int, ARM_BUILTIN_TINSRB);
13326 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrh", v4hi_ftype_v4hi_int_int, ARM_BUILTIN_TINSRH);
13327 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tinsrw", v2si_ftype_v2si_int_int, ARM_BUILTIN_TINSRW);
13329 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccb", di_ftype_v8qi, ARM_BUILTIN_WACCB);
13330 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wacch", di_ftype_v4hi, ARM_BUILTIN_WACCH);
13331 def_mbuiltin (FL_IWMMXT, "__builtin_arm_waccw", di_ftype_v2si, ARM_BUILTIN_WACCW);
13333 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskb", int_ftype_v8qi, ARM_BUILTIN_TMOVMSKB);
13334 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskh", int_ftype_v4hi, ARM_BUILTIN_TMOVMSKH);
13335 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmovmskw", int_ftype_v2si, ARM_BUILTIN_TMOVMSKW);
13337 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhss", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHSS);
13338 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackhus", v8qi_ftype_v4hi_v4hi, ARM_BUILTIN_WPACKHUS);
13339 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwus", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWUS);
13340 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackwss", v4hi_ftype_v2si_v2si, ARM_BUILTIN_WPACKWSS);
13341 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdus", v2si_ftype_di_di, ARM_BUILTIN_WPACKDUS);
13342 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wpackdss", v2si_ftype_di_di, ARM_BUILTIN_WPACKDSS);
13344 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHUB);
13345 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHUH);
13346 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehuw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHUW);
13347 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKEHSB);
13348 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKEHSH);
13349 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckehsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKEHSW);
13350 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelub", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELUB);
13351 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELUH);
13352 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckeluw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELUW);
13353 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsb", v4hi_ftype_v8qi, ARM_BUILTIN_WUNPCKELSB);
13354 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsh", v2si_ftype_v4hi, ARM_BUILTIN_WUNPCKELSH);
13355 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wunpckelsw", di_ftype_v2si, ARM_BUILTIN_WUNPCKELSW);
13357 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacs", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACS);
13358 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacsz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACSZ);
13359 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacu", di_ftype_di_v4hi_v4hi, ARM_BUILTIN_WMACU);
13360 def_mbuiltin (FL_IWMMXT, "__builtin_arm_wmacuz", di_ftype_v4hi_v4hi, ARM_BUILTIN_WMACUZ);
13362 def_mbuiltin (FL_IWMMXT, "__builtin_arm_walign", v8qi_ftype_v8qi_v8qi_int, ARM_BUILTIN_WALIGN);
13363 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmia", di_ftype_di_int_int, ARM_BUILTIN_TMIA);
13364 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiaph", di_ftype_di_int_int, ARM_BUILTIN_TMIAPH);
13365 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabb", di_ftype_di_int_int, ARM_BUILTIN_TMIABB);
13366 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiabt", di_ftype_di_int_int, ARM_BUILTIN_TMIABT);
13367 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatb", di_ftype_di_int_int, ARM_BUILTIN_TMIATB);
13368 def_mbuiltin (FL_IWMMXT, "__builtin_arm_tmiatt", di_ftype_di_int_int, ARM_BUILTIN_TMIATT);
13372 arm_init_tls_builtins (void)
13375 tree nothrow = tree_cons (get_identifier ("nothrow"), NULL, NULL);
13376 tree const_nothrow = tree_cons (get_identifier ("const"), NULL, nothrow);
13378 ftype = build_function_type (ptr_type_node, void_list_node);
13379 add_builtin_function ("__builtin_thread_pointer", ftype,
13380 ARM_BUILTIN_THREAD_POINTER, BUILT_IN_MD,
13381 NULL, const_nothrow);
13385 arm_init_builtins (void)
13387 arm_init_tls_builtins ();
13389 if (TARGET_REALLY_IWMMXT)
13390 arm_init_iwmmxt_builtins ();
13393 /* Errors in the source file can cause expand_expr to return const0_rtx
13394 where we expect a vector. To avoid crashing, use one of the vector
13395 clear instructions. */
13398 safe_vector_operand (rtx x, enum machine_mode mode)
13400 if (x != const0_rtx)
13401 return x;
13402 x = gen_reg_rtx (mode);
13404 emit_insn (gen_iwmmxt_clrdi (mode == DImode ? x
13405 : gen_rtx_SUBREG (DImode, x, 0)));
13406 return x;
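/* Illustrative usage sketch (not from the original sources): callers guard
   vector operands before constructing the insn, exactly as the binop
   expander below does:

     if (VECTOR_MODE_P (mode0))
       op0 = safe_vector_operand (op0, mode0);

   so that a const0_rtx produced during error recovery is replaced by a
   cleared vector register instead of crashing the expander.  */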
13409 /* Subroutine of arm_expand_builtin to take care of binop insns. */
13412 arm_expand_binop_builtin (enum insn_code icode,
13413 tree exp, rtx target)
13416 tree arg0 = CALL_EXPR_ARG (exp, 0);
13417 tree arg1 = CALL_EXPR_ARG (exp, 1);
13418 rtx op0 = expand_normal (arg0);
13419 rtx op1 = expand_normal (arg1);
13420 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13421 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13422 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13424 if (VECTOR_MODE_P (mode0))
13425 op0 = safe_vector_operand (op0, mode0);
13426 if (VECTOR_MODE_P (mode1))
13427 op1 = safe_vector_operand (op1, mode1);
13429 if (target == 0
13430 || GET_MODE (target) != tmode
13431 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13432 target = gen_reg_rtx (tmode);
13434 gcc_assert (GET_MODE (op0) == mode0 && GET_MODE (op1) == mode1);
13436 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13437 op0 = copy_to_mode_reg (mode0, op0);
13438 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13439 op1 = copy_to_mode_reg (mode1, op1);
13441 pat = GEN_FCN (icode) (target, op0, op1);
13442 if (! pat)
13443 return 0;
13444 emit_insn (pat);
13445 return target;
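/* Sketch (hedged; the exact IWMMXT_BUILTIN macro is defined earlier in
   this file): a bdesc_2arg table entry that reaches this expander through
   the scan at the bottom of arm_expand_builtin:

     IWMMXT_BUILTIN (iwmmxt_waddb, "waddb", WADDB)

   i.e. roughly { FL_IWMMXT, CODE_FOR_iwmmxt_waddb, "__builtin_arm_waddb",
   ARM_BUILTIN_WADDB }, dispatched via
   arm_expand_binop_builtin (d->icode, exp, target).  */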
13448 /* Subroutine of arm_expand_builtin to take care of unop insns. */
13451 arm_expand_unop_builtin (enum insn_code icode,
13452 tree exp, rtx target, int do_load)
13455 tree arg0 = CALL_EXPR_ARG (exp, 0);
13456 rtx op0 = expand_normal (arg0);
13457 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13458 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13460 if (target == 0
13461 || GET_MODE (target) != tmode
13462 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13463 target = gen_reg_rtx (tmode);
13465 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
13468 if (VECTOR_MODE_P (mode0))
13469 op0 = safe_vector_operand (op0, mode0);
13471 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13472 op0 = copy_to_mode_reg (mode0, op0);
13475 pat = GEN_FCN (icode) (target, op0);
13476 if (! pat)
13477 return 0;
13478 emit_insn (pat);
13479 return target;
13482 /* Expand an expression EXP that calls a built-in function,
13483 with result going to TARGET if that's convenient
13484 (and in mode MODE if that's convenient).
13485 SUBTARGET may be used as the target for computing one of EXP's operands.
13486 IGNORE is nonzero if the value is to be ignored. */
13489 arm_expand_builtin (tree exp,
13491 rtx subtarget ATTRIBUTE_UNUSED,
13492 enum machine_mode mode ATTRIBUTE_UNUSED,
13493 int ignore ATTRIBUTE_UNUSED)
13495 const struct builtin_description * d;
13496 enum insn_code icode;
13497 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13505 int fcode = DECL_FUNCTION_CODE (fndecl);
13507 enum machine_mode tmode;
13508 enum machine_mode mode0;
13509 enum machine_mode mode1;
13510 enum machine_mode mode2;
13512 switch (fcode)
13514 case ARM_BUILTIN_TEXTRMSB:
13515 case ARM_BUILTIN_TEXTRMUB:
13516 case ARM_BUILTIN_TEXTRMSH:
13517 case ARM_BUILTIN_TEXTRMUH:
13518 case ARM_BUILTIN_TEXTRMSW:
13519 case ARM_BUILTIN_TEXTRMUW:
13520 icode = (fcode == ARM_BUILTIN_TEXTRMSB ? CODE_FOR_iwmmxt_textrmsb
13521 : fcode == ARM_BUILTIN_TEXTRMUB ? CODE_FOR_iwmmxt_textrmub
13522 : fcode == ARM_BUILTIN_TEXTRMSH ? CODE_FOR_iwmmxt_textrmsh
13523 : fcode == ARM_BUILTIN_TEXTRMUH ? CODE_FOR_iwmmxt_textrmuh
13524 : CODE_FOR_iwmmxt_textrmw);
13526 arg0 = CALL_EXPR_ARG (exp, 0);
13527 arg1 = CALL_EXPR_ARG (exp, 1);
13528 op0 = expand_normal (arg0);
13529 op1 = expand_normal (arg1);
13530 tmode = insn_data[icode].operand[0].mode;
13531 mode0 = insn_data[icode].operand[1].mode;
13532 mode1 = insn_data[icode].operand[2].mode;
13534 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13535 op0 = copy_to_mode_reg (mode0, op0);
13536 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13538 /* @@@ better error message */
13539 error ("selector must be an immediate");
13540 return gen_reg_rtx (tmode);
13542 if (target == 0
13543 || GET_MODE (target) != tmode
13544 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13545 target = gen_reg_rtx (tmode);
13546 pat = GEN_FCN (icode) (target, op0, op1);
13547 if (! pat)
13548 return 0;
13549 emit_insn (pat);
13550 return target;
13552 case ARM_BUILTIN_TINSRB:
13553 case ARM_BUILTIN_TINSRH:
13554 case ARM_BUILTIN_TINSRW:
13555 icode = (fcode == ARM_BUILTIN_TINSRB ? CODE_FOR_iwmmxt_tinsrb
13556 : fcode == ARM_BUILTIN_TINSRH ? CODE_FOR_iwmmxt_tinsrh
13557 : CODE_FOR_iwmmxt_tinsrw);
13558 arg0 = CALL_EXPR_ARG (exp, 0);
13559 arg1 = CALL_EXPR_ARG (exp, 1);
13560 arg2 = CALL_EXPR_ARG (exp, 2);
13561 op0 = expand_normal (arg0);
13562 op1 = expand_normal (arg1);
13563 op2 = expand_normal (arg2);
13564 tmode = insn_data[icode].operand[0].mode;
13565 mode0 = insn_data[icode].operand[1].mode;
13566 mode1 = insn_data[icode].operand[2].mode;
13567 mode2 = insn_data[icode].operand[3].mode;
13569 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13570 op0 = copy_to_mode_reg (mode0, op0);
13571 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13572 op1 = copy_to_mode_reg (mode1, op1);
13573 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13575 /* @@@ better error message */
13576 error ("selector must be an immediate");
13579 if (target == 0
13580 || GET_MODE (target) != tmode
13581 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13582 target = gen_reg_rtx (tmode);
13583 pat = GEN_FCN (icode) (target, op0, op1, op2);
13584 if (! pat)
13585 return 0;
13586 emit_insn (pat);
13587 return target;
13589 case ARM_BUILTIN_SETWCX:
13590 arg0 = CALL_EXPR_ARG (exp, 0);
13591 arg1 = CALL_EXPR_ARG (exp, 1);
13592 op0 = force_reg (SImode, expand_normal (arg0));
13593 op1 = expand_normal (arg1);
13594 emit_insn (gen_iwmmxt_tmcr (op1, op0));
13595 return 0;
13597 case ARM_BUILTIN_GETWCX:
13598 arg0 = CALL_EXPR_ARG (exp, 0);
13599 op0 = expand_normal (arg0);
13600 target = gen_reg_rtx (SImode);
13601 emit_insn (gen_iwmmxt_tmrc (target, op0));
13602 return target;
13604 case ARM_BUILTIN_WSHUFH:
13605 icode = CODE_FOR_iwmmxt_wshufh;
13606 arg0 = CALL_EXPR_ARG (exp, 0);
13607 arg1 = CALL_EXPR_ARG (exp, 1);
13608 op0 = expand_normal (arg0);
13609 op1 = expand_normal (arg1);
13610 tmode = insn_data[icode].operand[0].mode;
13611 mode1 = insn_data[icode].operand[1].mode;
13612 mode2 = insn_data[icode].operand[2].mode;
13614 if (! (*insn_data[icode].operand[1].predicate) (op0, mode1))
13615 op0 = copy_to_mode_reg (mode1, op0);
13616 if (! (*insn_data[icode].operand[2].predicate) (op1, mode2))
13618 /* @@@ better error message */
13619 error ("mask must be an immediate");
13622 if (target == 0
13623 || GET_MODE (target) != tmode
13624 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13625 target = gen_reg_rtx (tmode);
13626 pat = GEN_FCN (icode) (target, op0, op1);
13627 if (! pat)
13628 return 0;
13629 emit_insn (pat);
13630 return target;
13632 case ARM_BUILTIN_WSADB:
13633 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadb, exp, target);
13634 case ARM_BUILTIN_WSADH:
13635 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadh, exp, target);
13636 case ARM_BUILTIN_WSADBZ:
13637 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadbz, exp, target);
13638 case ARM_BUILTIN_WSADHZ:
13639 return arm_expand_binop_builtin (CODE_FOR_iwmmxt_wsadhz, exp, target);
13641 /* Several three-argument builtins. */
13642 case ARM_BUILTIN_WMACS:
13643 case ARM_BUILTIN_WMACU:
13644 case ARM_BUILTIN_WALIGN:
13645 case ARM_BUILTIN_TMIA:
13646 case ARM_BUILTIN_TMIAPH:
13647 case ARM_BUILTIN_TMIATT:
13648 case ARM_BUILTIN_TMIATB:
13649 case ARM_BUILTIN_TMIABT:
13650 case ARM_BUILTIN_TMIABB:
13651 icode = (fcode == ARM_BUILTIN_WMACS ? CODE_FOR_iwmmxt_wmacs
13652 : fcode == ARM_BUILTIN_WMACU ? CODE_FOR_iwmmxt_wmacu
13653 : fcode == ARM_BUILTIN_TMIA ? CODE_FOR_iwmmxt_tmia
13654 : fcode == ARM_BUILTIN_TMIAPH ? CODE_FOR_iwmmxt_tmiaph
13655 : fcode == ARM_BUILTIN_TMIABB ? CODE_FOR_iwmmxt_tmiabb
13656 : fcode == ARM_BUILTIN_TMIABT ? CODE_FOR_iwmmxt_tmiabt
13657 : fcode == ARM_BUILTIN_TMIATB ? CODE_FOR_iwmmxt_tmiatb
13658 : fcode == ARM_BUILTIN_TMIATT ? CODE_FOR_iwmmxt_tmiatt
13659 : CODE_FOR_iwmmxt_walign);
13660 arg0 = CALL_EXPR_ARG (exp, 0);
13661 arg1 = CALL_EXPR_ARG (exp, 1);
13662 arg2 = CALL_EXPR_ARG (exp, 2);
13663 op0 = expand_normal (arg0);
13664 op1 = expand_normal (arg1);
13665 op2 = expand_normal (arg2);
13666 tmode = insn_data[icode].operand[0].mode;
13667 mode0 = insn_data[icode].operand[1].mode;
13668 mode1 = insn_data[icode].operand[2].mode;
13669 mode2 = insn_data[icode].operand[3].mode;
13671 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13672 op0 = copy_to_mode_reg (mode0, op0);
13673 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13674 op1 = copy_to_mode_reg (mode1, op1);
13675 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
13676 op2 = copy_to_mode_reg (mode2, op2);
13677 if (target == 0
13678 || GET_MODE (target) != tmode
13679 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13680 target = gen_reg_rtx (tmode);
13681 pat = GEN_FCN (icode) (target, op0, op1, op2);
13682 if (! pat)
13683 return 0;
13684 emit_insn (pat);
13685 return target;
13687 case ARM_BUILTIN_WZERO:
13688 target = gen_reg_rtx (DImode);
13689 emit_insn (gen_iwmmxt_clrdi (target));
13690 return target;
13692 case ARM_BUILTIN_THREAD_POINTER:
13693 return arm_load_tp (target);
13699 for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
13700 if (d->code == (const enum arm_builtins) fcode)
13701 return arm_expand_binop_builtin (d->icode, exp, target);
13703 for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13704 if (d->code == (const enum arm_builtins) fcode)
13705 return arm_expand_unop_builtin (d->icode, exp, target, 0);
13707 /* @@@ Should really do something sensible here. */
13708 return NULL_RTX;
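/* Usage sketch (illustrative, not part of this file): with -mcpu=iwmmxt
   the builtins registered above are callable directly from C, e.g.

     typedef short v4hi __attribute__ ((vector_size (8)));

     long long
     wmacs_example (long long acc, v4hi a, v4hi b)
     {
       return __builtin_arm_wmacs (acc, a, b);   // di_ftype_di_v4hi_v4hi
     }

   arm_expand_builtin maps the call onto CODE_FOR_iwmmxt_wmacs through the
   three-argument case above.  */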
13711 /* Return the number (counting from 0) of
13712 the least significant set bit in MASK. */
13715 number_of_first_bit_set (unsigned mask)
13717 int bit;
13719 for (bit = 0;
13720 (mask & (1 << bit)) == 0;
13721 ++bit)
13722 continue;
13724 return bit;
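/* Example: number_of_first_bit_set (0x28) is 3, since 0x28 is binary
   101000 and bit 3 is the least significant bit set.  MASK must be
   nonzero or the loop above never terminates.  */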
13727 /* Emit code to push or pop registers to or from the stack. F is the
13728 assembly file. MASK is the registers to push or pop. PUSH is
13729 nonzero if we should push, and zero if we should pop. For debugging
13730 output, if pushing, adjust CFA_OFFSET by the amount of space added
13731 to the stack. REAL_REGS should have the same number of bits set as
13732 MASK, and will be used instead (in the same order) to describe which
13733 registers were saved - this is used to mark the save slots when we
13734 push high registers after moving them to low registers. */
13736 thumb_pushpop (FILE *f, unsigned long mask, int push, int *cfa_offset,
13737 unsigned long real_regs)
13740 int lo_mask = mask & 0xFF;
13741 int pushed_words = 0;
13745 if (lo_mask == 0 && !push && (mask & (1 << PC_REGNUM)))
13747 /* Special case. Do not generate a POP PC statement here, do it in
13748 thumb_exit () instead. */
13749 thumb_exit (f, -1);
13753 if (ARM_EABI_UNWIND_TABLES && push)
13755 fprintf (f, "\t.save\t{");
13756 for (regno = 0; regno < 15; regno++)
13758 if (real_regs & (1 << regno))
13760 if (real_regs & ((1 << regno) - 1))
13761 fprintf (f, ", ");
13762 asm_fprintf (f, "%r", regno);
13765 fprintf (f, "}\n");
13768 fprintf (f, "\t%s\t{", push ? "push" : "pop");
13770 /* Look at the low registers first. */
13771 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
13775 asm_fprintf (f, "%r", regno);
13777 if ((lo_mask & ~1) != 0)
13778 fprintf (f, ", ");
13784 if (push && (mask & (1 << LR_REGNUM)))
13786 /* Catch pushing the LR. */
13790 asm_fprintf (f, "%r", LR_REGNUM);
13794 else if (!push && (mask & (1 << PC_REGNUM)))
13796 /* Catch popping the PC. */
13797 if (TARGET_INTERWORK || TARGET_BACKTRACE
13798 || current_function_calls_eh_return)
13800 /* The PC is never popped directly; instead
13801 it is popped into r3 and then BX is used. */
13802 fprintf (f, "}\n");
13804 thumb_exit (f, -1);
13813 asm_fprintf (f, "%r", PC_REGNUM);
13817 fprintf (f, "}\n");
13819 if (push && pushed_words && dwarf2out_do_frame ())
13821 char *l = dwarf2out_cfi_label ();
13822 int pushed_mask = real_regs;
13824 *cfa_offset += pushed_words * 4;
13825 dwarf2out_def_cfa (l, SP_REGNUM, *cfa_offset);
13828 pushed_mask = real_regs;
13829 for (regno = 0; regno <= 14; regno++, pushed_mask >>= 1)
13831 if (pushed_mask & 1)
13832 dwarf2out_reg_save (l, regno, 4 * pushed_words++ - *cfa_offset);
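/* Output sketch (assuming an EABI unwind-table build): a call with
   MASK = {r4, r5} and REAL_REGS = {r8, r9} - high registers staged
   through low ones - emits

     .save {r8, r9}
     push  {r4, r5}

   so the unwinder records the true home of each saved register while the
   push itself uses the low registers.  */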
13837 /* Generate code to return from a thumb function.
13838 If 'reg_containing_return_addr' is -1, then the return address is
13839 actually on the stack, at the stack pointer. */
13841 thumb_exit (FILE *f, int reg_containing_return_addr)
13843 unsigned regs_available_for_popping;
13844 unsigned regs_to_pop;
13846 unsigned available;
13850 int restore_a4 = FALSE;
13852 /* Compute the registers we need to pop. */
13856 if (reg_containing_return_addr == -1)
13858 regs_to_pop |= 1 << LR_REGNUM;
13862 if (TARGET_BACKTRACE)
13864 /* Restore the (ARM) frame pointer and stack pointer. */
13865 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
13869 /* If there is nothing to pop then just emit the BX instruction and
13870 return. */
13871 if (pops_needed == 0)
13873 if (current_function_calls_eh_return)
13874 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
13876 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
13879 /* Otherwise if we are not supporting interworking and we have not created
13880 a backtrace structure and the function was not entered in ARM mode then
13881 just pop the return address straight into the PC. */
13882 else if (!TARGET_INTERWORK
13883 && !TARGET_BACKTRACE
13884 && !is_called_in_ARM_mode (current_function_decl)
13885 && !current_function_calls_eh_return)
13887 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
13891 /* Find out how many of the (return) argument registers we can corrupt. */
13892 regs_available_for_popping = 0;
13894 /* If returning via __builtin_eh_return, the bottom three registers
13895 all contain information needed for the return. */
13896 if (current_function_calls_eh_return)
13900 /* We can deduce the registers used from the function's
13901 return value. This is more reliable than examining
13902 regs_ever_live[] because that will be set if the register is
13903 ever used in the function, not just if the register is used
13904 to hold a return value. */
13906 if (current_function_return_rtx != 0)
13907 mode = GET_MODE (current_function_return_rtx);
13908 else
13909 mode = DECL_MODE (DECL_RESULT (current_function_decl));
13911 size = GET_MODE_SIZE (mode);
13913 if (size == 0)
13915 /* In a void function we can use any argument register.
13916 In a function that returns a structure on the stack
13917 we can use the second and third argument registers. */
13918 if (mode == VOIDmode)
13919 regs_available_for_popping =
13920 (1 << ARG_REGISTER (1))
13921 | (1 << ARG_REGISTER (2))
13922 | (1 << ARG_REGISTER (3));
13923 else
13924 regs_available_for_popping =
13925 (1 << ARG_REGISTER (2))
13926 | (1 << ARG_REGISTER (3));
13928 else if (size <= 4)
13929 regs_available_for_popping =
13930 (1 << ARG_REGISTER (2))
13931 | (1 << ARG_REGISTER (3));
13932 else if (size <= 8)
13933 regs_available_for_popping =
13934 (1 << ARG_REGISTER (3));
13937 /* Match registers to be popped with registers into which we pop them. */
13938 for (available = regs_available_for_popping,
13939 required = regs_to_pop;
13940 required != 0 && available != 0;
13941 available &= ~(available & - available),
13942 required &= ~(required & - required))
13945 /* If we have any popping registers left over, remove them. */
13947 regs_available_for_popping &= ~available;
13949 /* Otherwise if we need another popping register we can use
13950 the fourth argument register. */
13951 else if (pops_needed)
13953 /* If we have not found any free argument registers and
13954 reg a4 contains the return address, we must move it. */
13955 if (regs_available_for_popping == 0
13956 && reg_containing_return_addr == LAST_ARG_REGNUM)
13958 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
13959 reg_containing_return_addr = LR_REGNUM;
13961 else if (size > 12)
13963 /* Register a4 is being used to hold part of the return value,
13964 but we have dire need of a free, low register. */
13966 restore_a4 = TRUE;
13967 asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
13970 if (reg_containing_return_addr != LAST_ARG_REGNUM)
13972 /* The fourth argument register is available. */
13973 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
13979 /* Pop as many registers as we can. */
13980 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
13981 regs_available_for_popping);
13983 /* Process the registers we popped. */
13984 if (reg_containing_return_addr == -1)
13986 /* The return address was popped into the lowest numbered register. */
13987 regs_to_pop &= ~(1 << LR_REGNUM);
13989 reg_containing_return_addr =
13990 number_of_first_bit_set (regs_available_for_popping);
13992 /* Remove this register from the mask of available registers, so that
13993 the return address will not be corrupted by further pops. */
13994 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
13997 /* If we popped other registers then handle them here. */
13998 if (regs_available_for_popping)
14002 /* Work out which register currently contains the frame pointer. */
14003 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
14005 /* Move it into the correct place. */
14006 asm_fprintf (f, "\tmov\t%r, %r\n",
14007 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
14009 /* (Temporarily) remove it from the mask of popped registers. */
14010 regs_available_for_popping &= ~(1 << frame_pointer);
14011 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
14013 if (regs_available_for_popping)
14017 /* We popped the stack pointer as well,
14018 find the register that contains it. */
14019 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
14021 /* Move it into the stack register. */
14022 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
14024 /* At this point we have popped all necessary registers, so
14025 do not worry about restoring regs_available_for_popping
14026 to its correct value:
14028 assert (pops_needed == 0)
14029 assert (regs_available_for_popping == (1 << frame_pointer))
14030 assert (regs_to_pop == (1 << STACK_POINTER)) */
14034 /* Since we have just moved the popped value into the frame
14035 pointer, the popping register is available for reuse, and
14036 we know that we still have the stack pointer left to pop. */
14037 regs_available_for_popping |= (1 << frame_pointer);
14041 /* If we still have registers left on the stack, but we no longer have
14042 any registers into which we can pop them, then we must move the return
14043 address into the link register and make available the register that
14044 contained it. */
14045 if (regs_available_for_popping == 0 && pops_needed > 0)
14047 regs_available_for_popping |= 1 << reg_containing_return_addr;
14049 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
14050 reg_containing_return_addr);
14052 reg_containing_return_addr = LR_REGNUM;
14055 /* If we have registers left on the stack then pop some more.
14056 We know that at most we will want to pop FP and SP. */
14057 if (pops_needed > 0)
14062 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14063 regs_available_for_popping);
14065 /* We have popped either FP or SP.
14066 Move whichever one it is into the correct register. */
14067 popped_into = number_of_first_bit_set (regs_available_for_popping);
14068 move_to = number_of_first_bit_set (regs_to_pop);
14070 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
14072 regs_to_pop &= ~(1 << move_to);
14077 /* If we still have not popped everything then we must have only
14078 had one register available to us and we are now popping the SP. */
14079 if (pops_needed > 0)
14083 thumb_pushpop (f, regs_available_for_popping, FALSE, NULL,
14084 regs_available_for_popping);
14086 popped_into = number_of_first_bit_set (regs_available_for_popping);
14088 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
14089 /*
14090 assert (regs_to_pop == (1 << STACK_POINTER))
14091 assert (pops_needed == 1)
14092 */
14095 /* If necessary restore the a4 register. */
14096 if (restore_a4)
14098 if (reg_containing_return_addr != LR_REGNUM)
14100 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
14101 reg_containing_return_addr = LR_REGNUM;
14104 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
14107 if (current_function_calls_eh_return)
14108 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
14110 /* Return to caller. */
14111 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
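/* Output sketch: for a void function compiled for interworking with the
   return address left on the stack, the code above pops it through a free
   argument register and returns with BX, roughly:

     pop {r0}
     bx  r0

   The exact register comes from regs_available_for_popping.  */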
14116 thumb1_final_prescan_insn (rtx insn)
14118 if (flag_print_asm_name)
14119 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
14120 INSN_ADDRESSES (INSN_UID (insn)));
14124 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
14126 unsigned HOST_WIDE_INT mask = 0xff;
14129 if (val == 0) /* XXX */
14130 return 0;
14132 for (i = 0; i < 25; i++)
14133 if ((val & (mask << i)) == val)
14134 return 1;
14136 return 0;
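/* Example: 0x3FC00 is 0xFF << 10, so thumb_shiftable_const returns 1 and
   the value can be materialized as a MOV of 0xFF plus an LSL of 10;
   0x101 spans nine bits, never fits an 8-bit window, and yields 0.  */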
14139 /* Returns nonzero if the current function contains,
14140 or might contain a far jump. */
14142 thumb_far_jump_used_p (void)
14146 /* This test is only important for leaf functions. */
14147 /* assert (!leaf_function_p ()); */
14149 /* If we have already decided that far jumps may be used,
14150 do not bother checking again, and always return true even if
14151 it turns out that they are not being used. Once we have made
14152 the decision that far jumps are present (and that hence the link
14153 register will be pushed onto the stack) we cannot go back on it. */
14154 if (cfun->machine->far_jump_used)
14155 return 1;
14157 /* If this function is not being called from the prologue/epilogue
14158 generation code then it must be being called from the
14159 INITIAL_ELIMINATION_OFFSET macro. */
14160 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
14162 /* In this case we know that we are being asked about the elimination
14163 of the arg pointer register. If that register is not being used,
14164 then there are no arguments on the stack, and we do not have to
14165 worry that a far jump might force the prologue to push the link
14166 register, changing the stack offsets. In this case we can just
14167 return false, since the presence of far jumps in the function will
14168 not affect stack offsets.
14170 If the arg pointer is live (or if it was live, but has now been
14171 eliminated and so set to dead) then we do have to test to see if
14172 the function might contain a far jump. This test can lead to some
14173 false positives, since before reload is completed, the length of
14174 branch instructions is not known, so gcc defaults to returning their
14175 longest length, which in turn sets the far jump attribute to true.
14177 A false positive will not result in bad code being generated, but it
14178 will result in a needless push and pop of the link register. We
14179 hope that this does not occur too often.
14181 If we need doubleword stack alignment this could affect the other
14182 elimination offsets so we can't risk getting it wrong. */
14183 if (regs_ever_live [ARG_POINTER_REGNUM])
14184 cfun->machine->arg_pointer_live = 1;
14185 else if (!cfun->machine->arg_pointer_live)
14186 return 0;
14189 /* Check to see if the function contains a branch
14190 insn with the far jump attribute set. */
14191 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
14193 if (GET_CODE (insn) == JUMP_INSN
14194 /* Ignore tablejump patterns. */
14195 && GET_CODE (PATTERN (insn)) != ADDR_VEC
14196 && GET_CODE (PATTERN (insn)) != ADDR_DIFF_VEC
14197 && get_attr_far_jump (insn) == FAR_JUMP_YES
14200 /* Record the fact that we have decided that
14201 the function does use far jumps. */
14202 cfun->machine->far_jump_used = 1;
14210 /* Return nonzero if FUNC must be entered in ARM mode. */
14212 is_called_in_ARM_mode (tree func)
14214 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
14216 /* Ignore the problem about functions whose address is taken. */
14217 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
14218 return TRUE;
14221 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
14227 /* The bits which aren't usefully expanded as rtl. */
14229 thumb_unexpanded_epilogue (void)
14232 unsigned long live_regs_mask = 0;
14233 int high_regs_pushed = 0;
14234 int had_to_push_lr;
14237 if (return_used_this_function)
14240 if (IS_NAKED (arm_current_func_type ()))
14243 live_regs_mask = thumb1_compute_save_reg_mask ();
14244 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
14246 /* We can deduce the registers used from the function's return value.
14247 This is more reliable than examining regs_ever_live[] because that
14248 will be set if the register is ever used in the function, not just if
14249 the register is used to hold a return value. */
14250 size = arm_size_return_regs ();
14252 /* The prolog may have pushed some high registers to use as
14253 work registers. e.g. the testsuite file:
14254 gcc/testsuite/gcc.c-torture/execute/complex-2.c
14255 compiles to produce:
14256 push {r4, r5, r6, r7, lr}
14260 as part of the prolog. We have to undo that pushing here. */
14262 if (high_regs_pushed)
14264 unsigned long mask = live_regs_mask & 0xff;
14267 /* The available low registers depend on the size of the value we are
14268 returning. */
14275 /* Oh dear! We have no low registers into which we can pop
14276 high registers! */
14277 internal_error
14278 ("no low registers available for popping high registers");
14280 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
14281 if (live_regs_mask & (1 << next_hi_reg))
14284 while (high_regs_pushed)
14286 /* Find lo register(s) into which the high register(s) can
14287 be popped. */
14288 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
14290 if (mask & (1 << regno))
14291 high_regs_pushed--;
14292 if (high_regs_pushed == 0)
14296 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
14298 /* Pop the values into the low register(s). */
14299 thumb_pushpop (asm_out_file, mask, 0, NULL, mask);
14301 /* Move the value(s) into the high registers. */
14302 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
14304 if (mask & (1 << regno))
14306 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
14309 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
14310 if (live_regs_mask & (1 << next_hi_reg))
14315 live_regs_mask &= ~0x0f00;
14318 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
14319 live_regs_mask &= 0xff;
14321 if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
14323 /* Pop the return address into the PC. */
14324 if (had_to_push_lr)
14325 live_regs_mask |= 1 << PC_REGNUM;
14327 /* Either no argument registers were pushed or a backtrace
14328 structure was created which includes an adjusted stack
14329 pointer, so just pop everything. */
14330 if (live_regs_mask)
14331 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
14334 /* We have either just popped the return address into the
14335 PC or it was kept in LR for the entire function. */
14336 if (!had_to_push_lr)
14337 thumb_exit (asm_out_file, LR_REGNUM);
14341 /* Pop everything but the return address. */
14342 if (live_regs_mask)
14343 thumb_pushpop (asm_out_file, live_regs_mask, FALSE, NULL,
14346 if (had_to_push_lr)
14350 /* We have no free low regs, so save one. */
14351 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
14355 /* Get the return address into a temporary register. */
14356 thumb_pushpop (asm_out_file, 1 << LAST_ARG_REGNUM, 0, NULL,
14357 1 << LAST_ARG_REGNUM);
14361 /* Move the return address to lr. */
14362 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
14364 /* Restore the low register. */
14365 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
14370 regno = LAST_ARG_REGNUM;
14375 /* Remove the argument registers that were pushed onto the stack. */
14376 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
14377 SP_REGNUM, SP_REGNUM,
14378 current_function_pretend_args_size);
14380 thumb_exit (asm_out_file, regno);
14383 return "";
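/* Restore sketch for the high-register case described above: each saved
   high register is popped into a free low register and then moved back
   up, roughly

     pop {r1}
     mov r8, r1

   which mirrors (in reverse) the mov-then-push staging done by the
   prologue.  */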
14386 /* Functions to save and restore machine-specific function data. */
14387 static struct machine_function *
14388 arm_init_machine_status (void)
14390 struct machine_function *machine;
14391 machine = (machine_function *) ggc_alloc_cleared (sizeof (machine_function));
14393 #if ARM_FT_UNKNOWN != 0
14394 machine->func_type = ARM_FT_UNKNOWN;
14395 #endif
14397 return machine;
14399 /* Return an RTX indicating where the return address to the
14400 calling function can be found. */
14402 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
14404 if (count != 0)
14405 return NULL_RTX;
14407 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
14410 /* Do anything needed before RTL is emitted for each function. */
14412 arm_init_expanders (void)
14414 /* Arrange to initialize and mark the machine per-function status. */
14415 init_machine_status = arm_init_machine_status;
14417 /* This is to stop the combine pass optimizing away the alignment
14418 adjustment of va_arg. */
14419 /* ??? It is claimed that this should not be necessary. */
14420 if (cfun)
14421 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
14425 /* Like arm_compute_initial_elimination offset. Simpler because there
14426 isn't an ABI specified frame pointer for Thumb. Instead, we set it
14427 to point at the base of the local variables after static stack
14428 space for a function has been allocated. */
14431 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
14433 arm_stack_offsets *offsets;
14435 offsets = arm_get_frame_offsets ();
14437 switch (from)
14439 case ARG_POINTER_REGNUM:
14440 switch (to)
14442 case STACK_POINTER_REGNUM:
14443 return offsets->outgoing_args - offsets->saved_args;
14445 case FRAME_POINTER_REGNUM:
14446 return offsets->soft_frame - offsets->saved_args;
14448 case ARM_HARD_FRAME_POINTER_REGNUM:
14449 return offsets->saved_regs - offsets->saved_args;
14451 case THUMB_HARD_FRAME_POINTER_REGNUM:
14452 return offsets->locals_base - offsets->saved_args;
14455 gcc_unreachable ();
14459 case FRAME_POINTER_REGNUM:
14460 switch (to)
14462 case STACK_POINTER_REGNUM:
14463 return offsets->outgoing_args - offsets->soft_frame;
14465 case ARM_HARD_FRAME_POINTER_REGNUM:
14466 return offsets->saved_regs - offsets->soft_frame;
14468 case THUMB_HARD_FRAME_POINTER_REGNUM:
14469 return offsets->locals_base - offsets->soft_frame;
14472 gcc_unreachable ();
14477 gcc_unreachable ();
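/* Layout sketch of the arm_stack_offsets fields used above, from higher
   to lower addresses:

     saved_args      base of the incoming argument area (ARG_POINTER)
     saved_regs      callee-saved register dump
     soft_frame      soft frame pointer (FRAME_POINTER)
     locals_base     local variables (Thumb hard frame pointer)
     outgoing_args   outgoing argument area (STACK_POINTER)

   Every elimination offset returned above is the difference of two of
   these values.  */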
14481 /* Generate the rest of a function's prologue. */
14483 thumb1_expand_prologue (void)
14487 HOST_WIDE_INT amount;
14488 arm_stack_offsets *offsets;
14489 unsigned long func_type;
14491 unsigned long live_regs_mask;
14493 func_type = arm_current_func_type ();
14495 /* Naked functions don't have prologues. */
14496 if (IS_NAKED (func_type))
14499 if (IS_INTERRUPT (func_type))
14501 error ("interrupt Service Routines cannot be coded in Thumb mode");
14505 live_regs_mask = thumb1_compute_save_reg_mask ();
14506 /* Load the pic register before setting the frame pointer,
14507 so we can use r7 as a temporary work register. */
14508 if (flag_pic && arm_pic_register != INVALID_REGNUM)
14509 arm_load_pic_register (live_regs_mask);
14511 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
14512 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
14513 stack_pointer_rtx);
14515 offsets = arm_get_frame_offsets ();
14516 amount = offsets->outgoing_args - offsets->saved_regs;
14521 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
14522 GEN_INT (- amount)));
14523 RTX_FRAME_RELATED_P (insn) = 1;
14529 /* The stack decrement is too big for an immediate value in a single
14530 insn. In theory we could issue multiple subtracts, but after
14531 three of them it becomes more space efficient to place the full
14532 value in the constant pool and load into a register. (Also the
14533 ARM debugger really likes to see only one stack decrement per
14534 function). So instead we look for a scratch register into which
14535 we can load the decrement, and then we subtract this from the
14536 stack pointer. Unfortunately on the thumb the only available
14537 scratch registers are the argument registers, and we cannot use
14538 these as they may hold arguments to the function. Instead we
14539 attempt to locate a call preserved register which is used by this
14540 function. If we can find one, then we know that it will have
14541 been pushed at the start of the prologue and so we can corrupt
14542 it now. */
14543 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
14544 if (live_regs_mask & (1 << regno)
14545 && !(frame_pointer_needed
14546 && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
14549 if (regno > LAST_LO_REGNUM) /* Very unlikely. */
14551 rtx spare = gen_rtx_REG (SImode, IP_REGNUM);
14553 /* Choose an arbitrary, non-argument low register. */
14554 reg = gen_rtx_REG (SImode, LAST_LO_REGNUM);
14556 /* Save it by copying it into a high, scratch register. */
14557 emit_insn (gen_movsi (spare, reg));
14558 /* Add a USE to stop propagate_one_insn() from barfing. */
14559 emit_insn (gen_prologue_use (spare));
14561 /* Decrement the stack. */
14562 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
14563 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
14564 stack_pointer_rtx, reg));
14565 RTX_FRAME_RELATED_P (insn) = 1;
14566 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14567 plus_constant (stack_pointer_rtx,
14569 RTX_FRAME_RELATED_P (dwarf) = 1;
14571 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
14574 /* Restore the low register's original value. */
14575 emit_insn (gen_movsi (reg, spare));
14577 /* Emit a USE of the restored scratch register, so that flow
14578 analysis will not consider the restore redundant. The
14579 register won't be used again in this function and isn't
14580 restored by the epilogue. */
14581 emit_insn (gen_prologue_use (reg));
14585 reg = gen_rtx_REG (SImode, regno);
14587 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
14589 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
14590 stack_pointer_rtx, reg));
14591 RTX_FRAME_RELATED_P (insn) = 1;
14592 dwarf = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
14593 plus_constant (stack_pointer_rtx,
14595 RTX_FRAME_RELATED_P (dwarf) = 1;
14597 = gen_rtx_EXPR_LIST (REG_FRAME_RELATED_EXPR, dwarf,
14603 if (frame_pointer_needed)
14604 thumb_set_frame_pointer (offsets);
14606 /* If we are profiling, make sure no instructions are scheduled before
14607 the call to mcount. Similarly if the user has requested no
14608 scheduling in the prolog. Similarly if we want non-call exceptions
14609 using the EABI unwinder, to prevent faulting instructions from being
14610 swapped with a stack adjustment. */
14611 if (current_function_profile || !TARGET_SCHED_PROLOG
14612 || (ARM_EABI_UNWIND_TABLES && flag_non_call_exceptions))
14613 emit_insn (gen_blockage ());
14615 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
14616 if (live_regs_mask & 0xff)
14617 cfun->machine->lr_save_eliminated = 0;
14619 /* If the link register is being kept alive, with the return address in it,
14620 then make sure that it does not get reused by the ce2 pass. */
14621 if (cfun->machine->lr_save_eliminated)
14622 emit_insn (gen_prologue_use (gen_rtx_REG (SImode, LR_REGNUM)));
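/* Emitted sketch for a large frame (say 4096 bytes of locals, with r4
   live and therefore already saved by the prologue push):

     ldr  r4, .Lpool      @ .Lpool: .word -4096
     add  sp, sp, r4

   The label name is illustrative; the movsi of GEN_INT (- amount) above
   becomes a literal-pool load, and r4 is recovered by the epilogue's
   pop.  */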
14627 thumb1_expand_epilogue (void)
14629 HOST_WIDE_INT amount;
14630 arm_stack_offsets *offsets;
14633 /* Naked functions don't have epilogues. */
14634 if (IS_NAKED (arm_current_func_type ()))
14637 offsets = arm_get_frame_offsets ();
14638 amount = offsets->outgoing_args - offsets->saved_regs;
14640 if (frame_pointer_needed)
14642 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
14643 amount = offsets->locals_base - offsets->saved_regs;
14649 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
14650 GEN_INT (amount)));
14653 /* r3 is always free in the epilogue. */
14654 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
14656 emit_insn (gen_movsi (reg, GEN_INT (amount)));
14657 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
14661 /* Emit a USE (stack_pointer_rtx), so that
14662 the stack adjustment will not be deleted. */
14663 emit_insn (gen_prologue_use (stack_pointer_rtx));
14665 if (current_function_profile || !TARGET_SCHED_PROLOG)
14666 emit_insn (gen_blockage ());
14668 /* Emit a clobber for each insn that will be restored in the epilogue,
14669 so that flow2 will get register lifetimes correct. */
14670 for (regno = 0; regno < 13; regno++)
14671 if (regs_ever_live[regno] && !call_used_regs[regno])
14672 emit_insn (gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, regno)));
14674 if (! regs_ever_live[LR_REGNUM])
14675 emit_insn (gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, LR_REGNUM)));
14679 thumb1_output_function_prologue (FILE *f, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
14681 unsigned long live_regs_mask = 0;
14682 unsigned long l_mask;
14683 unsigned high_regs_pushed = 0;
14684 int cfa_offset = 0;
14687 if (IS_NAKED (arm_current_func_type ()))
14690 if (is_called_in_ARM_mode (current_function_decl))
14694 gcc_assert (GET_CODE (DECL_RTL (current_function_decl)) == MEM);
14695 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
14697 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
14699 /* Generate code sequence to switch us into Thumb mode. */
14700 /* The .code 32 directive has already been emitted by
14701 ASM_DECLARE_FUNCTION_NAME. */
14702 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
14703 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
14705 /* Generate a label, so that the debugger will notice the
14706 change in instruction sets. This label is also used by
14707 the assembler to bypass the ARM code when this function
14708 is called from a Thumb encoded function elsewhere in the
14709 same file. Hence the definition of STUB_NAME here must
14710 agree with the definition in gas/config/tc-arm.c. */
14712 #define STUB_NAME ".real_start_of"
14714 fprintf (f, "\t.code\t16\n");
14716 if (arm_dllexport_name_p (name))
14717 name = arm_strip_name_encoding (name);
14719 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
14720 fprintf (f, "\t.thumb_func\n");
14721 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
14724 if (current_function_pretend_args_size)
14726 /* Output unwind directive for the stack adjustment. */
14727 if (ARM_EABI_UNWIND_TABLES)
14728 fprintf (f, "\t.pad #%d\n",
14729 current_function_pretend_args_size);
14731 if (cfun->machine->uses_anonymous_args)
14735 fprintf (f, "\tpush\t{");
14737 num_pushes = ARM_NUM_INTS (current_function_pretend_args_size);
14739 for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
14740 regno <= LAST_ARG_REGNUM;
14742 asm_fprintf (f, "%r%s", regno,
14743 regno == LAST_ARG_REGNUM ? "" : ", ");
14745 fprintf (f, "}\n");
14748 asm_fprintf (f, "\tsub\t%r, %r, #%d\n",
14749 SP_REGNUM, SP_REGNUM,
14750 current_function_pretend_args_size);
14752 /* We don't need to record the stores for unwinding (would it
14753 help the debugger any if we did?), but record the change in
14754 the stack pointer. */
14755 if (dwarf2out_do_frame ())
14757 char *l = dwarf2out_cfi_label ();
14759 cfa_offset = cfa_offset + current_function_pretend_args_size;
14760 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14764 /* Get the registers we are going to push. */
14765 live_regs_mask = thumb1_compute_save_reg_mask ();
14766 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
14767 l_mask = live_regs_mask & 0x40ff;
14768 /* Then count how many other high registers will need to be pushed. */
14769 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
14771 if (TARGET_BACKTRACE)
14774 unsigned work_register;
14776 /* We have been asked to create a stack backtrace structure.
14777 The code looks like this:
14781 0 sub SP, #16 Reserve space for 4 registers.
14782 2 push {R7} Push low registers.
14783 4 add R7, SP, #20 Get the stack pointer before the push.
14784 6 str R7, [SP, #8] Store the stack pointer (before reserving the space).
14785 8 mov R7, PC Get hold of the start of this code plus 12.
14786 10 str R7, [SP, #16] Store it.
14787 12 mov R7, FP Get hold of the current frame pointer.
14788 14 str R7, [SP, #4] Store it.
14789 16 mov R7, LR Get hold of the current return address.
14790 18 str R7, [SP, #12] Store it.
14791 20 add R7, SP, #16 Point at the start of the backtrace structure.
14792 22 mov FP, R7 Put this value into the frame pointer. */
14794 work_register = thumb_find_work_register (live_regs_mask);
14796 if (ARM_EABI_UNWIND_TABLES)
14797 asm_fprintf (f, "\t.pad #16\n");
14800 (f, "\tsub\t%r, %r, #16\t%@ Create stack backtrace structure\n",
14801 SP_REGNUM, SP_REGNUM);
14803 if (dwarf2out_do_frame ())
14805 char *l = dwarf2out_cfi_label ();
14807 cfa_offset = cfa_offset + 16;
14808 dwarf2out_def_cfa (l, SP_REGNUM, cfa_offset);
14813 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14814 offset = bit_count (l_mask) * UNITS_PER_WORD;
14819 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14820 offset + 16 + current_function_pretend_args_size);
14822 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14825 /* Make sure that the instruction fetching the PC is in the right place
14826 to calculate "start of backtrace creation code + 12". */
14829 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14830 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14832 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14833 ARM_HARD_FRAME_POINTER_REGNUM);
14834 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14839 asm_fprintf (f, "\tmov\t%r, %r\n", work_register,
14840 ARM_HARD_FRAME_POINTER_REGNUM);
14841 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14843 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, PC_REGNUM);
14844 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14848 asm_fprintf (f, "\tmov\t%r, %r\n", work_register, LR_REGNUM);
14849 asm_fprintf (f, "\tstr\t%r, [%r, #%d]\n", work_register, SP_REGNUM,
14851 asm_fprintf (f, "\tadd\t%r, %r, #%d\n", work_register, SP_REGNUM,
14853 asm_fprintf (f, "\tmov\t%r, %r\t\t%@ Backtrace structure created\n",
14854 ARM_HARD_FRAME_POINTER_REGNUM, work_register);
14856 /* Optimization: If we are not pushing any low registers but we are going
14857 to push some high registers then delay our first push. This will just
14858 be a push of LR and we can combine it with the push of the first high
14859 register. */
14860 else if ((l_mask & 0xff) != 0
14861 || (high_regs_pushed == 0 && l_mask))
14862 thumb_pushpop (f, l_mask, 1, &cfa_offset, l_mask);
14864 if (high_regs_pushed)
14866 unsigned pushable_regs;
14867 unsigned next_hi_reg;
14869 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
14870 if (live_regs_mask & (1 << next_hi_reg))
14873 pushable_regs = l_mask & 0xff;
14875 if (pushable_regs == 0)
14876 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
14878 while (high_regs_pushed > 0)
14880 unsigned long real_regs_mask = 0;
14882 for (regno = LAST_LO_REGNUM; regno >= 0; regno --)
14884 if (pushable_regs & (1 << regno))
14886 asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
14888 high_regs_pushed --;
14889 real_regs_mask |= (1 << next_hi_reg);
14891 if (high_regs_pushed)
14893 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
14895 if (live_regs_mask & (1 << next_hi_reg))
14900 pushable_regs &= ~((1 << regno) - 1);
14906 /* If we had to find a work register and we have not yet
14907 saved the LR then add it to the list of regs to push. */
14908 if (l_mask == (1 << LR_REGNUM))
14910 thumb_pushpop (f, pushable_regs | (1 << LR_REGNUM),
14912 real_regs_mask | (1 << LR_REGNUM));
14916 thumb_pushpop (f, pushable_regs, 1, &cfa_offset, real_regs_mask);
14921 /* Handle the case of a double word load into a low register from
14922 a computed memory address. The computed address may involve a
14923 register which is overwritten by the load. */
14925 thumb_load_double_from_address (rtx *operands)
14933 gcc_assert (GET_CODE (operands[0]) == REG);
14934 gcc_assert (GET_CODE (operands[1]) == MEM);
14936 /* Get the memory address. */
14937 addr = XEXP (operands[1], 0);
14939 /* Work out how the memory address is computed. */
14940 switch (GET_CODE (addr))
14942 case REG:
14943 operands[2] = adjust_address (operands[1], SImode, 4);
14945 if (REGNO (operands[0]) == REGNO (addr))
14947 output_asm_insn ("ldr\t%H0, %2", operands);
14948 output_asm_insn ("ldr\t%0, %1", operands);
14952 output_asm_insn ("ldr\t%0, %1", operands);
14953 output_asm_insn ("ldr\t%H0, %2", operands);
14957 case CONST:
14958 /* Compute <address> + 4 for the high order load. */
14959 operands[2] = adjust_address (operands[1], SImode, 4);
14961 output_asm_insn ("ldr\t%0, %1", operands);
14962 output_asm_insn ("ldr\t%H0, %2", operands);
14965 case PLUS:
14966 arg1 = XEXP (addr, 0);
14967 arg2 = XEXP (addr, 1);
14969 if (CONSTANT_P (arg1))
14970 base = arg2, offset = arg1;
14972 base = arg1, offset = arg2;
14974 gcc_assert (GET_CODE (base) == REG);
14976 /* Catch the case of <address> = <reg> + <reg> */
14977 if (GET_CODE (offset) == REG)
14979 int reg_offset = REGNO (offset);
14980 int reg_base = REGNO (base);
14981 int reg_dest = REGNO (operands[0]);
14983 /* Add the base and offset registers together into the
14984 higher destination register. */
14985 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
14986 reg_dest + 1, reg_base, reg_offset);
14988 /* Load the lower destination register from the address in
14989 the higher destination register. */
14990 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
14991 reg_dest, reg_dest + 1);
14993 /* Load the higher destination register from its own address
14994 plus 4. */
14995 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
14996 reg_dest + 1, reg_dest + 1);
15000 /* Compute <address> + 4 for the high order load. */
15001 operands[2] = adjust_address (operands[1], SImode, 4);
15003 /* If the computed address is held in the low order register
15004 then load the high order register first, otherwise always
15005 load the low order register first. */
15006 if (REGNO (operands[0]) == REGNO (base))
15008 output_asm_insn ("ldr\t%H0, %2", operands);
15009 output_asm_insn ("ldr\t%0, %1", operands);
15013 output_asm_insn ("ldr\t%0, %1", operands);
15014 output_asm_insn ("ldr\t%H0, %2", operands);
15019 case LABEL_REF:
15020 /* With no registers to worry about we can just load the value
15021 directly. */
15022 operands[2] = adjust_address (operands[1], SImode, 4);
15024 output_asm_insn ("ldr\t%H0, %2", operands);
15025 output_asm_insn ("ldr\t%0, %1", operands);
15028 default:
15029 gcc_unreachable ();
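/* Output sketch for the overlapping REG case above: loading the DImode
   value at address r0 into r0/r1 must fetch the high word first,

     ldr r1, [r0, #4]
     ldr r0, [r0]

   otherwise the first load would destroy the base register.  */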
15036 thumb_output_move_mem_multiple (int n, rtx *operands)
15042 case 2:
15043 if (REGNO (operands[4]) > REGNO (operands[5]))
15045 tmp = operands[4];
15046 operands[4] = operands[5];
15047 operands[5] = tmp;
15049 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
15050 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
15053 case 3:
15054 if (REGNO (operands[4]) > REGNO (operands[5]))
15056 tmp = operands[4];
15057 operands[4] = operands[5];
15058 operands[5] = tmp;
15060 if (REGNO (operands[5]) > REGNO (operands[6]))
15062 tmp = operands[5];
15063 operands[5] = operands[6];
15064 operands[6] = tmp;
15066 if (REGNO (operands[4]) > REGNO (operands[5]))
15068 tmp = operands[4];
15069 operands[4] = operands[5];
15070 operands[5] = tmp;
15073 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
15074 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
15078 gcc_unreachable ();
15084 /* Output a call-via instruction for thumb state. */
15086 thumb_call_via_reg (rtx reg)
15088 int regno = REGNO (reg);
15091 gcc_assert (regno < LR_REGNUM);
15093 /* If we are in the normal text section we can use a single instance
15094 per compilation unit. If we are doing function sections, then we need
15095 an entry per section, since we can't rely on reachability. */
15096 if (in_section == text_section)
15098 thumb_call_reg_needed = 1;
15100 if (thumb_call_via_label[regno] == NULL)
15101 thumb_call_via_label[regno] = gen_label_rtx ();
15102 labelp = thumb_call_via_label + regno;
15106 if (cfun->machine->call_via[regno] == NULL)
15107 cfun->machine->call_via[regno] = gen_label_rtx ();
15108 labelp = cfun->machine->call_via + regno;
15111 output_asm_insn ("bl\t%a0", labelp);
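/* Output sketch: a call through r4 becomes

     bl .LCV4            @ emitted here
     ...
   .LCV4: bx r4          @ emitted once per text section by arm_file_end,
                           see below

   where .LCV4 stands for the label allocated by gen_label_rtx.  */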
15115 /* Routines for generating rtl. */
15117 thumb_expand_movmemqi (rtx *operands)
15119 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
15120 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
15121 HOST_WIDE_INT len = INTVAL (operands[2]);
15122 HOST_WIDE_INT offset = 0;
15124 while (len >= 12)
15126 emit_insn (gen_movmem12b (out, in, out, in));
15127 len -= 12;
15130 while (len >= 8)
15132 emit_insn (gen_movmem8b (out, in, out, in));
15133 len -= 8;
15136 if (len >= 4)
15138 rtx reg = gen_reg_rtx (SImode);
15139 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
15140 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
15141 offset = 4;
15142 len -= 4;
15145 if (len >= 2)
15147 rtx reg = gen_reg_rtx (HImode);
15148 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
15149 plus_constant (in, offset))));
15150 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (out, offset)),
15151 reg));
15152 offset += 2;
15153 len -= 2;
15156 if (len)
15158 rtx reg = gen_reg_rtx (QImode);
15159 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
15160 plus_constant (in, offset))));
15161 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (out, offset)),
15162 reg));
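/* Example: a 23 byte copy is expanded as one 12 byte ldmia/stmia block,
   one 8 byte block, then a halfword and a byte tail (12 + 8 + 2 + 1),
   following the chunking loops above.  */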
15167 thumb_reload_out_hi (rtx *operands)
15169 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
15172 /* Handle reading a half-word from memory during reload. */
15174 thumb_reload_in_hi (rtx *operands ATTRIBUTE_UNUSED)
15176 gcc_unreachable ();
15179 /* Return the length of a function name prefix
15180 that starts with the character 'c'. */
15181 static int
15182 arm_get_strip_length (int c)
15184 switch (c)
15186 ARM_NAME_ENCODING_LENGTHS
15187 default: return 0;
15191 /* Return a pointer to a function's name with any
15192 and all prefix encodings stripped from it. */
15194 arm_strip_name_encoding (const char *name)
15198 while ((skip = arm_get_strip_length (* name)))
15199 name += skip;
15201 return name;
15204 /* If there is a '*' anywhere in the name's prefix, then
15205 emit the stripped name verbatim, otherwise prepend an
15206 underscore if leading underscores are being used. */
15208 arm_asm_output_labelref (FILE *stream, const char *name)
15213 while ((skip = arm_get_strip_length (* name)))
15215 verbatim |= (*name == '*');
15219 if (verbatim)
15220 fputs (name, stream);
15221 else
15222 asm_fprintf (stream, "%U%s", name);
15226 arm_file_start (void)
15230 if (TARGET_UNIFIED_ASM)
15231 asm_fprintf (asm_out_file, "\t.syntax unified\n");
15235 const char *fpu_name;
15236 if (arm_select[0].string)
15237 asm_fprintf (asm_out_file, "\t.cpu %s\n", arm_select[0].string);
15238 else if (arm_select[1].string)
15239 asm_fprintf (asm_out_file, "\t.arch %s\n", arm_select[1].string);
15241 asm_fprintf (asm_out_file, "\t.cpu %s\n",
15242 all_cores[arm_default_cpu].name);
15244 if (TARGET_SOFT_FLOAT)
15246 if (TARGET_VFP)
15247 fpu_name = "softvfp";
15248 else
15249 fpu_name = "softfpa";
15253 switch (arm_fpu_arch)
15255 case FPUTYPE_FPA:
15256 fpu_name = "fpa";
15257 break;
15258 case FPUTYPE_FPA_EMU2:
15259 fpu_name = "fpe2";
15260 break;
15261 case FPUTYPE_FPA_EMU3:
15262 fpu_name = "fpe3";
15263 break;
15264 case FPUTYPE_MAVERICK:
15265 fpu_name = "maverick";
15266 break;
15267 case FPUTYPE_VFP:
15268 if (TARGET_HARD_FLOAT)
15269 asm_fprintf (asm_out_file, "\t.eabi_attribute 27, 3\n");
15270 if (TARGET_HARD_FLOAT_ABI)
15271 asm_fprintf (asm_out_file, "\t.eabi_attribute 28, 1\n");
15272 fpu_name = "vfp";
15273 break;
15278 asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_name);
15280 /* Some of these attributes only apply when the corresponding features
15281 are used. However we don't have any easy way of figuring this out.
15282 Conservatively record the setting that would have been used. */
15284 /* Tag_ABI_PCS_wchar_t. */
15285 asm_fprintf (asm_out_file, "\t.eabi_attribute 18, %d\n",
15286 (int)WCHAR_TYPE_SIZE / BITS_PER_UNIT);
15288 /* Tag_ABI_FP_rounding. */
15289 if (flag_rounding_math)
15290 asm_fprintf (asm_out_file, "\t.eabi_attribute 19, 1\n");
15291 if (!flag_unsafe_math_optimizations)
15293 /* Tag_ABI_FP_denormal. */
15294 asm_fprintf (asm_out_file, "\t.eabi_attribute 20, 1\n");
15295 /* Tag_ABI_FP_exceptions. */
15296 asm_fprintf (asm_out_file, "\t.eabi_attribute 21, 1\n");
15298 /* Tag_ABI_FP_user_exceptions. */
15299 if (flag_signaling_nans)
15300 asm_fprintf (asm_out_file, "\t.eabi_attribute 22, 1\n");
15301 /* Tag_ABI_FP_number_model. */
15302 asm_fprintf (asm_out_file, "\t.eabi_attribute 23, %d\n",
15303 flag_finite_math_only ? 1 : 3);
15305 /* Tag_ABI_align8_needed. */
15306 asm_fprintf (asm_out_file, "\t.eabi_attribute 24, 1\n");
15307 /* Tag_ABI_align8_preserved. */
15308 asm_fprintf (asm_out_file, "\t.eabi_attribute 25, 1\n");
15309 /* Tag_ABI_enum_size. */
15310 asm_fprintf (asm_out_file, "\t.eabi_attribute 26, %d\n",
15311 flag_short_enums ? 1 : 2);
15313 /* Tag_ABI_optimization_goals. */
15314 if (optimize_size)
15315 val = 4;
15316 else if (optimize >= 2)
15317 val = 2;
15318 else if (optimize)
15319 val = 1;
15320 else
15321 val = 6;
15322 asm_fprintf (asm_out_file, "\t.eabi_attribute 30, %d\n", val);
15324 default_file_start();
15328 arm_file_end (void)
15332 if (NEED_INDICATE_EXEC_STACK)
15333 /* Add .note.GNU-stack. */
15334 file_end_indicate_exec_stack ();
15336 if (! thumb_call_reg_needed)
15337 return;
15339 switch_to_section (text_section);
15340 asm_fprintf (asm_out_file, "\t.code 16\n");
15341 ASM_OUTPUT_ALIGN (asm_out_file, 1);
15343 for (regno = 0; regno < LR_REGNUM; regno++)
15345 rtx label = thumb_call_via_label[regno];
15347 if (label != 0)
15349 targetm.asm_out.internal_label (asm_out_file, "L",
15350 CODE_LABEL_NUMBER (label));
15351 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
15358 #ifdef AOF_ASSEMBLER
15359 /* Special functions only needed when producing AOF syntax assembler. */
15363 struct pic_chain * next;
15364 const char * symname;
15367 static struct pic_chain * aof_pic_chain = NULL;
15370 aof_pic_entry (rtx x)
15372 struct pic_chain ** chainp;
15375 if (aof_pic_label == NULL_RTX)
15377 aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
15380 for (offset = 0, chainp = &aof_pic_chain; *chainp;
15381 offset += 4, chainp = &(*chainp)->next)
15382 if ((*chainp)->symname == XSTR (x, 0))
15383 return plus_constant (aof_pic_label, offset);
15385 *chainp = (struct pic_chain *) xmalloc (sizeof (struct pic_chain));
15386 (*chainp)->next = NULL;
15387 (*chainp)->symname = XSTR (x, 0);
15388 return plus_constant (aof_pic_label, offset);
15392 aof_dump_pic_table (FILE *f)
15394 struct pic_chain * chain;
15396 if (aof_pic_chain == NULL)
15399 asm_fprintf (f, "\tAREA |%r$$adcons|, BASED %r\n",
15400 PIC_OFFSET_TABLE_REGNUM,
15401 PIC_OFFSET_TABLE_REGNUM);
15402 fputs ("|x$adcons|\n", f);
15404 for (chain = aof_pic_chain; chain; chain = chain->next)
15406 fputs ("\tDCD\t", f);
15407 assemble_name (f, chain->symname);
int arm_text_section_count = 1;

/* A get_unnamed_section callback for switching to the text section.  */

static void
aof_output_text_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\tAREA |C$$code%d|, CODE, READONLY",
	   arm_text_section_count++);
  if (flag_pic)
    fprintf (asm_out_file, ", PIC, REENTRANT");
  fprintf (asm_out_file, "\n");
}

static int arm_data_section_count = 1;

/* A get_unnamed_section callback for switching to the data section.  */

static void
aof_output_data_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\tAREA |C$$data%d|, DATA\n",
	   arm_data_section_count++);
}

/* Implement TARGET_ASM_INIT_SECTIONS.

   AOF Assembler syntax is a nightmare when it comes to areas, since once
   we change from one area to another, we can't go back again.  Instead,
   we must create a new area with the same attributes and add the new output
   to that.  Unfortunately, there is nothing we can do here to guarantee that
   two areas with the same attributes will be linked adjacently in the
   resulting executable, so we have to be careful not to do pc-relative
   addressing across such boundaries.  */

static void
aof_asm_init_sections (void)
{
  text_section = get_unnamed_section (SECTION_CODE,
				      aof_output_text_section_asm_op, NULL);
  data_section = get_unnamed_section (SECTION_WRITE,
				      aof_output_data_section_asm_op, NULL);
  readonly_data_section = text_section;
}

void
zero_init_section (void)
{
  static int zero_init_count = 1;

  fprintf (asm_out_file, "\tAREA |C$$zidata%d|,NOINIT\n", zero_init_count++);
  in_section = NULL;
}

/* The AOF assembler is religiously strict about declarations of
   imported and exported symbols, so that it is impossible to declare
   a function as imported near the beginning of the file, and then to
   export it later on.  It is, however, possible to delay the decision
   until all the functions in the file have been compiled.  To get
   around this, we maintain a list of the imports and exports, and
   delete from it any that are subsequently defined.  At the end of
   compilation we spit the remainder of the list out before the END
   directive.  */

struct import
{
  struct import * next;
  const char * name;
};

static struct import * imports_list = NULL;

void
aof_add_import (const char *name)
{
  struct import * new;

  for (new = imports_list; new; new = new->next)
    if (new->name == name)
      return;

  new = (struct import *) xmalloc (sizeof (struct import));
  new->next = imports_list;
  imports_list = new;
  new->name = name;
}

void
aof_delete_import (const char *name)
{
  struct import ** old;

  for (old = &imports_list; *old; old = & (*old)->next)
    {
      if ((*old)->name == name)
	{
	  *old = (*old)->next;
	  return;
	}
    }
}

int arm_main_function = 0;

static void
aof_dump_imports (FILE *f)
{
  /* The AOF assembler needs this to cause the startup code to be extracted
     from the library.  Bringing in __main causes the whole thing to work
     automagically.  */
  if (arm_main_function)
    {
      switch_to_section (text_section);
      fputs ("\tIMPORT __main\n", f);
      fputs ("\tDCD __main\n", f);
    }

  /* Now dump the remaining imports.  */
  while (imports_list)
    {
      fprintf (f, "\tIMPORT\t");
      assemble_name (f, imports_list->name);
      fputc ('\n', f);
      imports_list = imports_list->next;
    }
}

static void
aof_globalize_label (FILE *stream, const char *name)
{
  default_globalize_label (stream, name);
  if (! strcmp (name, "main"))
    arm_main_function = 1;
}

static void
aof_file_start (void)
{
  fputs ("__r0\tRN\t0\n", asm_out_file);
  fputs ("__a1\tRN\t0\n", asm_out_file);
  fputs ("__a2\tRN\t1\n", asm_out_file);
  fputs ("__a3\tRN\t2\n", asm_out_file);
  fputs ("__a4\tRN\t3\n", asm_out_file);
  fputs ("__v1\tRN\t4\n", asm_out_file);
  fputs ("__v2\tRN\t5\n", asm_out_file);
  fputs ("__v3\tRN\t6\n", asm_out_file);
  fputs ("__v4\tRN\t7\n", asm_out_file);
  fputs ("__v5\tRN\t8\n", asm_out_file);
  fputs ("__v6\tRN\t9\n", asm_out_file);
  fputs ("__sl\tRN\t10\n", asm_out_file);
  fputs ("__fp\tRN\t11\n", asm_out_file);
  fputs ("__ip\tRN\t12\n", asm_out_file);
  fputs ("__sp\tRN\t13\n", asm_out_file);
  fputs ("__lr\tRN\t14\n", asm_out_file);
  fputs ("__pc\tRN\t15\n", asm_out_file);
  fputs ("__f0\tFN\t0\n", asm_out_file);
  fputs ("__f1\tFN\t1\n", asm_out_file);
  fputs ("__f2\tFN\t2\n", asm_out_file);
  fputs ("__f3\tFN\t3\n", asm_out_file);
  fputs ("__f4\tFN\t4\n", asm_out_file);
  fputs ("__f5\tFN\t5\n", asm_out_file);
  fputs ("__f6\tFN\t6\n", asm_out_file);
  fputs ("__f7\tFN\t7\n", asm_out_file);
  switch_to_section (text_section);
}

static void
aof_file_end (void)
{
  if (flag_pic)
    aof_dump_pic_table (asm_out_file);
  arm_file_end ();
  aof_dump_imports (asm_out_file);
  fputs ("\tEND\n", asm_out_file);
}
#endif /* AOF_ASSEMBLER */

#ifndef ARM_PE
/* Symbols in the text segment can be accessed without indirecting via the
   constant pool; it may take an extra binary operation, but this is still
   faster than indirecting via memory.  Don't do this when not optimizing,
   since we won't be calculating all of the offsets necessary to do this
   simplification.  */

static void
arm_encode_section_info (tree decl, rtx rtl, int first)
{
  /* This doesn't work with AOF syntax, since the string table may be in
     a different AREA.  */
#ifndef AOF_ASSEMBLER
  if (optimize > 0 && TREE_CONSTANT (decl))
    SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
#endif

  /* If we are referencing a function that is weak then encode a long call
     flag in the function name, otherwise if the function is static or
     known to be defined in this file then encode a short call flag.  */
  if (first && DECL_P (decl))
    {
      if (TREE_CODE (decl) == FUNCTION_DECL && DECL_WEAK (decl))
	arm_encode_call_attribute (decl, LONG_CALL_FLAG_CHAR);
      else if (! TREE_PUBLIC (decl))
	arm_encode_call_attribute (decl, SHORT_CALL_FLAG_CHAR);
    }

  default_encode_section_info (decl, rtl, first);
}
#endif /* !ARM_PE */

void
arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
{
  if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
      && !strcmp (prefix, "L"))
    {
      arm_ccfsm_state = 0;
      arm_target_insn = NULL;
    }
  default_internal_label (stream, prefix, labelno);
}

/* Output code to add DELTA to the first argument, and then jump
   to FUNCTION.  Used for C++ multiple inheritance.  */

static void
arm_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
		     HOST_WIDE_INT delta,
		     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
		     tree function)
{
  static int thunk_label = 0;
  char label[256];
  char labelpc[256];
  int mi_delta = delta;
  const char *const mi_op = mi_delta < 0 ? "sub" : "add";
  int shift = 0;
  int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
		    ? 1 : 0);

  if (mi_delta < 0)
    mi_delta = - mi_delta;
  /* When generating 16-bit thumb code, thunks are entered in arm mode.  */
  if (TARGET_THUMB1)
    {
      int labelno = thunk_label++;
      ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
      fputs ("\tldr\tr12, ", file);
      assemble_name (file, label);
      fputc ('\n', file);
      if (flag_pic)
	{
	  /* If we are generating PIC, the ldr instruction below loads
	     "(target - 7) - .LTHUNKPCn" into r12.  The pc reads as
	     the address of the add + 8, so we have:

	     r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
		 = target + 1.

	     Note that we have "+ 1" because some versions of GNU ld
	     don't set the low bit of the result for R_ARM_REL32
	     relocations against thumb function symbols.  */
	  ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
	  assemble_name (file, labelpc);
	  fputs (":\n", file);
	  fputs ("\tadd\tr12, pc, r12\n", file);
	}
    }
  /* TODO: Use movw/movt for large constants when available.  */
  while (mi_delta != 0)
    {
      if ((mi_delta & (3 << shift)) == 0)
	shift += 2;
      else
	{
	  asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
		       mi_op, this_regno, this_regno,
		       mi_delta & (0xff << shift));
	  mi_delta &= ~(0xff << shift);
	  shift += 8;
	}
    }
  if (TARGET_THUMB1)
    {
      fprintf (file, "\tbx\tr12\n");
      ASM_OUTPUT_ALIGN (file, 2);
      assemble_name (file, label);
      fputs (":\n", file);
      if (flag_pic)
	{
	  /* Output ".word .LTHUNKn-7-.LTHUNKPCn".  */
	  rtx tem = XEXP (DECL_RTL (function), 0);
	  tem = gen_rtx_PLUS (GET_MODE (tem), tem, GEN_INT (-7));
	  tem = gen_rtx_MINUS (GET_MODE (tem),
			       tem,
			       gen_rtx_SYMBOL_REF (Pmode,
						   ggc_strdup (labelpc)));
	  assemble_integer (tem, 4, BITS_PER_WORD, 1);
	}
      else
	/* Output ".word .LTHUNKn".  */
	assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
    }
  else
    {
      fputs ("\tb\t", file);
      assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
      if (NEED_PLT_RELOC)
	fputs ("(PLT)", file);
      fputc ('\n', file);
    }
}

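/* For illustration only: in ARM mode a thunk with delta == 4 comes out
   as roughly

	add	r0, r0, #4
	b	target(PLT)

   with r1 instead of r0 when the return value is passed by reference,
   and the (PLT) suffix only when NEED_PLT_RELOC is set.  */
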
int
arm_emit_vector_const (FILE *file, rtx x)
{
  int i;
  const char * pattern;

  gcc_assert (GET_CODE (x) == CONST_VECTOR);

  switch (GET_MODE (x))
    {
    case V2SImode: pattern = "%08x"; break;
    case V4HImode: pattern = "%04x"; break;
    case V8QImode: pattern = "%02x"; break;
    default:       gcc_unreachable ();
    }

  fprintf (file, "0x");
  for (i = CONST_VECTOR_NUNITS (x); i--;)
    {
      rtx element;

      element = CONST_VECTOR_ELT (x, i);
      fprintf (file, pattern, INTVAL (element));
    }

  return 1;
}

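/* For illustration only: elements are printed most significant first,
   so a V4HImode vector {1, 2, 3, 4} comes out as "0x0004000300020001".  */
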
const char *
arm_output_load_gr (rtx *operands)
{
  rtx reg;
  rtx offset;
  rtx wcgr;
  rtx sum;

  if (GET_CODE (operands [1]) != MEM
      || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
      || GET_CODE (reg = XEXP (sum, 0)) != REG
      || GET_CODE (offset = XEXP (sum, 1)) != CONST_INT
      || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
    return "wldrw%?\t%0, %1";

  /* Fix up an out-of-range load of a GR register.  */
  output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
  wcgr = operands[0];
  operands[0] = reg;
  output_asm_insn ("ldr%?\t%0, %1", operands);

  operands[0] = wcgr;
  operands[1] = reg;
  output_asm_insn ("tmcr%?\t%0, %1", operands);
  output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);

  return "";
}

/* Worker function for TARGET_SETUP_INCOMING_VARARGS.

   On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
   named arg and all anonymous args onto the stack.
   XXX I know the prologue shouldn't be pushing registers, but it is faster
   that way.  */

static void
arm_setup_incoming_varargs (CUMULATIVE_ARGS *cum,
			    enum machine_mode mode ATTRIBUTE_UNUSED,
			    tree type ATTRIBUTE_UNUSED,
			    int *pretend_size,
			    int second_time ATTRIBUTE_UNUSED)
{
  cfun->machine->uses_anonymous_args = 1;
  if (cum->nregs < NUM_ARG_REGS)
    *pretend_size = (NUM_ARG_REGS - cum->nregs) * UNITS_PER_WORD;
}

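/* For illustration only: for "int f (int fmt, ...)" the single named
   argument occupies r0, so cum->nregs == 1 and *pretend_size becomes
   (4 - 1) * 4 == 12, i.e. r1-r3 are flushed to the stack.  */
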
/* Return nonzero if the CONSUMER instruction (a store) does not need
   PRODUCER's value to calculate the address.  */

int
arm_no_early_store_addr_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx addr = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (addr) == COND_EXEC)
    addr = COND_EXEC_CODE (addr);
  if (GET_CODE (addr) == PARALLEL)
    addr = XVECEXP (addr, 0, 0);
  addr = XEXP (addr, 0);

  return !reg_overlap_mentioned_p (value, addr);
}

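/* For illustration only: with PRODUCER "mul r1, r2, r3" and CONSUMER
   "str r4, [r1]" the address calculation needs r1, so this returns 0;
   with CONSUMER "str r1, [r5]" only the stored value depends on r1 and
   the function returns nonzero.  */
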
/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value or amount dependency on the
   result of PRODUCER.  */

int
arm_no_early_alu_shift_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);
  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the whole shift
     operation.  */
  if (GET_CODE (early_op) == REG)
    early_op = op;

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER instruction (an ALU op) does not
   have an early register shift value dependency on the result of
   PRODUCER.  */

int
arm_no_early_alu_shift_value_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);
  rtx early_op;

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  early_op = XEXP (op, 0);

  /* This is either an actual independent shift, or a shift applied to
     the first operand of another operation.  We want the value being
     shifted, in either case.  */
  if (GET_CODE (early_op) != REG)
    early_op = XEXP (early_op, 0);

  return !reg_overlap_mentioned_p (value, early_op);
}

/* Return nonzero if the CONSUMER (a mul or mac op) does not
   have an early register mult dependency on the result of
   PRODUCER.  */

int
arm_no_early_mul_dep (rtx producer, rtx consumer)
{
  rtx value = PATTERN (producer);
  rtx op = PATTERN (consumer);

  if (GET_CODE (value) == COND_EXEC)
    value = COND_EXEC_CODE (value);
  if (GET_CODE (value) == PARALLEL)
    value = XVECEXP (value, 0, 0);
  value = XEXP (value, 0);
  if (GET_CODE (op) == COND_EXEC)
    op = COND_EXEC_CODE (op);
  if (GET_CODE (op) == PARALLEL)
    op = XVECEXP (op, 0, 0);
  op = XEXP (op, 1);

  return (GET_CODE (op) == PLUS
	  && !reg_overlap_mentioned_p (value, XEXP (op, 0)));
}

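/* For illustration only: for a mac CONSUMER such as "mla r0, r1, r2, r3"
   the multiply operands are what matter, so a PRODUCER writing r1 or r2
   is an early dependency (result 0), while one writing only the
   accumulator input r3 is not (result nonzero).  */
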
/* We can't rely on the caller doing the proper promotion when
   using APCS or ATPCS.  */

static bool
arm_promote_prototypes (tree t ATTRIBUTE_UNUSED)
{
  return !TARGET_AAPCS_BASED;
}


/* AAPCS based ABIs use short enums by default.  */

static bool
arm_default_short_enums (void)
{
  return TARGET_AAPCS_BASED && arm_abi != ARM_ABI_AAPCS_LINUX;
}


/* AAPCS requires that anonymous bitfields affect structure alignment.  */

static bool
arm_align_anon_bitfield (void)
{
  return TARGET_AAPCS_BASED;
}


/* The generic C++ ABI says 64-bit (long long).  The EABI says 32-bit.  */

static tree
arm_cxx_guard_type (void)
{
  return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
}


/* The EABI says test the least significant bit of a guard variable.  */

static bool
arm_cxx_guard_mask_bit (void)
{
  return TARGET_AAPCS_BASED;
}


/* The EABI specifies that all array cookies are 8 bytes long.  */

static tree
arm_get_cookie_size (tree type)
{
  tree size;

  if (!TARGET_AAPCS_BASED)
    return default_cxx_get_cookie_size (type);

  size = build_int_cst (sizetype, 8);
  return size;
}


/* The EABI says that array cookies should also contain the element size.  */

static bool
arm_cookie_has_size (void)
{
  return TARGET_AAPCS_BASED;
}

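/* For illustration only: under the EABI, "new T[n]" for a T that needs
   a cookie allocates 8 bytes in front of the array, with the element
   size in the first word and the element count in the second, keeping
   the array data 8-byte aligned.  */
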
/* The EABI says constructors and destructors should return a pointer to
   the object constructed/destroyed.  */

static bool
arm_cxx_cdtor_returns_this (void)
{
  return TARGET_AAPCS_BASED;
}

/* The EABI says that an inline function may never be the key
   method.  */

static bool
arm_cxx_key_method_may_be_inline (void)
{
  return !TARGET_AAPCS_BASED;
}

static void
arm_cxx_determine_class_data_visibility (tree decl)
{
  if (!TARGET_AAPCS_BASED)
    return;

  /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
     is exported.  However, on systems without dynamic vague linkage,
     \S 3.2.5.6 says that COMDAT class data has hidden linkage.  */
  if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
    DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
  else
    DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
  DECL_VISIBILITY_SPECIFIED (decl) = 1;
}

static bool
arm_cxx_class_data_always_comdat (void)
{
  /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
     vague linkage if the class has no key function.  */
  return !TARGET_AAPCS_BASED;
}


/* The EABI says __aeabi_atexit should be used to register static
   destructors.  */

static bool
arm_cxx_use_aeabi_atexit (void)
{
  return TARGET_AAPCS_BASED;
}

void
arm_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  rtx addr;
  unsigned long saved_regs;

  saved_regs = arm_compute_save_reg_mask ();

  if ((saved_regs & (1 << LR_REGNUM)) == 0)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
  else
    {
      if (frame_pointer_needed)
	addr = plus_constant (hard_frame_pointer_rtx, -4);
      else
	{
	  /* LR will be the first saved register.  */
	  offsets = arm_get_frame_offsets ();
	  delta = offsets->outgoing_args - (offsets->frame + 4);

	  if (delta >= 4096)
	    {
	      emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
				     GEN_INT (delta & ~4095)));
	      addr = scratch;
	      delta &= 4095;
	    }
	  else
	    addr = stack_pointer_rtx;

	  addr = plus_constant (addr, delta);
	}
      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
}

void
thumb_set_return_address (rtx source, rtx scratch)
{
  arm_stack_offsets *offsets;
  HOST_WIDE_INT delta;
  HOST_WIDE_INT limit;
  int reg;
  rtx addr;
  unsigned long mask;

  emit_insn (gen_rtx_USE (VOIDmode, source));

  mask = thumb1_compute_save_reg_mask ();
  if (mask & (1 << LR_REGNUM))
    {
      offsets = arm_get_frame_offsets ();

      limit = 1024;
      /* Find the saved regs.  */
      if (frame_pointer_needed)
	{
	  delta = offsets->soft_frame - offsets->saved_args;
	  reg = THUMB_HARD_FRAME_POINTER_REGNUM;
	  if (TARGET_THUMB1)
	    limit = 128;
	}
      else
	{
	  delta = offsets->outgoing_args - offsets->saved_args;
	  reg = SP_REGNUM;
	}
      /* Allow for the stack frame.  */
      if (TARGET_THUMB1 && TARGET_BACKTRACE)
	delta -= 16;
      /* The link register is always the first saved register.  */
      delta -= 4;

      /* Construct the address.  */
      addr = gen_rtx_REG (SImode, reg);
      if (delta > limit)
	{
	  emit_insn (gen_movsi (scratch, GEN_INT (delta)));
	  emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
	  addr = scratch;
	}
      else
	addr = plus_constant (addr, delta);

      emit_move_insn (gen_frame_mem (Pmode, addr), source);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
}

/* Implements target hook vector_mode_supported_p.  */

static bool
arm_vector_mode_supported_p (enum machine_mode mode)
{
  if ((mode == V2SImode)
      || (mode == V4HImode)
      || (mode == V8QImode))
    return true;

  return false;
}

/* Implement TARGET_SHIFT_TRUNCATION_MASK.  SImode shifts use normal
   ARM insns and therefore guarantee that the shift count is modulo 256.
   DImode shifts (those implemented by lib1funcs.asm or by optabs.c)
   guarantee no particular behavior for out-of-range counts.  */

static unsigned HOST_WIDE_INT
arm_shift_truncation_mask (enum machine_mode mode)
{
  return mode == SImode ? 255 : 0;
}

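/* For illustration only: because SImode shift counts are known to be
   used modulo 256, "x << (n & 255)" can be simplified to "x << n" for
   SImode, whereas the masking must be kept for DImode shifts.  */
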
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
arm_dbx_register_number (unsigned int regno)
{
  if (regno < 16)
    return regno;

  /* TODO: Legacy targets output FPA regs as registers 16-23 for backwards
     compatibility.  The EABI defines them as registers 96-103.  */
  if (IS_FPA_REGNUM (regno))
    return (TARGET_AAPCS_BASED ? 96 : 16) + regno - FIRST_FPA_REGNUM;

  if (IS_VFP_REGNUM (regno))
    return 64 + regno - FIRST_VFP_REGNUM;

  if (IS_IWMMXT_GR_REGNUM (regno))
    return 104 + regno - FIRST_IWMMXT_GR_REGNUM;

  if (IS_IWMMXT_REGNUM (regno))
    return 112 + regno - FIRST_IWMMXT_REGNUM;

  gcc_unreachable ();
}

#ifdef TARGET_UNWIND_INFO
/* Emit unwind directives for a store-multiple instruction or stack pointer
   push during alignment.
   These should only ever be generated by the function prologue code, so
   expect them to have a particular form.  */

static void
arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
{
  int i;
  HOST_WIDE_INT offset;
  HOST_WIDE_INT nregs;
  int reg_size;
  unsigned reg;
  unsigned lastreg;
  rtx e;

  e = XVECEXP (p, 0, 0);
  if (GET_CODE (e) != SET)
    abort ();

  /* First insn will adjust the stack pointer.  */
  if (GET_CODE (e) != SET
      || GET_CODE (XEXP (e, 0)) != REG
      || REGNO (XEXP (e, 0)) != SP_REGNUM
      || GET_CODE (XEXP (e, 1)) != PLUS)
    abort ();

  offset = -INTVAL (XEXP (XEXP (e, 1), 1));
  nregs = XVECLEN (p, 0) - 1;

  reg = REGNO (XEXP (XVECEXP (p, 0, 1), 1));
  if (reg < 16)
    {
      /* The function prologue may also push pc, but not annotate it as it is
	 never restored.  We turn this into a stack pointer adjustment.  */
      if (nregs * 4 == offset - 4)
	{
	  fprintf (asm_out_file, "\t.pad #4\n");
	  offset -= 4;
	}
      reg_size = 4;
      fprintf (asm_out_file, "\t.save {");
    }
  else if (IS_VFP_REGNUM (reg))
    {
      reg_size = 8;
      fprintf (asm_out_file, "\t.vsave {");
    }
  else if (reg >= FIRST_FPA_REGNUM && reg <= LAST_FPA_REGNUM)
    {
      /* FPA registers are done differently.  */
      asm_fprintf (asm_out_file, "\t.save %r, %wd\n", reg, nregs);
      return;
    }
  else
    /* Unknown register type.  */
    abort ();

  /* If the stack increment doesn't match the size of the saved registers,
     something has gone horribly wrong.  */
  if (offset != nregs * reg_size)
    abort ();

  offset = 0;
  lastreg = 0;
  /* The remaining insns will describe the stores.  */
  for (i = 1; i <= nregs; i++)
    {
      /* Expect (set (mem <addr>) (reg)).
	 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)).  */
      e = XVECEXP (p, 0, i);
      if (GET_CODE (e) != SET
	  || GET_CODE (XEXP (e, 0)) != MEM
	  || GET_CODE (XEXP (e, 1)) != REG)
	abort ();

      reg = REGNO (XEXP (e, 1));
      if (reg < lastreg)
	abort ();

      if (i != 1)
	fprintf (asm_out_file, ", ");
      /* We can't use %r for vfp because we need to use the
	 double precision register names.  */
      if (IS_VFP_REGNUM (reg))
	asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
      else
	asm_fprintf (asm_out_file, "%r", reg);

#ifdef ENABLE_CHECKING
      /* Check that the addresses are consecutive.  */
      e = XEXP (XEXP (e, 0), 0);
      if (GET_CODE (e) == PLUS)
	{
	  offset += reg_size;
	  if (GET_CODE (XEXP (e, 0)) != REG
	      || REGNO (XEXP (e, 0)) != SP_REGNUM
	      || GET_CODE (XEXP (e, 1)) != CONST_INT
	      || offset != INTVAL (XEXP (e, 1)))
	    abort ();
	}
      else if (i != 1
	       || GET_CODE (e) != REG
	       || REGNO (e) != SP_REGNUM)
	abort ();
#endif
    }
  fprintf (asm_out_file, "}\n");
}

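/* For illustration only: a prologue "stmfd sp!, {r4, r5, lr}" matched
   here emits ".save {r4, r5, lr}"; had pc been pushed as well, a
   ".pad #4" would be emitted first to account for the extra word.  */
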
/* Emit unwind directives for a SET.  */

static void
arm_unwind_emit_set (FILE * asm_out_file, rtx p)
{
  rtx e0;
  rtx e1;
  unsigned reg;

  e0 = XEXP (p, 0);
  e1 = XEXP (p, 1);
  switch (GET_CODE (e0))
    {
    case MEM:
      /* Pushing a single register.  */
      if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
	  || GET_CODE (XEXP (XEXP (e0, 0), 0)) != REG
	  || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
	abort ();

      asm_fprintf (asm_out_file, "\t.save ");
      if (IS_VFP_REGNUM (REGNO (e1)))
	asm_fprintf (asm_out_file, "{d%d}\n",
		     (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
      else
	asm_fprintf (asm_out_file, "{%r}\n", REGNO (e1));
      break;

    case REG:
      if (REGNO (e0) == SP_REGNUM)
	{
	  /* A stack increment.  */
	  if (GET_CODE (e1) != PLUS
	      || GET_CODE (XEXP (e1, 0)) != REG
	      || REGNO (XEXP (e1, 0)) != SP_REGNUM
	      || GET_CODE (XEXP (e1, 1)) != CONST_INT)
	    abort ();

	  asm_fprintf (asm_out_file, "\t.pad #%wd\n",
		       -INTVAL (XEXP (e1, 1)));
	}
      else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
	{
	  HOST_WIDE_INT offset;

	  if (GET_CODE (e1) == PLUS)
	    {
	      if (GET_CODE (XEXP (e1, 0)) != REG
		  || GET_CODE (XEXP (e1, 1)) != CONST_INT)
		abort ();
	      reg = REGNO (XEXP (e1, 0));
	      offset = INTVAL (XEXP (e1, 1));
	      asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
			   HARD_FRAME_POINTER_REGNUM, reg,
			   INTVAL (XEXP (e1, 1)));
	    }
	  else if (GET_CODE (e1) == REG)
	    {
	      reg = REGNO (e1);
	      asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
			   HARD_FRAME_POINTER_REGNUM, reg);
	    }
	  else
	    abort ();
	}
      else if (GET_CODE (e1) == REG && REGNO (e1) == SP_REGNUM)
	{
	  /* Move from sp to reg.  */
	  asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
	}
      else if (GET_CODE (e1) == PLUS
	       && GET_CODE (XEXP (e1, 0)) == REG
	       && REGNO (XEXP (e1, 0)) == SP_REGNUM
	       && GET_CODE (XEXP (e1, 1)) == CONST_INT)
	{
	  /* Set reg to offset from sp.  */
	  asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
		       REGNO (e0), (int) INTVAL (XEXP (e1, 1)));
	}
      else if (GET_CODE (e1) == UNSPEC && XINT (e1, 1) == UNSPEC_STACK_ALIGN)
	{
	  /* Stack pointer save before alignment.  */
	  reg = REGNO (e0);
	  asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n",
		       reg + 0x90, reg);
	}
      else
	abort ();
      break;

    default:
      abort ();
    }
}

/* Emit unwind directives for the given insn.  */

static void
arm_unwind_emit (FILE * asm_out_file, rtx insn)
{
  rtx pat;

  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (GET_CODE (insn) == NOTE || !RTX_FRAME_RELATED_P (insn))
    return;

  pat = find_reg_note (insn, REG_FRAME_RELATED_EXPR, NULL_RTX);
  if (pat)
    pat = XEXP (pat, 0);
  else
    pat = PATTERN (insn);

  switch (GET_CODE (pat))
    {
    case SET:
      arm_unwind_emit_set (asm_out_file, pat);
      break;

    case SEQUENCE:
      /* Store multiple.  */
      arm_unwind_emit_sequence (asm_out_file, pat);
      break;

    default:
      abort ();
    }
}


/* Output a reference from a function exception table to the type_info
   object X.  The EABI specifies that the symbol should be relocated by
   an R_ARM_TARGET2 relocation.  */

static bool
arm_output_ttype (rtx x)
{
  fputs ("\t.word\t", asm_out_file);
  output_addr_const (asm_out_file, x);
  /* Use special relocations for symbol references.  */
  if (GET_CODE (x) != CONST_INT)
    fputs ("(TARGET2)", asm_out_file);
  fputc ('\n', asm_out_file);

  return TRUE;
}

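/* For illustration only: a catch clause for int would emit something
   like "\t.word\t_ZTIi(TARGET2)" into the exception table.  */
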
#endif /* TARGET_UNWIND_INFO */

/* Handle UNSPEC DWARF call frame instructions.  These are needed for dynamic
   stack alignment.  */

static void
arm_dwarf_handle_frame_unspec (const char *label, rtx pattern, int index)
{
  rtx unspec = SET_SRC (pattern);
  gcc_assert (GET_CODE (unspec) == UNSPEC);

  switch (index)
    {
    case UNSPEC_STACK_ALIGN:
      /* ??? We should set the CFA = (SP & ~7).  At this point we haven't
	 put anything on the stack, so hopefully it won't matter.
	 CFA = SP will be correct after alignment.  */
      dwarf2out_reg_save_reg (label, stack_pointer_rtx,
			      SET_DEST (pattern));
      break;
    default:
      gcc_unreachable ();
    }
}


/* Output unwind directives for the start/end of a function.  */

void
arm_output_fn_unwind (FILE * f, bool prologue)
{
  if (!ARM_EABI_UNWIND_TABLES)
    return;

  if (prologue)
    fputs ("\t.fnstart\n", f);
  else
    fputs ("\t.fnend\n", f);
}

static bool
arm_emit_tls_decoration (FILE *fp, rtx x)
{
  enum tls_reloc reloc;
  rtx val;

  val = XVECEXP (x, 0, 0);
  reloc = INTVAL (XVECEXP (x, 0, 1));

  output_addr_const (fp, val);

  switch (reloc)
    {
    case TLS_GD32:
      fputs ("(tlsgd)", fp);
      break;
    case TLS_LDM32:
      fputs ("(tlsldm)", fp);
      break;
    case TLS_LDO32:
      fputs ("(tlsldo)", fp);
      break;
    case TLS_IE32:
      fputs ("(gottpoff)", fp);
      break;
    case TLS_LE32:
      fputs ("(tpoff)", fp);
      break;
    default:
      gcc_unreachable ();
    }

  switch (reloc)
    {
    case TLS_GD32:
    case TLS_LDM32:
    case TLS_IE32:
      fputs (" + (. - ", fp);
      output_addr_const (fp, XVECEXP (x, 0, 2));
      fputs (" - ", fp);
      output_addr_const (fp, XVECEXP (x, 0, 3));
      fputc (')', fp);
      break;
    default:
      break;
    }

  return TRUE;
}

bool
arm_output_addr_const_extra (FILE *fp, rtx x)
{
  if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
    return arm_emit_tls_decoration (fp, x);
  else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
    {
      char label[256];
      int labelno = INTVAL (XVECEXP (x, 0, 0));

      ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
      assemble_name_raw (fp, label);

      return TRUE;
    }
  else if (GET_CODE (x) == CONST_VECTOR)
    return arm_emit_vector_const (fp, x);

  return FALSE;
}

/* Output assembly for a shift instruction.
   SET_FLAGS determines how the instruction modifies the condition codes.
   0 - Do not set condition codes.
   1 - Set condition codes.
   2 - Use smallest instruction.  */

const char *
arm_output_shift (rtx * operands, int set_flags)
{
  char pattern[100];
  static const char flag_chars[3] = {'?', '.', '!'};
  const char *shift;
  HOST_WIDE_INT val;
  char c;

  c = flag_chars[set_flags];
  if (TARGET_UNIFIED_ASM)
    {
      shift = shift_op (operands[3], &val);
      if (shift)
	{
	  if (val != -1)
	    operands[2] = GEN_INT (val);
	  sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
	}
      else
	sprintf (pattern, "mov%%%c\t%%0, %%1", c);
    }
  else
    sprintf (pattern, "mov%%%c\t%%0, %%1%%S3", c);
  output_asm_insn (pattern, operands);
  return "";
}

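/* For illustration only: with unified syntax, set_flags == 2 and an
   "lsl #3" shift operand, the pattern becomes "lsl%!\t%0, %1, %2",
   printed as "lsls r0, r1, #3" when a flag-setting 16-bit encoding
   is available.  */
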
/* Output a Thumb-2 casesi instruction.  */

const char *
thumb2_output_casesi (rtx *operands)
{
  rtx diff_vec = PATTERN (next_real_insn (operands[2]));

  gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);

  output_asm_insn ("cmp\t%0, %1", operands);
  output_asm_insn ("bhi\t%l3", operands);
  switch (GET_MODE (diff_vec))
    {
    case QImode:
      return "tbb\t[%|pc, %0]";
    case HImode:
      return "tbh\t[%|pc, %0, lsl #1]";
    case SImode:
      if (flag_pic)
	{
	  output_asm_insn ("adr\t%4, %l2", operands);
	  output_asm_insn ("ldr\t%5, [%4, %0, lsl #2]", operands);
	  output_asm_insn ("add\t%4, %4, %5", operands);
	  return "bx\t%4";
	}
      else
	{
	  output_asm_insn ("adr\t%4, %l2", operands);
	  return "ldr\t%|pc, [%4, %0, lsl #2]";
	}
    default:
      gcc_unreachable ();
    }
}

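/* For illustration only: a dense switch whose table offsets fit in a
   byte dispatches with "tbb [pc, r0]" followed by the byte table;
   halfword offsets use tbh, and anything larger falls back to the
   explicit table load above.  */
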
#include "gt-arm.h"