1 /* Subroutines for insn-output.c for Motorola 68000 family.
2 Copyright (C) 1987, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000,
3 2001, 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 Free Software Foundation, Inc.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
30 #include "hard-reg-set.h"
32 #include "insn-config.h"
33 #include "conditions.h"
35 #include "insn-attr.h"
42 #include "target-def.h"
46 /* ??? Need to add a dependency between m68k.o and sched-int.h. */
47 #include "sched-int.h"
48 #include "insn-codes.h"
/* Map each hard register number to its register class: entries for the
   data registers d0-d7 (DATA_REGS), the address registers a0-a7
   (ADDR_REGS) and the FPU registers fp0-fp7 (FP_REGS).
   NOTE(review): this listing is elided -- the initializer's opening
   brace (line 52) and any trailing entries/closing brace (line 59+)
   are missing.  */
51 enum reg_class regno_reg_class[] =
53 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
54 DATA_REGS, DATA_REGS, DATA_REGS, DATA_REGS,
55 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
56 ADDR_REGS, ADDR_REGS, ADDR_REGS, ADDR_REGS,
57 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
58 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
63 /* The minimum number of integer registers that we want to save with the
64 movem instruction. Using two movel instructions instead of a single
65 moveml is about 15% faster for the 68020 and 68030 at no expense in
67 #define MIN_MOVEM_REGS 3
69 /* The minimum number of floating point registers that we want to save
70 with the fmovem instruction. */
71 #define MIN_FMOVEM_REGS 1
73 /* Structure describing stack frame layout. */
/* NOTE(review): the struct's braces and several members referenced
   elsewhere in this file (offset, size, reg_no, reg_mask is present,
   fpu_no, funcdef_no) are elided from this listing -- original line
   numbers jump (74-75, 77-78, 81, 83-84, 86, 88-90, 92, 96, 98-100
   missing).  */
76 /* Stack pointer to frame pointer offset. */
79 /* Offset of FPU registers. */
80 HOST_WIDE_INT foffset;
82 /* Frame size in bytes (rounded up). */
85 /* Data and address register. */
/* Bit I of reg_mask is set when D0_REG + I must be saved (see
   m68k_compute_frame_layout).  */
87 unsigned int reg_mask;
/* Bit I of fpu_mask is set when FP0_REG + I must be saved.  */
91 unsigned int fpu_mask;
93 /* Offsets relative to ARG_POINTER. */
94 HOST_WIDE_INT frame_pointer_offset;
95 HOST_WIDE_INT stack_pointer_offset;
97 /* Function which the above information refers to. */
101 /* Current frame information calculated by m68k_compute_frame_layout(). */
102 static struct m68k_frame current_frame;
104 /* Structure describing an m68k address.
106 If CODE is UNKNOWN, the address is BASE + INDEX * SCALE + OFFSET,
107 with null fields evaluating to 0. Here:
109 - BASE satisfies m68k_legitimate_base_reg_p
110 - INDEX satisfies m68k_legitimate_index_reg_p
111 - OFFSET satisfies m68k_legitimate_constant_address_p
113 INDEX is either HImode or SImode. The other fields are SImode.
115 If CODE is PRE_DEC, the address is -(BASE). If CODE is POST_INC,
116 the address is (BASE)+. */
117 struct m68k_address {
/* NOTE(review): the struct members (presumably code, base, index,
   scale and offset per the comment above) and the closing brace are
   elided from this listing -- confirm against the full source.  */
125 static int m68k_sched_adjust_cost (rtx, rtx, rtx, int);
126 static int m68k_sched_issue_rate (void);
127 static int m68k_sched_variable_issue (FILE *, int, rtx, int);
128 static void m68k_sched_md_init_global (FILE *, int, int);
129 static void m68k_sched_md_finish_global (FILE *, int);
130 static void m68k_sched_md_init (FILE *, int, int);
131 static void m68k_sched_dfa_pre_advance_cycle (void);
132 static void m68k_sched_dfa_post_advance_cycle (void);
133 static int m68k_sched_first_cycle_multipass_dfa_lookahead (void);
135 static bool m68k_can_eliminate (const int, const int);
136 static bool m68k_legitimate_address_p (enum machine_mode, rtx, bool);
137 static bool m68k_handle_option (size_t, const char *, int);
138 static rtx find_addr_reg (rtx);
139 static const char *singlemove_string (rtx *);
140 static void m68k_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
141 HOST_WIDE_INT, tree);
142 static rtx m68k_struct_value_rtx (tree, int);
143 static tree m68k_handle_fndecl_attribute (tree *node, tree name,
144 tree args, int flags,
146 static void m68k_compute_frame_layout (void);
147 static bool m68k_save_reg (unsigned int regno, bool interrupt_handler);
148 static bool m68k_ok_for_sibcall_p (tree, tree);
149 static bool m68k_tls_symbol_p (rtx);
150 static rtx m68k_legitimize_address (rtx, rtx, enum machine_mode);
151 static bool m68k_rtx_costs (rtx, int, int, int *, bool);
152 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
153 static bool m68k_return_in_memory (const_tree, const_tree);
155 static void m68k_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
156 static void m68k_trampoline_init (rtx, tree, rtx);
159 /* Specify the identification number of the library being built */
160 const char *m68k_library_id_string = "_current_shared_library_a5_offset_";
162 /* Initialize the GCC target structure. */
164 #if INT_OP_GROUP == INT_OP_DOT_WORD
165 #undef TARGET_ASM_ALIGNED_HI_OP
166 #define TARGET_ASM_ALIGNED_HI_OP "\t.word\t"
169 #if INT_OP_GROUP == INT_OP_NO_DOT
170 #undef TARGET_ASM_BYTE_OP
171 #define TARGET_ASM_BYTE_OP "\tbyte\t"
172 #undef TARGET_ASM_ALIGNED_HI_OP
173 #define TARGET_ASM_ALIGNED_HI_OP "\tshort\t"
174 #undef TARGET_ASM_ALIGNED_SI_OP
175 #define TARGET_ASM_ALIGNED_SI_OP "\tlong\t"
178 #if INT_OP_GROUP == INT_OP_DC
179 #undef TARGET_ASM_BYTE_OP
180 #define TARGET_ASM_BYTE_OP "\tdc.b\t"
181 #undef TARGET_ASM_ALIGNED_HI_OP
182 #define TARGET_ASM_ALIGNED_HI_OP "\tdc.w\t"
183 #undef TARGET_ASM_ALIGNED_SI_OP
184 #define TARGET_ASM_ALIGNED_SI_OP "\tdc.l\t"
187 #undef TARGET_ASM_UNALIGNED_HI_OP
188 #define TARGET_ASM_UNALIGNED_HI_OP TARGET_ASM_ALIGNED_HI_OP
189 #undef TARGET_ASM_UNALIGNED_SI_OP
190 #define TARGET_ASM_UNALIGNED_SI_OP TARGET_ASM_ALIGNED_SI_OP
192 #undef TARGET_ASM_OUTPUT_MI_THUNK
193 #define TARGET_ASM_OUTPUT_MI_THUNK m68k_output_mi_thunk
194 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
195 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
197 #undef TARGET_ASM_FILE_START_APP_OFF
198 #define TARGET_ASM_FILE_START_APP_OFF true
200 #undef TARGET_LEGITIMIZE_ADDRESS
201 #define TARGET_LEGITIMIZE_ADDRESS m68k_legitimize_address
203 #undef TARGET_SCHED_ADJUST_COST
204 #define TARGET_SCHED_ADJUST_COST m68k_sched_adjust_cost
206 #undef TARGET_SCHED_ISSUE_RATE
207 #define TARGET_SCHED_ISSUE_RATE m68k_sched_issue_rate
209 #undef TARGET_SCHED_VARIABLE_ISSUE
210 #define TARGET_SCHED_VARIABLE_ISSUE m68k_sched_variable_issue
212 #undef TARGET_SCHED_INIT_GLOBAL
213 #define TARGET_SCHED_INIT_GLOBAL m68k_sched_md_init_global
215 #undef TARGET_SCHED_FINISH_GLOBAL
216 #define TARGET_SCHED_FINISH_GLOBAL m68k_sched_md_finish_global
218 #undef TARGET_SCHED_INIT
219 #define TARGET_SCHED_INIT m68k_sched_md_init
221 #undef TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE
222 #define TARGET_SCHED_DFA_PRE_ADVANCE_CYCLE m68k_sched_dfa_pre_advance_cycle
224 #undef TARGET_SCHED_DFA_POST_ADVANCE_CYCLE
225 #define TARGET_SCHED_DFA_POST_ADVANCE_CYCLE m68k_sched_dfa_post_advance_cycle
227 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
228 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
229 m68k_sched_first_cycle_multipass_dfa_lookahead
231 #undef TARGET_HANDLE_OPTION
232 #define TARGET_HANDLE_OPTION m68k_handle_option
234 #undef TARGET_RTX_COSTS
235 #define TARGET_RTX_COSTS m68k_rtx_costs
237 #undef TARGET_ATTRIBUTE_TABLE
238 #define TARGET_ATTRIBUTE_TABLE m68k_attribute_table
240 #undef TARGET_PROMOTE_PROTOTYPES
241 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
243 #undef TARGET_STRUCT_VALUE_RTX
244 #define TARGET_STRUCT_VALUE_RTX m68k_struct_value_rtx
246 #undef TARGET_CANNOT_FORCE_CONST_MEM
247 #define TARGET_CANNOT_FORCE_CONST_MEM m68k_illegitimate_symbolic_constant_p
249 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
250 #define TARGET_FUNCTION_OK_FOR_SIBCALL m68k_ok_for_sibcall_p
252 #if M68K_HONOR_TARGET_STRICT_ALIGNMENT
253 #undef TARGET_RETURN_IN_MEMORY
254 #define TARGET_RETURN_IN_MEMORY m68k_return_in_memory
258 #undef TARGET_HAVE_TLS
259 #define TARGET_HAVE_TLS (true)
261 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
262 #define TARGET_ASM_OUTPUT_DWARF_DTPREL m68k_output_dwarf_dtprel
265 #undef TARGET_LEGITIMATE_ADDRESS_P
266 #define TARGET_LEGITIMATE_ADDRESS_P m68k_legitimate_address_p
268 #undef TARGET_CAN_ELIMINATE
269 #define TARGET_CAN_ELIMINATE m68k_can_eliminate
271 #undef TARGET_TRAMPOLINE_INIT
272 #define TARGET_TRAMPOLINE_INIT m68k_trampoline_init
274 static const struct attribute_spec m68k_attribute_table[] =
276 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
277 { "interrupt", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
278 { "interrupt_handler", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
279 { "interrupt_thread", 0, 0, true, false, false, m68k_handle_fndecl_attribute },
280 { NULL, 0, 0, false, false, false, NULL }
283 struct gcc_target targetm = TARGET_INITIALIZER;
285 /* Base flags for 68k ISAs. */
286 #define FL_FOR_isa_00 FL_ISA_68000
287 #define FL_FOR_isa_10 (FL_FOR_isa_00 | FL_ISA_68010)
288 /* FL_68881 controls the default setting of -m68881. gcc has traditionally
289 generated 68881 code for 68020 and 68030 targets unless explicitly told
291 #define FL_FOR_isa_20 (FL_FOR_isa_10 | FL_ISA_68020 \
292 | FL_BITFIELD | FL_68881)
293 #define FL_FOR_isa_40 (FL_FOR_isa_20 | FL_ISA_68040)
294 #define FL_FOR_isa_cpu32 (FL_FOR_isa_10 | FL_ISA_68020)
296 /* Base flags for ColdFire ISAs. */
297 #define FL_FOR_isa_a (FL_COLDFIRE | FL_ISA_A)
298 #define FL_FOR_isa_aplus (FL_FOR_isa_a | FL_ISA_APLUS | FL_CF_USP)
299 /* Note ISA_B doesn't necessarily include USP (user stack pointer) support. */
300 #define FL_FOR_isa_b (FL_FOR_isa_a | FL_ISA_B | FL_CF_HWDIV)
301 /* ISA_C is not upwardly compatible with ISA_B. */
302 #define FL_FOR_isa_c (FL_FOR_isa_a | FL_ISA_C | FL_CF_USP)
306 /* Traditional 68000 instruction sets. */
312 /* ColdFire instruction set variants. */
/* NOTE(review): the enum definitions these two comments introduced
   (lines 307-311 and 313-319) are elided from this listing.  */
320 /* Information about one of the -march, -mcpu or -mtune arguments. */
321 struct m68k_target_selection
323 /* The argument being described. */
326 /* For -mcpu, this is the device selected by the option.
327 For -mtune and -march, it is a representative device
328 for the microarchitecture or ISA respectively. */
329 enum target_device device;
331 /* The M68K_DEVICE fields associated with DEVICE. See the comment
332 in m68k-devices.def for details. FAMILY is only valid for -mcpu. */
334 enum uarch_type microarch;
/* NOTE(review): remaining members (name, family, isa, flags -- as
   used by the initializers below) and the closing brace are elided.  */
339 /* A list of all devices in m68k-devices.def. Used for -mcpu selection. */
/* Each M68K_DEVICE entry in m68k-devices.def is expanded into one
   table row; the table is terminated by a NULL-named sentinel.
   NOTE(review): the array's braces and the #undef of M68K_DEVICE are
   elided from this listing.  */
340 static const struct m68k_target_selection all_devices[] =
342 #define M68K_DEVICE(NAME,ENUM_VALUE,FAMILY,MULTILIB,MICROARCH,ISA,FLAGS) \
343 { NAME, ENUM_VALUE, FAMILY, u##MICROARCH, ISA, FLAGS | FL_FOR_##ISA },
344 #include "m68k-devices.def"
346 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
349 /* A list of all ISAs, mapping each one to a representative device.
350 Used for -march selection. */
/* NOTE(review): braces and the continuation lines of the multi-line
   entries ("isaa", "isaaplus", "isac" -- lines 361, 363, 366) are
   elided from this listing.  */
351 static const struct m68k_target_selection all_isas[] =
353 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
354 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
355 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
356 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
357 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
358 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
359 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
360 { "isaa", mcf5206e, NULL, ucfv2, isa_a, (FL_FOR_isa_a
362 { "isaaplus", mcf5271, NULL, ucfv2, isa_aplus, (FL_FOR_isa_aplus
364 { "isab", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
365 { "isac", unk_device, NULL, ucfv4, isa_c, (FL_FOR_isa_c
367 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
370 /* A list of all microarchitectures, mapping each one to a representative
371 device. Used for -mtune selection. */
/* NOTE(review): braces and the continuation lines of the multi-line
   entries ("cfv3", "cfv4e" -- lines 386, 389-391) are elided from
   this listing.  */
372 static const struct m68k_target_selection all_microarchs[] =
374 { "68000", m68000, NULL, u68000, isa_00, FL_FOR_isa_00 },
375 { "68010", m68010, NULL, u68010, isa_10, FL_FOR_isa_10 },
376 { "68020", m68020, NULL, u68020, isa_20, FL_FOR_isa_20 },
377 { "68020-40", m68020, NULL, u68020_40, isa_20, FL_FOR_isa_20 },
378 { "68020-60", m68020, NULL, u68020_60, isa_20, FL_FOR_isa_20 },
379 { "68030", m68030, NULL, u68030, isa_20, FL_FOR_isa_20 },
380 { "68040", m68040, NULL, u68040, isa_40, FL_FOR_isa_40 },
381 { "68060", m68060, NULL, u68060, isa_40, FL_FOR_isa_40 },
382 { "cpu32", cpu32, NULL, ucpu32, isa_20, FL_FOR_isa_cpu32 },
383 { "cfv1", mcf51qe, NULL, ucfv1, isa_c, FL_FOR_isa_c },
384 { "cfv2", mcf5206, NULL, ucfv2, isa_a, FL_FOR_isa_a },
385 { "cfv3", mcf5307, NULL, ucfv3, isa_a, (FL_FOR_isa_a
387 { "cfv4", mcf5407, NULL, ucfv4, isa_b, FL_FOR_isa_b },
388 { "cfv4e", mcf547x, NULL, ucfv4e, isa_b, (FL_FOR_isa_b
392 { NULL, unk_device, NULL, unk_arch, isa_max, 0 }
395 /* The entries associated with the -mcpu, -march and -mtune settings,
396 or null for options that have not been used. */
397 const struct m68k_target_selection *m68k_cpu_entry;
398 const struct m68k_target_selection *m68k_arch_entry;
399 const struct m68k_target_selection *m68k_tune_entry;
401 /* Which CPU we are generating code for. */
402 enum target_device m68k_cpu;
404 /* Which microarchitecture to tune for. */
405 enum uarch_type m68k_tune;
407 /* Which FPU to use. */
408 enum fpu_type m68k_fpu;
410 /* The set of FL_* flags that apply to the target processor. */
411 unsigned int m68k_cpu_flags;
413 /* The set of FL_* flags that apply to the processor to be tuned for. */
414 unsigned int m68k_tune_flags;
416 /* Asm templates for calling or jumping to an arbitrary symbolic address,
417 or NULL if such calls or jumps are not supported. The address is held
419 const char *m68k_symbolic_call;
420 const char *m68k_symbolic_jump;
422 /* Enum variable that corresponds to m68k_symbolic_call values. */
423 enum M68K_SYMBOLIC_CALL m68k_symbolic_call_var;
426 /* See whether TABLE has an entry with name NAME. Return true and
427 store the entry in *ENTRY if so, otherwise return false and
428 leave *ENTRY alone. */
/* NOTE(review): the return type, the NAME parameter line, the braces,
   the declaration of I, the *ENTRY store and both return statements
   are elided from this listing (lines 429-430, 433-436, 439-443).  */
431 m68k_find_selection (const struct m68k_target_selection **entry,
432 const struct m68k_target_selection *table,
437 for (i = 0; table[i].name; i++)
438 if (strcmp (table[i].name, name) == 0)
446 /* Implement TARGET_HANDLE_OPTION.  Dispatch each -m option either to
   m68k_find_selection (recording the -march/-mcpu/-mtune choice, with
   the legacy -m68000/-m68020/-m5206e/... spellings mapped to -mcpu
   equivalents) or, for -mshared-library-id=, validate the value and
   build the A5-relative offset string.
   NOTE(review): the switch statement itself, all case labels except
   OPT_mshared_library_id_, the braces and the default return are
   elided from this listing -- each visible "return" line below
   belonged to its own (now missing) case label.  */
449 m68k_handle_option (size_t code, const char *arg, int value)
454 return m68k_find_selection (&m68k_arch_entry, all_isas, arg);
457 return m68k_find_selection (&m68k_cpu_entry, all_devices, arg);
460 return m68k_find_selection (&m68k_tune_entry, all_microarchs, arg);
463 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206");
466 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5206e");
469 return m68k_find_selection (&m68k_cpu_entry, all_devices, "528x");
472 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5307");
475 return m68k_find_selection (&m68k_cpu_entry, all_devices, "5407");
478 return m68k_find_selection (&m68k_cpu_entry, all_devices, "547x");
482 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68000");
485 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68010");
489 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68020");
492 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
494 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
497 return (m68k_find_selection (&m68k_tune_entry, all_microarchs,
499 && m68k_find_selection (&m68k_cpu_entry, all_devices, "68020"));
502 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68030");
505 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68040");
508 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68060");
511 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68302");
515 return m68k_find_selection (&m68k_cpu_entry, all_devices, "68332");
517 case OPT_mshared_library_id_:
518 if (value > MAX_LIBRARY_ID)
519 error ("-mshared-library-id=%s is not between 0 and %d",
520 arg, MAX_LIBRARY_ID);
/* Library id N is stored as the A5 offset (-4N - 4) in string form;
   the asprintf result intentionally leaks for the compiler's
   lifetime.  */
524 asprintf (&tmp, "%d", (value * -4) - 4);
525 m68k_library_id_string = tmp;
534 /* Sometimes certain combinations of command options do not make
535 sense on a particular target machine. You can define a macro
536 `OVERRIDE_OPTIONS' to take account of this. This macro, if
537 defined, is executed once just after all the command options have
540 Don't use this macro to turn on various extra optimizations for
541 `-O'. That is what `OPTIMIZATION_OPTIONS' is for. */
/* NOTE(review): this listing is heavily elided throughout the
   function -- the return type, braces, many if/else keywords, case
   bodies' break statements and several condition lines are missing
   wherever the embedded source line numbers jump.  The visible code
   resolves the -mcpu/-march/-mtune selections into m68k_cpu,
   m68k_tune and their flag sets, derives default target_flags bits,
   picks the FPU type, validates option combinations, chooses the
   symbolic call/jump templates and configures the scheduler.  */
544 override_options (void)
546 const struct m68k_target_selection *entry;
547 unsigned long target_mask;
555 -march=ARCH should generate code that runs any processor
556 implementing architecture ARCH. -mcpu=CPU should override -march
557 and should generate code that runs on processor CPU, making free
558 use of any instructions that CPU understands. -mtune=UARCH applies
559 on top of -mcpu or -march and optimizes the code for UARCH. It does
560 not change the target architecture. */
563 /* Complain if the -march setting is for a different microarchitecture,
564 or includes flags that the -mcpu setting doesn't. */
566 && (m68k_arch_entry->microarch != m68k_cpu_entry->microarch
567 || (m68k_arch_entry->flags & ~m68k_cpu_entry->flags) != 0))
568 warning (0, "-mcpu=%s conflicts with -march=%s",
569 m68k_cpu_entry->name, m68k_arch_entry->name);
571 entry = m68k_cpu_entry;
574 entry = m68k_arch_entry;
577 entry = all_devices + TARGET_CPU_DEFAULT;
579 m68k_cpu_flags = entry->flags;
581 /* Use the architecture setting to derive default values for
585 /* ColdFire is lenient about alignment. */
586 if (!TARGET_COLDFIRE)
587 target_mask |= MASK_STRICT_ALIGNMENT;
589 if ((m68k_cpu_flags & FL_BITFIELD) != 0)
590 target_mask |= MASK_BITFIELD;
591 if ((m68k_cpu_flags & FL_CF_HWDIV) != 0)
592 target_mask |= MASK_CF_HWDIV;
593 if ((m68k_cpu_flags & (FL_68881 | FL_CF_FPU)) != 0)
594 target_mask |= MASK_HARD_FLOAT;
/* Only apply derived defaults for bits the user did not set
   explicitly on the command line.  */
595 target_flags |= target_mask & ~target_flags_explicit;
597 /* Set the directly-usable versions of the -mcpu and -mtune settings. */
598 m68k_cpu = entry->device;
601 m68k_tune = m68k_tune_entry->microarch;
602 m68k_tune_flags = m68k_tune_entry->flags;
604 #ifdef M68K_DEFAULT_TUNE
605 else if (!m68k_cpu_entry && !m68k_arch_entry)
607 enum target_device dev;
608 dev = all_microarchs[M68K_DEFAULT_TUNE].device;
609 m68k_tune_flags = all_devices[dev]->flags;
614 m68k_tune = entry->microarch;
615 m68k_tune_flags = entry->flags;
618 /* Set the type of FPU. */
619 m68k_fpu = (!TARGET_HARD_FLOAT ? FPUTYPE_NONE
620 : (m68k_cpu_flags & FL_COLDFIRE) != 0 ? FPUTYPE_COLDFIRE
623 /* Sanity check to ensure that msep-data and mid-shared-library are not
624 * both specified together. Doing so simply doesn't make sense.
626 if (TARGET_SEP_DATA && TARGET_ID_SHARED_LIBRARY)
627 error ("cannot specify both -msep-data and -mid-shared-library");
629 /* If we're generating code for a separate A5 relative data segment,
630 * we've got to enable -fPIC as well. This might be relaxable to
631 * -fpic but it hasn't been tested properly.
633 if (TARGET_SEP_DATA || TARGET_ID_SHARED_LIBRARY)
636 /* -mpcrel -fPIC uses 32-bit pc-relative displacements. Raise an
637 error if the target does not support them. */
638 if (TARGET_PCREL && !TARGET_68020 && flag_pic == 2)
639 error ("-mpcrel -fPIC is not currently supported on selected cpu");
641 /* ??? A historic way of turning on pic, or is this intended to
642 be an embedded thing that doesn't have the same name binding
643 significance that it does on hosted ELF systems? */
644 if (TARGET_PCREL && flag_pic == 0)
649 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_JSR;
651 m68k_symbolic_jump = "jra %a0";
653 else if (TARGET_ID_SHARED_LIBRARY)
654 /* All addresses must be loaded from the GOT. */
656 else if (TARGET_68020 || TARGET_ISAB || TARGET_ISAC)
659 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_C;
661 m68k_symbolic_call_var = M68K_SYMBOLIC_CALL_BSR_P;
664 /* No unconditional long branch */;
665 else if (TARGET_PCREL)
666 m68k_symbolic_jump = "bra%.l %c0";
668 m68k_symbolic_jump = "bra%.l %p0";
669 /* Turn off function cse if we are doing PIC. We always want
670 function call to be done as `bsr foo@PLTPC'. */
671 /* ??? It's traditional to do this for -mpcrel too, but it isn't
672 clear how intentional that is. */
673 flag_no_function_cse = 1;
676 switch (m68k_symbolic_call_var)
678 case M68K_SYMBOLIC_CALL_JSR:
679 m68k_symbolic_call = "jsr %a0";
682 case M68K_SYMBOLIC_CALL_BSR_C:
683 m68k_symbolic_call = "bsr%.l %c0";
686 case M68K_SYMBOLIC_CALL_BSR_P:
687 m68k_symbolic_call = "bsr%.l %p0";
690 case M68K_SYMBOLIC_CALL_NONE:
691 gcc_assert (m68k_symbolic_call == NULL);
698 #ifndef ASM_OUTPUT_ALIGN_WITH_NOP
699 if (align_labels > 2)
701 warning (0, "-falign-labels=%d is not supported", align_labels);
706 warning (0, "-falign-loops=%d is not supported", align_loops);
711 SUBTARGET_OVERRIDE_OPTIONS;
713 /* Setup scheduling options. */
715 m68k_sched_cpu = CPU_CFV1;
717 m68k_sched_cpu = CPU_CFV2;
719 m68k_sched_cpu = CPU_CFV3;
721 m68k_sched_cpu = CPU_CFV4;
/* Unknown CPUs get no DFA description, so scheduling is disabled
   entirely for them.  */
724 m68k_sched_cpu = CPU_UNKNOWN;
725 flag_schedule_insns = 0;
726 flag_schedule_insns_after_reload = 0;
727 flag_modulo_sched = 0;
730 if (m68k_sched_cpu != CPU_UNKNOWN)
732 if ((m68k_cpu_flags & (FL_CF_EMAC | FL_CF_EMAC_B)) != 0)
733 m68k_sched_mac = MAC_CF_EMAC;
734 else if ((m68k_cpu_flags & FL_CF_MAC) != 0)
735 m68k_sched_mac = MAC_CF_MAC;
737 m68k_sched_mac = MAC_NO;
741 /* Generate a macro of the form __mPREFIX_cpu_NAME, where PREFIX is the
742 given argument and NAME is the argument passed to -mcpu. Return NULL
743 if -mcpu was not passed. */
/* NOTE(review): the return type, braces and the guard that returns
   NULL when m68k_cpu_entry is unset (lines 744-745, 747-749) are
   elided from this listing.  */
746 m68k_cpp_cpu_ident (const char *prefix)
750 return concat ("__m", prefix, "_cpu_", m68k_cpu_entry->name, NULL);
753 /* Generate a macro of the form __mPREFIX_family_NAME, where PREFIX is the
754 given argument and NAME is the name of the representative device for
755 the -mcpu argument's family. Return NULL if -mcpu was not passed. */
/* NOTE(review): the return type, braces and the NULL-returning guard
   (lines 756-757, 759-761) are elided from this listing.  */
758 m68k_cpp_cpu_family (const char *prefix)
762 return concat ("__m", prefix, "_family_", m68k_cpu_entry->family, NULL);
765 /* Return m68k_fk_interrupt_handler if FUNC has an "interrupt" or
766 "interrupt_handler" attribute and interrupt_thread if FUNC has an
767 "interrupt_thread" attribute. Otherwise, return
768 m68k_fk_normal_function. */
/* NOTE(review): braces, the declaration of A and the "if (a != NULL_TREE)"
   guards preceding each return (lines 772-774, 776, 778, 780, 782,
   784, 786, 788) are elided from this listing.  */
770 enum m68k_function_kind
771 m68k_get_function_kind (tree func)
775 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
777 a = lookup_attribute ("interrupt", DECL_ATTRIBUTES (func));
779 return m68k_fk_interrupt_handler;
781 a = lookup_attribute ("interrupt_handler", DECL_ATTRIBUTES (func));
783 return m68k_fk_interrupt_handler;
785 a = lookup_attribute ("interrupt_thread", DECL_ATTRIBUTES (func));
787 return m68k_fk_interrupt_thread;
789 return m68k_fk_normal_function;
792 /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
793 struct attribute_spec.handler. */
/* Rejects the attribute (setting *no_add_attrs) when the decl is not
   a function, when an interrupt attribute is already present, or when
   interrupt_thread is used on a non-fido target.
   NOTE(review): the return type, the no_add_attrs parameter line,
   braces, the fido-target condition preceding the strcmp and the
   final return (lines 794, 798-799, 801, 803, 805-806, 808, 811-813,
   815, 818+) are elided from this listing.  */
795 m68k_handle_fndecl_attribute (tree *node, tree name,
796 tree args ATTRIBUTE_UNUSED,
797 int flags ATTRIBUTE_UNUSED,
800 if (TREE_CODE (*node) != FUNCTION_DECL)
802 warning (OPT_Wattributes, "%qE attribute only applies to functions",
804 *no_add_attrs = true;
807 if (m68k_get_function_kind (*node) != m68k_fk_normal_function)
809 error ("multiple interrupt attributes not allowed");
810 *no_add_attrs = true;
814 && !strcmp (IDENTIFIER_POINTER (name), "interrupt_thread"))
816 error ("interrupt_thread is available only on fido");
817 *no_add_attrs = true;
/* Fill in current_frame for the function being compiled: frame size
   rounded to a 4-byte multiple, the masks and counts of integer
   (d0-a7) and FPU (fp0-fp7) registers that must be saved, and the
   total save-area offsets.  Cached per funcdef_no once reload is
   complete.
   NOTE(review): the return type, braces, declarations of regno/saved/
   mask, the reload_completed part of the cache test and the
   saved++/mask-reset statements inside the loops are elided from this
   listing (embedded line numbers jump at 825-827, 832, 836-838,
   840-842, 847, 849-850, 854, 856, 858, 863, 865-866, 869, 872).  */
824 m68k_compute_frame_layout (void)
828 enum m68k_function_kind func_kind =
829 m68k_get_function_kind (current_function_decl);
830 bool interrupt_handler = func_kind == m68k_fk_interrupt_handler;
831 bool interrupt_thread = func_kind == m68k_fk_interrupt_thread;
833 /* Only compute the frame once per function.
834 Don't cache information until reload has been completed. */
835 if (current_frame.funcdef_no == current_function_funcdef_no
839 current_frame.size = (get_frame_size () + 3) & -4;
843 /* Interrupt thread does not need to save any register. */
844 if (!interrupt_thread)
845 for (regno = 0; regno < 16; regno++)
846 if (m68k_save_reg (regno, interrupt_handler))
848 mask |= 1 << (regno - D0_REG);
851 current_frame.offset = saved * 4;
852 current_frame.reg_no = saved;
853 current_frame.reg_mask = mask;
855 current_frame.foffset = 0;
857 if (TARGET_HARD_FLOAT)
859 /* Interrupt thread does not need to save any register. */
860 if (!interrupt_thread)
861 for (regno = 16; regno < 24; regno++)
862 if (m68k_save_reg (regno, interrupt_handler))
864 mask |= 1 << (regno - FP0_REG);
867 current_frame.foffset = saved * TARGET_FP_REG_SIZE;
868 current_frame.offset += current_frame.foffset;
870 current_frame.fpu_no = saved;
871 current_frame.fpu_mask = mask;
873 /* Remember what function this frame refers to. */
874 current_frame.funcdef_no = current_function_funcdef_no;
877 /* Worker function for TARGET_CAN_ELIMINATE.  Elimination to the stack
   pointer is only possible when no frame pointer is needed; any other
   elimination is always allowed.
   NOTE(review): the return type, braces (lines 878-879, 881, 883) are
   elided from this listing.  */
880 m68k_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
882 return (to == STACK_POINTER_REGNUM ? ! frame_pointer_needed : true);
/* Return the offset between eliminable register FROM and register TO,
   based on the computed frame layout.
   NOTE(review): the return type, braces, the argptr_offset
   declaration, the "switch (from)" line and the default/unreachable
   case (lines 884-885, 887-888, 895, 897, 899-900, 905+) are elided
   from this listing.  */
886 m68k_initial_elimination_offset (int from, int to)
889 /* The arg pointer points 8 bytes before the start of the arguments,
890 as defined by FIRST_PARM_OFFSET. This makes it coincident with the
891 frame pointer in most frames. */
892 argptr_offset = frame_pointer_needed ? 0 : UNITS_PER_WORD;
893 if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
894 return argptr_offset;
896 m68k_compute_frame_layout ();
898 gcc_assert (to == STACK_POINTER_REGNUM);
901 case ARG_POINTER_REGNUM:
902 return current_frame.offset + current_frame.size - argptr_offset;
903 case FRAME_POINTER_REGNUM:
904 return current_frame.offset + current_frame.size;
910 /* Refer to the array `regs_ever_live' to determine which registers
911 to save; `regs_ever_live[I]' is nonzero if register number I
912 is ever used in the function. This function is responsible for
913 knowing which registers should not be saved even if used.
914 Return true if we need to save REGNO. */
/* NOTE(review): the return type, braces and the "return true"/"return
   false" bodies of most of the guard conditions below are elided from
   this listing (embedded line numbers jump at 915-916, 918, 920, 922,
   924, 931-933, 935-938, 941-946, 949-950, 953-954, 958, 960-961,
   963-965, 968-969).  Each visible "if" decides save/skip for one
   special case: the PIC register, EH return data registers, fixed
   registers, the frame pointer, interrupt-handler rules, and finally
   the generic callee-saved test.  */
917 m68k_save_reg (unsigned int regno, bool interrupt_handler)
919 if (flag_pic && regno == PIC_REG)
921 if (crtl->saves_all_registers)
923 if (crtl->uses_pic_offset_table)
925 /* Reload may introduce constant pool references into a function
926 that thitherto didn't need a PIC register. Note that the test
927 above will not catch that case because we will only set
928 crtl->uses_pic_offset_table when emitting
929 the address reloads. */
930 if (crtl->uses_const_pool)
934 if (crtl->calls_eh_return)
939 unsigned int test = EH_RETURN_DATA_REGNO (i);
940 if (test == INVALID_REGNUM)
947 /* Fixed regs we never touch. */
948 if (fixed_regs[regno])
951 /* The frame pointer (if it is such) is handled specially. */
952 if (regno == FRAME_POINTER_REGNUM && frame_pointer_needed)
955 /* Interrupt handlers must also save call_used_regs
956 if they are live or when calling nested functions. */
957 if (interrupt_handler)
959 if (df_regs_ever_live_p (regno))
962 if (!current_function_is_leaf && call_used_regs[regno])
966 /* Never need to save registers that aren't touched. */
967 if (!df_regs_ever_live_p (regno))
970 /* Otherwise save everything that isn't call-clobbered. */
971 return !call_used_regs[regno];
974 /* Emit RTL for a MOVEM or FMOVEM instruction. BASE + OFFSET represents
975 the lowest memory address. COUNT is the number of registers to be
976 moved, with register REGNO + I being moved if bit I of MASK is set.
977 STORE_P specifies the direction of the move and ADJUST_STACK_P says
978 whether or not this is pre-decrement (if STORE_P) or post-increment
979 (if !STORE_P) operation. */
/* Builds a PARALLEL of one SET per moved register, preceded by a
   base-register adjustment SET when ADJUST_STACK_P, and emits it as a
   single insn.
   NOTE(review): the return type, braces, the declaration/initialization
   of I, the "if (adjust_stack_p)" guard before the SRC computation and
   the "if (mask & 1)" test inside the loop (lines 980-981, 985-986,
   989, 992-995, 1000-1001, 1003-1004, 1011, 1013) are elided from
   this listing.  */
982 m68k_emit_movem (rtx base, HOST_WIDE_INT offset,
983 unsigned int count, unsigned int regno,
984 unsigned int mask, bool store_p, bool adjust_stack_p)
987 rtx body, addr, src, operands[2];
988 enum machine_mode mode;
990 body = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (adjust_stack_p + count));
991 mode = reg_raw_mode[regno];
996 src = plus_constant (base, (count
997 * GET_MODE_SIZE (mode)
998 * (HOST_WIDE_INT) (store_p ? -1 : 1)));
999 XVECEXP (body, 0, i++) = gen_rtx_SET (VOIDmode, base, src);
1002 for (; mask != 0; mask >>= 1, regno++)
1005 addr = plus_constant (base, offset);
1006 operands[!store_p] = gen_frame_mem (mode, addr);
1007 operands[store_p] = gen_rtx_REG (mode, regno);
1008 XVECEXP (body, 0, i++)
1009 = gen_rtx_SET (VOIDmode, operands[0], operands[1]);
1010 offset += GET_MODE_SIZE (mode);
1012 gcc_assert (i == XVECLEN (body, 0));
1014 return emit_insn (body);
1017 /* Make INSN a frame-related instruction.  Also marks every element of
   a PARALLEL pattern so the DWARF CFI machinery sees each component
   SET.
   NOTE(review): the return type, braces and the declarations of I and
   BODY (lines 1018-1019, 1021-1024, 1030-1031) are elided from this
   listing.  */
1020 m68k_set_frame_related (rtx insn)
1025 RTX_FRAME_RELATED_P (insn) = 1;
1026 body = PATTERN (insn);
1027 if (GET_CODE (body) == PARALLEL)
1028 for (i = 0; i < XVECLEN (body, 0); i++)
1029 RTX_FRAME_RELATED_P (XVECEXP (body, 0, i)) = 1;
1032 /* Emit RTL for the "prologue" define_expand. */
/* Sequence: optional early stack-limit check (symbolic limit), frame
   allocation (link/unlk-compatible forms, with a 68040-specific fast
   path and a ColdFire adjustment folding movem save space into the
   initial allocation), FPU register saves via fmovem, a late
   stack-limit check (register limit), integer register saves (single
   moves below MIN_MOVEM_REGS, otherwise movem), and finally the PIC
   GOT load when needed.
   NOTE(review): this listing is heavily elided -- the return type,
   braces, several else keywords, trailing arguments of the ctrapsi4
   and addsi3 calls, and assorted statements are missing wherever the
   embedded source line numbers jump.  Treat the visible lines as an
   outline, not compilable code.  */
1035 m68k_expand_prologue (void)
1037 HOST_WIDE_INT fsize_with_regs;
1038 rtx limit, src, dest, insn;
1040 m68k_compute_frame_layout ();
1042 /* If the stack limit is a symbol, we can check it here,
1043 before actually allocating the space. */
1044 if (crtl->limit_stack
1045 && GET_CODE (stack_limit_rtx) == SYMBOL_REF)
1047 limit = plus_constant (stack_limit_rtx, current_frame.size + 4);
1048 if (!LEGITIMATE_CONSTANT_P (limit))
1050 emit_move_insn (gen_rtx_REG (Pmode, D0_REG), limit);
1051 limit = gen_rtx_REG (Pmode, D0_REG);
1053 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode,
1054 stack_pointer_rtx, limit),
1055 stack_pointer_rtx, limit,
1059 fsize_with_regs = current_frame.size;
1060 if (TARGET_COLDFIRE)
1062 /* ColdFire's move multiple instructions do not allow pre-decrement
1063 addressing. Add the size of movem saves to the initial stack
1064 allocation instead. */
1065 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1066 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1067 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1068 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
1071 if (frame_pointer_needed)
1073 if (fsize_with_regs == 0 && TUNE_68040)
1075 /* On the 68040, two separate moves are faster than link.w 0. */
1076 dest = gen_frame_mem (Pmode,
1077 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1078 m68k_set_frame_related (emit_move_insn (dest, frame_pointer_rtx));
1079 m68k_set_frame_related (emit_move_insn (frame_pointer_rtx,
1080 stack_pointer_rtx));
1082 else if (fsize_with_regs < 0x8000 || TARGET_68020)
1083 m68k_set_frame_related
1084 (emit_insn (gen_link (frame_pointer_rtx,
1085 GEN_INT (-4 - fsize_with_regs))));
1088 m68k_set_frame_related
1089 (emit_insn (gen_link (frame_pointer_rtx, GEN_INT (-4))));
1090 m68k_set_frame_related
1091 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1093 GEN_INT (-fsize_with_regs))));
1096 /* If the frame pointer is needed, emit a special barrier that
1097 will prevent the scheduler from moving stores to the frame
1098 before the stack adjustment. */
1099 emit_insn (gen_stack_tie (stack_pointer_rtx, frame_pointer_rtx));
1101 else if (fsize_with_regs != 0)
1102 m68k_set_frame_related
1103 (emit_insn (gen_addsi3 (stack_pointer_rtx,
1105 GEN_INT (-fsize_with_regs))));
1107 if (current_frame.fpu_mask)
1109 gcc_assert (current_frame.fpu_no >= MIN_FMOVEM_REGS);
1111 m68k_set_frame_related
1112 (m68k_emit_movem (stack_pointer_rtx,
1113 current_frame.fpu_no * -GET_MODE_SIZE (XFmode),
1114 current_frame.fpu_no, FP0_REG,
1115 current_frame.fpu_mask, true, true));
1120 /* If we're using moveml to save the integer registers,
1121 the stack pointer will point to the bottom of the moveml
1122 save area. Find the stack offset of the first FP register. */
1123 if (current_frame.reg_no < MIN_MOVEM_REGS)
1126 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1127 m68k_set_frame_related
1128 (m68k_emit_movem (stack_pointer_rtx, offset,
1129 current_frame.fpu_no, FP0_REG,
1130 current_frame.fpu_mask, true, false));
1134 /* If the stack limit is not a symbol, check it here.
1135 This has the disadvantage that it may be too late... */
1136 if (crtl->limit_stack)
1138 if (REG_P (stack_limit_rtx))
1139 emit_insn (gen_ctrapsi4 (gen_rtx_LTU (VOIDmode, stack_pointer_rtx,
1141 stack_pointer_rtx, stack_limit_rtx,
1144 else if (GET_CODE (stack_limit_rtx) != SYMBOL_REF)
1145 warning (0, "stack limit expression is not supported");
1148 if (current_frame.reg_no < MIN_MOVEM_REGS)
1150 /* Store each register separately in the same order moveml does. */
1153 for (i = 16; i-- > 0; )
1154 if (current_frame.reg_mask & (1 << i))
1156 src = gen_rtx_REG (SImode, D0_REG + i);
1157 dest = gen_frame_mem (SImode,
1158 gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx));
1159 m68k_set_frame_related (emit_insn (gen_movsi (dest, src)));
1164 if (TARGET_COLDFIRE)
1165 /* The required register save space has already been allocated.
1166 The first register should be stored at (%sp). */
1167 m68k_set_frame_related
1168 (m68k_emit_movem (stack_pointer_rtx, 0,
1169 current_frame.reg_no, D0_REG,
1170 current_frame.reg_mask, true, false));
1172 m68k_set_frame_related
1173 (m68k_emit_movem (stack_pointer_rtx,
1174 current_frame.reg_no * -GET_MODE_SIZE (SImode),
1175 current_frame.reg_no, D0_REG,
1176 current_frame.reg_mask, true, true));
1179 if (!TARGET_SEP_DATA
1180 && crtl->uses_pic_offset_table)
1181 insn = emit_insn (gen_load_got (pic_offset_table_rtx));
1184 /* Return true if a simple (return) instruction is sufficient for this
1185 instruction (i.e. if no epilogue is needed). */
/* NOTE(review): this listing elides interior lines (return type, braces,
   early "return false") — confirm against the original file.  */
1188 m68k_use_return_insn (void)
/* Before reload, or with a frame pointer / nonzero frame, an epilogue
   is always needed.  */
1190 if (!reload_completed || frame_pointer_needed || get_frame_size () != 0)
/* Otherwise the epilogue is empty exactly when no registers were saved,
   i.e. the computed save-area offset is zero.  */
1193 m68k_compute_frame_layout ();
1194 return current_frame.offset == 0;
1197 /* Emit RTL for the "epilogue" or "sibcall_epilogue" define_expand;
1198 SIBCALL_P says which.
1200 The function epilogue should not depend on the current stack pointer!
1201 It should use the frame pointer only, if there is a frame pointer.
1202 This is mandatory because of alloca; we also take advantage of it to
1203 omit stack adjustments before returning. */
/* NOTE(review): interior lines (braces, else-arms, the "big" flag setup)
   are elided in this listing; comments describe only the visible logic.  */
1206 m68k_expand_epilogue (bool sibcall_p)
1208 HOST_WIDE_INT fsize, fsize_with_regs;
1209 bool big, restore_from_sp;
/* Recompute the frame layout; epilogue must agree with the prologue.  */
1211 m68k_compute_frame_layout ();
1213 fsize = current_frame.size;
1215 restore_from_sp = false;
1217 /* FIXME : current_function_is_leaf below is too strong.
1218 What we really need to know there is if there could be pending
1219 stack adjustment needed at that point. */
/* Restoring via %sp is safe when there is no frame pointer, or when the
   stack cannot have moved (no alloca, leaf function).  */
1220 restore_from_sp = (!frame_pointer_needed
1221 || (!cfun->calls_alloca
1222 && current_function_is_leaf));
1224 /* fsize_with_regs is the size we need to adjust the sp when
1225 popping the frame. */
1226 fsize_with_regs = fsize;
1227 if (TARGET_COLDFIRE && restore_from_sp)
1229 /* ColdFire's move multiple instructions do not allow post-increment
1230 addressing. Add the size of movem loads to the final deallocation
1232 if (current_frame.reg_no >= MIN_MOVEM_REGS)
1233 fsize_with_regs += current_frame.reg_no * GET_MODE_SIZE (SImode);
1234 if (current_frame.fpu_no >= MIN_FMOVEM_REGS)
1235 fsize_with_regs += current_frame.fpu_no * GET_MODE_SIZE (DFmode);
/* "Big" frame: displacement would not fit in a 16-bit (d16,An) offset,
   so the offset is materialized in %a1 — presumably this sets BIG; the
   assignment line is elided here.  */
1238 if (current_frame.offset + fsize >= 0x8000
1240 && (current_frame.reg_mask || current_frame.fpu_mask))
1243 && (current_frame.reg_no >= MIN_MOVEM_REGS
1244 || current_frame.fpu_no >= MIN_FMOVEM_REGS))
1246 /* ColdFire's move multiple instructions do not support the
1247 (d8,Ax,Xi) addressing mode, so we're as well using a normal
1248 stack-based restore. */
1249 emit_move_insn (gen_rtx_REG (Pmode, A1_REG),
1250 GEN_INT (-(current_frame.offset + fsize)));
1251 emit_insn (gen_addsi3 (stack_pointer_rtx,
1252 gen_rtx_REG (Pmode, A1_REG),
1253 frame_pointer_rtx));
1254 restore_from_sp = true;
1258 emit_move_insn (gen_rtx_REG (Pmode, A1_REG), GEN_INT (-fsize));
/* Integer-register restore: too few registers for moveml, so pop each
   data/address register individually.  */
1264 if (current_frame.reg_no < MIN_MOVEM_REGS)
1266 /* Restore each register separately in the same order moveml does. */
1268 HOST_WIDE_INT offset;
1270 offset = current_frame.offset + fsize;
1271 for (i = 0; i < 16; i++)
1272 if (current_frame.reg_mask & (1 << i))
1278 /* Generate the address -OFFSET(%fp,%a1.l). */
1279 addr = gen_rtx_REG (Pmode, A1_REG);
1280 addr = gen_rtx_PLUS (Pmode, addr, frame_pointer_rtx);
1281 addr = plus_constant (addr, -offset);
1283 else if (restore_from_sp)
1284 addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
1286 addr = plus_constant (frame_pointer_rtx, -offset);
1287 emit_move_insn (gen_rtx_REG (SImode, D0_REG + i),
1288 gen_frame_mem (SImode, addr));
1289 offset -= GET_MODE_SIZE (SImode);
/* moveml restore, choosing the addressing form: (d8,%fp,%a1) for big
   frames, post-increment off %sp, or (d16,%fp) otherwise.  */
1292 else if (current_frame.reg_mask)
1295 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1296 gen_rtx_REG (Pmode, A1_REG),
1298 -(current_frame.offset + fsize),
1299 current_frame.reg_no, D0_REG,
1300 current_frame.reg_mask, false, false);
1301 else if (restore_from_sp)
1302 m68k_emit_movem (stack_pointer_rtx, 0,
1303 current_frame.reg_no, D0_REG,
1304 current_frame.reg_mask, false,
1307 m68k_emit_movem (frame_pointer_rtx,
1308 -(current_frame.offset + fsize),
1309 current_frame.reg_no, D0_REG,
1310 current_frame.reg_mask, false, false);
/* FPU-register restore, mirroring the integer cases above.  */
1313 if (current_frame.fpu_no > 0)
1316 m68k_emit_movem (gen_rtx_PLUS (Pmode,
1317 gen_rtx_REG (Pmode, A1_REG),
1319 -(current_frame.foffset + fsize),
1320 current_frame.fpu_no, FP0_REG,
1321 current_frame.fpu_mask, false, false);
1322 else if (restore_from_sp)
1324 if (TARGET_COLDFIRE)
1328 /* If we used moveml to restore the integer registers, the
1329 stack pointer will still point to the bottom of the moveml
1330 save area. Find the stack offset of the first FP
1332 if (current_frame.reg_no < MIN_MOVEM_REGS)
1335 offset = current_frame.reg_no * GET_MODE_SIZE (SImode);
1336 m68k_emit_movem (stack_pointer_rtx, offset,
1337 current_frame.fpu_no, FP0_REG,
1338 current_frame.fpu_mask, false, false);
1341 m68k_emit_movem (stack_pointer_rtx, 0,
1342 current_frame.fpu_no, FP0_REG,
1343 current_frame.fpu_mask, false, true);
1346 m68k_emit_movem (frame_pointer_rtx,
1347 -(current_frame.foffset + fsize),
1348 current_frame.fpu_no, FP0_REG,
1349 current_frame.fpu_mask, false, false);
/* Tear down the frame: unlk when a frame pointer exists, otherwise a
   single stack-pointer adjustment.  */
1352 if (frame_pointer_needed)
1353 emit_insn (gen_unlink (frame_pointer_rtx));
1354 else if (fsize_with_regs)
1355 emit_insn (gen_addsi3 (stack_pointer_rtx,
1357 GEN_INT (fsize_with_regs)));
/* __builtin_eh_return: apply the extra stack adjustment it requests.  */
1359 if (crtl->calls_eh_return)
1360 emit_insn (gen_addsi3 (stack_pointer_rtx,
1362 EH_RETURN_STACKADJ_RTX));
/* Presumably guarded by !sibcall_p in the elided line above — a sibcall
   epilogue must not emit the return.  TODO confirm.  */
1365 emit_jump_insn (gen_rtx_RETURN (VOIDmode));
1368 /* Return true if X is a valid comparison operator for the dbcc
1371 Note it rejects floating point comparison operators.
1372 (In the future we could use Fdbcc).
1374 It also rejects some comparisons when CC_NO_OVERFLOW is set. */
/* NOTE(review): the default/return-false arm of the switch is elided
   in this listing.  */
1377 valid_dbcc_comparison_p_2 (rtx x, enum machine_mode mode ATTRIBUTE_UNUSED)
1379 switch (GET_CODE (x))
/* Unsigned and equality comparisons never depend on overflow.  */
1381 case EQ: case NE: case GTU: case LTU:
1385 /* Reject some when CC_NO_OVERFLOW is set. This may be over
/* Signed comparisons rely on the V flag; invalid once overflow
   tracking has been suppressed.  */
1387 case GT: case LT: case GE: case LE:
1388 return ! (cc_prev_status.flags & CC_NO_OVERFLOW);
1394 /* Return nonzero if flags are currently in the 68881 flag register. */
1396 flags_in_68881 (void)
1398 /* We could add support for these in the future */
/* Simple test of the cached cc_status bit set when the last compare
   used the FPU condition codes.  */
1399 return cc_status.flags & CC_IN_68881;
1402 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL_P. */
/* NOTE(review): the "return false/true" lines of each branch are elided
   in this listing.  */
1405 m68k_ok_for_sibcall_p (tree decl, tree exp)
1407 enum m68k_function_kind kind;
1409 /* We cannot use sibcalls for nested functions because we use the
1410 static chain register for indirect calls. */
1411 if (CALL_EXPR_STATIC_CHAIN (exp))
/* Compare the caller's kind (normal/interrupt) with the callee's.  */
1414 kind = m68k_get_function_kind (current_function_decl);
1415 if (kind == m68k_fk_normal_function)
1416 /* We can always sibcall from a normal function, because it's
1417 undefined if it is calling an interrupt function. */
1420 /* Otherwise we can only sibcall if the function kind is known to be
/* Matching kinds share the same return mechanism, so a sibcall is safe.  */
1422 if (decl && m68k_get_function_kind (decl) == kind)
1428 /* Convert X to a legitimate function call memory reference and return the
/* Returns X unchanged when its address already satisfies call_operand;
   otherwise forces the address into a register (elided return for the
   fast path is not visible in this listing).  */
1432 m68k_legitimize_call_address (rtx x)
1434 gcc_assert (MEM_P (x));
1435 if (call_operand (XEXP (x, 0), VOIDmode))
1437 return replace_equiv_address (x, force_reg (Pmode, XEXP (x, 0)));
1440 /* Likewise for sibling calls. */
/* Sibcalls cannot clobber arbitrary registers, so a non-conforming
   address is moved into the static chain register, which is free at
   a sibcall site (the caller's frame is gone).  */
1443 m68k_legitimize_sibcall_address (rtx x)
1445 gcc_assert (MEM_P (x));
1446 if (sibcall_operand (XEXP (x, 0), VOIDmode))
1449 emit_move_insn (gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM), XEXP (x, 0));
1450 return replace_equiv_address (x, gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM));
1453 /* Convert X to a legitimate address and return it if successful. Otherwise
1456 For the 68000, we handle X+REG by loading X into a register R and
1457 using R+REG. R will go in an address reg and indexing will be used.
1458 However, if REG is a broken-out memory address or multiplication,
1459 nothing needs to be done because REG can certainly go in an address reg. */
/* NOTE(review): several lines (COPY_ONCE uses, XEXP reassignments,
   final return) are elided in this listing.  */
1462 m68k_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
/* TLS symbols need their own legitimization path.  */
1464 if (m68k_tls_symbol_p (x))
1465 return m68k_legitimize_tls_address (x);
1467 if (GET_CODE (x) == PLUS)
/* CH tracks whether X was already changed relative to OLDX.  */
1469 int ch = (x) != (oldx);
1472 #define COPY_ONCE(Y) if (!copied) { Y = copy_rtx (Y); copied = ch = 1; }
/* Break out embedded multiplications (index scaling) into registers.  */
1474 if (GET_CODE (XEXP (x, 0)) == MULT)
1477 XEXP (x, 0) = force_operand (XEXP (x, 0), 0);
1479 if (GET_CODE (XEXP (x, 1)) == MULT)
1482 XEXP (x, 1) = force_operand (XEXP (x, 1), 0);
1486 if (GET_CODE (XEXP (x, 1)) == REG
1487 && GET_CODE (XEXP (x, 0)) == REG)
/* ColdFire FPU rejects indexed addressing for FP modes.  */
1489 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
1492 x = force_operand (x, 0);
1496 if (memory_address_p (mode, x))
/* REG + constant/symbol: load the non-register part into a fresh
   pseudo and index off it.  Also accepts a sign-extended HImode reg.  */
1499 if (GET_CODE (XEXP (x, 0)) == REG
1500 || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
1501 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
1502 && GET_MODE (XEXP (XEXP (x, 0), 0)) == HImode))
1504 rtx temp = gen_reg_rtx (Pmode);
1505 rtx val = force_operand (XEXP (x, 1), 0);
1506 emit_move_insn (temp, val);
1509 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
1510 && GET_CODE (XEXP (x, 0)) == REG)
1511 x = force_operand (x, 0);
/* Mirror image: the register is on the right-hand side.  */
1513 else if (GET_CODE (XEXP (x, 1)) == REG
1514 || (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
1515 && GET_CODE (XEXP (XEXP (x, 1), 0)) == REG
1516 && GET_MODE (XEXP (XEXP (x, 1), 0)) == HImode))
1518 rtx temp = gen_reg_rtx (Pmode);
1519 rtx val = force_operand (XEXP (x, 0), 0);
1520 emit_move_insn (temp, val);
1523 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT
1524 && GET_CODE (XEXP (x, 1)) == REG)
1525 x = force_operand (x, 0);
1533 /* Output a dbCC; jCC sequence. Note we do not handle the
1534 floating point version of this sequence (Fdbcc). We also
1535 do not handle alternative conditions when CC_NO_OVERFLOW is
1536 set. It is assumed that valid_dbcc_comparison_p and flags_in_68881 will
1537 kick those out before we get here. */
/* NOTE(review): case labels, breaks and the gcc_unreachable default are
   elided in this listing; each branch pairs a dbCC loop instruction
   with the matching jCC for the fall-through exit.  */
1540 output_dbcc_and_branch (rtx *operands)
1542 switch (GET_CODE (operands[3]))
1545 output_asm_insn ("dbeq %0,%l1\n\tjeq %l2", operands);
1549 output_asm_insn ("dbne %0,%l1\n\tjne %l2", operands);
1553 output_asm_insn ("dbgt %0,%l1\n\tjgt %l2", operands);
1557 output_asm_insn ("dbhi %0,%l1\n\tjhi %l2", operands);
1561 output_asm_insn ("dblt %0,%l1\n\tjlt %l2", operands);
1565 output_asm_insn ("dbcs %0,%l1\n\tjcs %l2", operands);
1569 output_asm_insn ("dbge %0,%l1\n\tjge %l2", operands);
1573 output_asm_insn ("dbcc %0,%l1\n\tjcc %l2", operands);
1577 output_asm_insn ("dble %0,%l1\n\tjle %l2", operands);
1581 output_asm_insn ("dbls %0,%l1\n\tjls %l2", operands);
1588 /* If the decrement is to be done in SImode, then we have
1589 to compensate for the fact that dbcc decrements in HImode. */
1590 switch (GET_MODE (operands[0]))
/* SImode counter: clear the low word and decrement the full long so the
   loop continues while the upper word is still nonzero.  */
1593 output_asm_insn ("clr%.w %0\n\tsubq%.l #1,%0\n\tjpl %l1", operands);
/* Output assembler for a DImode scc: set byte DEST according to the
   comparison OP of OPERAND1 against OPERAND2, comparing high and low
   SImode halves separately.  NOTE(review): the case labels and many
   braces are elided in this listing; each surviving group is one
   rtx_code arm of the switch.  */
1605 output_scc_di (rtx op, rtx operand1, rtx operand2, rtx dest)
1608 enum rtx_code op_code = GET_CODE (op);
1610 /* This does not produce a useful cc. */
1613 /* The m68k cmp.l instruction requires operand1 to be a reg as used
1614 below. Swap the operands and change the op if these requirements
1615 are not fulfilled. */
1616 if (GET_CODE (operand2) == REG && GET_CODE (operand1) != REG)
1620 operand1 = operand2;
1622 op_code = swap_condition (op_code);
/* loperands[0]/[1] are the high/low words of OPERAND1; a MEM low word
   is addressed at offset 4 (big-endian layout).  */
1624 loperands[0] = operand1;
1625 if (GET_CODE (operand1) == REG)
1626 loperands[1] = gen_rtx_REG (SImode, REGNO (operand1) + 1);
1628 loperands[1] = adjust_address (operand1, SImode, 4);
1629 if (operand2 != const0_rtx)
1631 loperands[2] = operand2;
1632 if (GET_CODE (operand2) == REG)
1633 loperands[3] = gen_rtx_REG (SImode, REGNO (operand2) + 1);
1635 loperands[3] = adjust_address (operand2, SImode, 4);
/* loperands[4] is the label skipped to when the high words differ.  */
1637 loperands[4] = gen_label_rtx ();
1638 if (operand2 != const0_rtx)
1639 output_asm_insn ("cmp%.l %2,%0\n\tjne %l4\n\tcmp%.l %3,%1", loperands);
/* Comparison against zero: tst.l works except for address registers on
   pre-68020 non-ColdFire parts, where cmp.w #0 is used instead.  */
1642 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[0]))
1643 output_asm_insn ("tst%.l %0", loperands);
1645 output_asm_insn ("cmp%.w #0,%0", loperands);
1647 output_asm_insn ("jne %l4", loperands);
1649 if (TARGET_68020 || TARGET_COLDFIRE || ! ADDRESS_REG_P (loperands[1]))
1650 output_asm_insn ("tst%.l %1", loperands);
1652 output_asm_insn ("cmp%.w #0,%1", loperands);
1655 loperands[5] = dest;
/* EQ arm.  */
1660 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1661 CODE_LABEL_NUMBER (loperands[4]));
1662 output_asm_insn ("seq %5", loperands);
/* NE arm.  */
1666 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1667 CODE_LABEL_NUMBER (loperands[4]));
1668 output_asm_insn ("sne %5", loperands);
/* Signed GT: unsigned "shi" on the low half, signed "sgt" when the
   high halves decided; loperands[6] jumps over the high-half set.  */
1672 loperands[6] = gen_label_rtx ();
1673 output_asm_insn ("shi %5\n\tjra %l6", loperands);
1674 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1675 CODE_LABEL_NUMBER (loperands[4]));
1676 output_asm_insn ("sgt %5", loperands);
1677 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1678 CODE_LABEL_NUMBER (loperands[6]));
/* GTU arm.  */
1682 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1683 CODE_LABEL_NUMBER (loperands[4]));
1684 output_asm_insn ("shi %5", loperands);
/* Signed LT arm (scs on low half, slt on high).  */
1688 loperands[6] = gen_label_rtx ();
1689 output_asm_insn ("scs %5\n\tjra %l6", loperands);
1690 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1691 CODE_LABEL_NUMBER (loperands[4]));
1692 output_asm_insn ("slt %5", loperands);
1693 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1694 CODE_LABEL_NUMBER (loperands[6]));
/* LTU arm.  */
1698 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1699 CODE_LABEL_NUMBER (loperands[4]));
1700 output_asm_insn ("scs %5", loperands);
/* Signed GE arm (scc on low half, sge on high).  */
1704 loperands[6] = gen_label_rtx ();
1705 output_asm_insn ("scc %5\n\tjra %l6", loperands);
1706 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1707 CODE_LABEL_NUMBER (loperands[4]));
1708 output_asm_insn ("sge %5", loperands);
1709 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1710 CODE_LABEL_NUMBER (loperands[6]));
/* GEU arm.  */
1714 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1715 CODE_LABEL_NUMBER (loperands[4]));
1716 output_asm_insn ("scc %5", loperands);
/* Signed LE arm (sls on low half, sle on high).  */
1720 loperands[6] = gen_label_rtx ();
1721 output_asm_insn ("sls %5\n\tjra %l6", loperands);
1722 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1723 CODE_LABEL_NUMBER (loperands[4]));
1724 output_asm_insn ("sle %5", loperands);
1725 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1726 CODE_LABEL_NUMBER (loperands[6]));
/* LEU arm.  */
1730 (*targetm.asm_out.internal_label) (asm_out_file, "L",
1731 CODE_LABEL_NUMBER (loperands[4]));
1732 output_asm_insn ("sls %5", loperands);
/* Output assembler for a bit-test of bit COUNTOP of DATAOP.  SIGNPOS is
   the sign-bit position for the operand's storage unit (e.g. 7 for a
   byte).  Sets cc_status so the following conditional branch can use
   the N/Z flags.  NOTE(review): several early-return strings around
   lines 1763-1772 are elided in this listing.  */
1742 output_btst (rtx *operands, rtx countop, rtx dataop, rtx insn, int signpos)
1744 operands[0] = countop;
1745 operands[1] = dataop;
1747 if (GET_CODE (countop) == CONST_INT)
1749 register int count = INTVAL (countop);
1750 /* If COUNT is bigger than size of storage unit in use,
1751 advance to the containing unit of same size. */
1752 if (count > signpos)
1754 int offset = (count & ~signpos) / 8;
1755 count = count & signpos;
1756 operands[1] = dataop = adjust_address (dataop, QImode, offset);
/* Testing the sign bit lets the N flag stand in for the result.  */
1758 if (count == signpos)
1759 cc_status.flags = CC_NOT_POSITIVE | CC_Z_IN_NOT_N;
1761 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N;
1763 /* These three statements used to use next_insns_test_no...
1764 but it appears that this should do the same job. */
1766 && next_insn_tests_no_inequality (insn))
1769 && next_insn_tests_no_inequality (insn))
1772 && next_insn_tests_no_inequality (insn))
1774 /* Try to use `movew to ccr' followed by the appropriate branch insn.
1775 On some m68k variants unfortunately that's slower than btst.
1776 On 68000 and higher, that should also work for all HImode operands. */
1777 if (TUNE_CPU32 || TARGET_COLDFIRE || optimize_size)
/* Bit 3 lands in the CCR N position after "move to ccr".  */
1779 if (count == 3 && DATA_REG_P (operands[1])
1780 && next_insn_tests_no_inequality (insn))
1782 cc_status.flags = CC_NOT_NEGATIVE | CC_Z_IN_NOT_N | CC_NO_OVERFLOW;
1783 return "move%.w %1,%%ccr";
/* Bit 2 lands in the Z position, hence the inverted sense.  */
1785 if (count == 2 && DATA_REG_P (operands[1])
1786 && next_insn_tests_no_inequality (insn))
1788 cc_status.flags = CC_NOT_NEGATIVE | CC_INVERTED | CC_NO_OVERFLOW;
1789 return "move%.w %1,%%ccr";
1791 /* count == 1 followed by bvc/bvs and
1792 count == 0 followed by bcc/bcs are also possible, but need
1793 m68k-specific CC_Z_IN_NOT_V and CC_Z_IN_NOT_C flags. */
1796 cc_status.flags = CC_NOT_NEGATIVE;
1798 return "btst %0,%1";
1801 /* Return true if X is a legitimate base register. STRICT_P says
1802 whether we need strict checking. */
1805 m68k_legitimate_base_reg_p (rtx x, bool strict_p)
1807 /* Allow SUBREG everywhere we allow REG. This results in better code. */
/* Presumably unwraps X = SUBREG_REG (x) here; the line is elided in
   this listing.  */
1808 if (!strict_p && GET_CODE (x) == SUBREG)
/* Strict checking uses hard-register classes; non-strict also accepts
   pseudos.  */
1813 ? REGNO_OK_FOR_BASE_P (REGNO (x))
1814 : REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x))));
1817 /* Return true if X is a legitimate index register. STRICT_P says
1818 whether we need strict checking. */
/* Mirror of m68k_legitimate_base_reg_p for the index position.  */
1821 m68k_legitimate_index_reg_p (rtx x, bool strict_p)
1823 if (!strict_p && GET_CODE (x) == SUBREG)
1828 ? REGNO_OK_FOR_INDEX_P (REGNO (x))
1829 : REGNO_OK_FOR_INDEX_NONSTRICT_P (REGNO (x))));
1832 /* Return true if X is a legitimate index expression for a (d8,An,Xn) or
1833 (bd,An,Xn) addressing mode. Fill in the INDEX and SCALE fields of
1834 ADDRESS if so. STRICT_P says whether we need strict checking. */
1837 m68k_decompose_index (rtx x, bool strict_p, struct m68k_address *address)
1841 /* Check for a scale factor. */
/* Scaled indexing (*2/*4/*8) exists only on 68020+ and ColdFire; *8
   additionally requires an FPU-capable ColdFire or non-ColdFire.  */
1843 if ((TARGET_68020 || TARGET_COLDFIRE)
1844 && GET_CODE (x) == MULT
1845 && GET_CODE (XEXP (x, 1)) == CONST_INT
1846 && (INTVAL (XEXP (x, 1)) == 2
1847 || INTVAL (XEXP (x, 1)) == 4
1848 || (INTVAL (XEXP (x, 1)) == 8
1849 && (TARGET_COLDFIRE_FPU || !TARGET_COLDFIRE))))
1851 scale = INTVAL (XEXP (x, 1));
1855 /* Check for a word extension. */
/* Xn.w form: a sign-extended HImode register index (not on ColdFire).  */
1856 if (!TARGET_COLDFIRE
1857 && GET_CODE (x) == SIGN_EXTEND
1858 && GET_MODE (XEXP (x, 0)) == HImode)
1861 if (m68k_legitimate_index_reg_p (x, strict_p))
1863 address->scale = scale;
1871 /* Return true if X is an illegitimate symbolic constant. */
1874 m68k_illegitimate_symbolic_constant_p (rtx x)
/* When section offsets must stay within their section, reject
   symbol+offset constants whose offset escapes the symbol's block.  */
1878 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
1880 split_const (x, &base, &offset);
1881 if (GET_CODE (base) == SYMBOL_REF
1882 && !offset_within_block_p (base, INTVAL (offset)))
/* TLS references always need legitimization.  */
1885 return m68k_tls_reference_p (x, false);
1888 /* Return true if X is a legitimate constant address that can reach
1889 bytes in the range [X, X + REACH). STRICT_P says whether we need
1893 m68k_legitimate_constant_address_p (rtx x, unsigned int reach, bool strict_p)
1897 if (!CONSTANT_ADDRESS_P (x))
/* Presumably a TARGET_PCREL guard sits on the elided line above this
   condition — confirm against the original file.  */
1901 && !(strict_p && TARGET_PCREL)
1902 && symbolic_operand (x, VOIDmode))
/* A REACH-byte access must fit entirely inside the symbol's block when
   offsets are restricted to their section.  */
1905 if (M68K_OFFSETS_MUST_BE_WITHIN_SECTIONS_P && reach > 1)
1907 split_const (x, &base, &offset);
1908 if (GET_CODE (base) == SYMBOL_REF
1909 && !offset_within_block_p (base, INTVAL (offset) + reach - 1))
1913 return !m68k_tls_reference_p (x, false);
1916 /* Return true if X is a LABEL_REF for a jump table. Assume that unplaced
1917 labels will become jump tables. */
1920 m68k_jump_table_ref_p (rtx x)
1922 if (GET_CODE (x) != LABEL_REF)
/* An unplaced label (no neighbours in the insn chain) is assumed to be
   a future jump table; see the do_tablejump note at the caller.  */
1926 if (!NEXT_INSN (x) && !PREV_INSN (x))
/* Otherwise require an actual JUMP_TABLE_DATA insn after the label.  */
1929 x = next_nonnote_insn (x);
1930 return x && JUMP_TABLE_DATA_P (x);
1933 /* Return true if X is a legitimate address for values of mode MODE.
1934 STRICT_P says whether strict checking is needed. If the address
1935 is valid, describe its components in *ADDRESS. */
/* NOTE(review): many "return true/false" lines and some brace/assignment
   lines are elided in this listing; each numbered comment below marks
   the 68k addressing mode being matched.  */
1938 m68k_decompose_address (enum machine_mode mode, rtx x,
1939 bool strict_p, struct m68k_address *address)
1943 memset (address, 0, sizeof (*address));
/* BLKmode accesses have no fixed extent; reach defaults to 1 here
   (assignment elided), otherwise the mode size.  TODO confirm.  */
1945 if (mode == BLKmode)
1948 reach = GET_MODE_SIZE (mode);
1950 /* Check for (An) (mode 2). */
1951 if (m68k_legitimate_base_reg_p (x, strict_p))
1957 /* Check for -(An) and (An)+ (modes 3 and 4). */
1958 if ((GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_INC)
1959 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1961 address->code = GET_CODE (x);
1962 address->base = XEXP (x, 0);
1966 /* Check for (d16,An) (mode 5). */
1967 if (GET_CODE (x) == PLUS
1968 && GET_CODE (XEXP (x, 1)) == CONST_INT
1969 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x8000, 0x8000 - reach)
1970 && m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p))
1972 address->base = XEXP (x, 0)
1973 address->offset = XEXP (x, 1);
1977 /* Check for GOT loads. These are (bd,An,Xn) addresses if
1978 TARGET_68020 && flag_pic == 2, otherwise they are (d16,An)
1980 if (GET_CODE (x) == PLUS
1981 && XEXP (x, 0) == pic_offset_table_rtx)
1983 /* As we are processing a PLUS, do not unwrap RELOC32 symbols --
1984 they are invalid in this context. */
1985 if (m68k_unwrap_symbol (XEXP (x, 1), false) != XEXP (x, 1))
1987 address->base = XEXP (x, 0);
1988 address->offset = XEXP (x, 1);
1993 /* The ColdFire FPU only accepts addressing modes 2-5. */
1994 if (TARGET_COLDFIRE_FPU && GET_MODE_CLASS (mode) == MODE_FLOAT)
1997 /* Check for (xxx).w and (xxx).l. Also, in the TARGET_PCREL case,
1998 check for (d16,PC) or (bd,PC,Xn) with a suppressed index register.
1999 All these modes are variations of mode 7. */
2000 if (m68k_legitimate_constant_address_p (x, reach, strict_p))
2002 address->offset = x;
2006 /* Check for (d8,PC,Xn), a mode 7 form. This case is needed for
2009 ??? do_tablejump creates these addresses before placing the target
2010 label, so we have to assume that unplaced labels are jump table
2011 references. It seems unlikely that we would ever generate indexed
2012 accesses to unplaced labels in other cases. */
2013 if (GET_CODE (x) == PLUS
2014 && m68k_jump_table_ref_p (XEXP (x, 1))
2015 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2017 address->offset = XEXP (x, 1);
2021 /* Everything hereafter deals with (d8,An,Xn.SIZE*SCALE) or
2022 (bd,An,Xn.SIZE*SCALE) addresses. */
/* 68020+/ColdFire path: full base-displacement forms (guard elided).  */
2026 /* Check for a nonzero base displacement. */
2027 if (GET_CODE (x) == PLUS
2028 && m68k_legitimate_constant_address_p (XEXP (x, 1), reach, strict_p))
2030 address->offset = XEXP (x, 1);
2034 /* Check for a suppressed index register. */
2035 if (m68k_legitimate_base_reg_p (x, strict_p))
2041 /* Check for a suppressed base register. Do not allow this case
2042 for non-symbolic offsets as it effectively gives gcc freedom
2043 to treat data registers as base registers, which can generate
2046 && symbolic_operand (address->offset, VOIDmode)
2047 && m68k_decompose_index (x, strict_p, address))
/* Pre-68020 path: only an 8-bit displacement fits (d8,An,Xn).  */
2052 /* Check for a nonzero base displacement. */
2053 if (GET_CODE (x) == PLUS
2054 && GET_CODE (XEXP (x, 1)) == CONST_INT
2055 && IN_RANGE (INTVAL (XEXP (x, 1)), -0x80, 0x80 - reach)
2057 address->offset = XEXP (x, 1);
2062 /* We now expect the sum of a base and an index. */
2063 if (GET_CODE (x) == PLUS)
2065 if (m68k_legitimate_base_reg_p (XEXP (x, 0), strict_p)
2066 && m68k_decompose_index (XEXP (x, 1), strict_p, address))
2068 address->base = XEXP (x, 0);
/* Commutative case: base on the right, index on the left.  */
2072 if (m68k_legitimate_base_reg_p (XEXP (x, 1), strict_p)
2073 && m68k_decompose_index (XEXP (x, 0), strict_p, address))
2075 address->base = XEXP (x, 1);
2082 /* Return true if X is a legitimate address for values of mode MODE.
2083 STRICT_P says whether strict checking is needed. */
/* Thin wrapper: validity is exactly decomposability; the decomposed
   form is discarded.  */
2086 m68k_legitimate_address_p (enum machine_mode mode, rtx x, bool strict_p)
2088 struct m68k_address address;
2090 return m68k_decompose_address (mode, x, strict_p, &address);
2093 /* Return true if X is a memory, describing its address in ADDRESS if so.
2094 Apply strict checking if called during or after reload. */
/* NOTE(review): the MEM_P (x) test line is elided in this listing.  */
2097 m68k_legitimate_mem_p (rtx x, struct m68k_address *address)
2100 && m68k_decompose_address (GET_MODE (x), XEXP (x, 0),
2101 reload_in_progress || reload_completed,
2105 /* Return true if X matches the 'Q' constraint. It must be a memory
2106 with a base address and no constant offset or index. */
/* Constraint helper: plain (An) addressing only — the base/offset/index
   checks on the elided lines enforce that.  */
2109 m68k_matches_q_p (rtx x)
2111 struct m68k_address address;
2113 return (m68k_legitimate_mem_p (x, &address)
2114 && address.code == UNKNOWN
2120 /* Return true if X matches the 'U' constraint. It must be a base address
2121 with a constant offset and no index. */
/* Constraint helper: (d16,An) addressing — companion of the 'Q' check
   above, with the remaining field tests on elided lines.  */
2124 m68k_matches_u_p (rtx x)
2126 struct m68k_address address;
2128 return (m68k_legitimate_mem_p (x, &address)
2129 && address.code == UNKNOWN
2135 /* Return GOT pointer. */
/* Lazily creates the PIC register rtx and records that this function
   uses the GOT so the prologue will initialize it.  */
2140 if (pic_offset_table_rtx == NULL_RTX)
2141 pic_offset_table_rtx = gen_rtx_REG (Pmode, PIC_REG);
2143 crtl->uses_pic_offset_table = 1;
2145 return pic_offset_table_rtx;
2148 /* M68K relocations, used to distinguish GOT and TLS relocations in UNSPEC
/* RELOC_GOT is the only non-TLS member; everything else maps to a TLS
   access model (general/local dynamic, initial/local exec).  */
2150 enum m68k_reloc { RELOC_GOT, RELOC_TLSGD, RELOC_TLSLDM, RELOC_TLSLDO,
2151 RELOC_TLSIE, RELOC_TLSLE };
/* True for every relocation kind except RELOC_GOT.  */
2153 #define TLS_RELOC_P(RELOC) ((RELOC) != RELOC_GOT)
2155 /* Wrap symbol X into unspec representing relocation RELOC.
2156 BASE_REG - register that should be added to the result.
2157 TEMP_REG - if non-null, temporary register. */
/* NOTE(review): returns and some braces are elided in this listing;
   the -mxgot/-mxtls path returns TEMP_REG, the normal path returns the
   (plus BASE_REG (const (unspec ...))) expression.  */
2160 m68k_wrap_symbol (rtx x, enum m68k_reloc reloc, rtx base_reg, rtx temp_reg)
/* Extended-range addressing applies to GOT accesses under -mxgot and to
   TLS accesses under -mxtls.  */
2164 use_x_p = (base_reg == pic_offset_table_rtx) ? TARGET_XGOT : TARGET_XTLS;
2166 if (TARGET_COLDFIRE && use_x_p)
2167 /* When compiling with -mx{got, tls} switch the code will look like this:
2169 move.l <X>@<RELOC>,<TEMP_REG>
2170 add.l <BASE_REG>,<TEMP_REG> */
2172 /* Wrap X in UNSPEC_??? to tip m68k_output_addr_const_extra
2173 to put @RELOC after reference. */
2174 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2176 x = gen_rtx_CONST (Pmode, x);
2178 if (temp_reg == NULL)
2180 gcc_assert (can_create_pseudo_p ());
2181 temp_reg = gen_reg_rtx (Pmode);
2184 emit_move_insn (temp_reg, x);
2185 emit_insn (gen_addsi3 (temp_reg, temp_reg, base_reg));
/* Standard small-model form: base register plus a 16-bit relocated
   displacement.  */
2190 x = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x, GEN_INT (reloc)),
2192 x = gen_rtx_CONST (Pmode, x);
2194 x = gen_rtx_PLUS (Pmode, base_reg, x);
2200 /* Helper for m68k_unwrap_symbol.
2201 Also, if unwrapping was successful (that is if (ORIG != <return value>)),
2202 sets *RELOC_PTR to relocation type for the symbol. */
/* NOTE(review): break statements and the final "return orig" are elided
   in this listing.  */
2205 m68k_unwrap_symbol_1 (rtx orig, bool unwrap_reloc32_p,
2206 enum m68k_reloc *reloc_ptr)
2208 if (GET_CODE (orig) == CONST)
2211 enum m68k_reloc dummy;
/* A caller that does not care about the relocation kind gets DUMMY.  */
2215 if (reloc_ptr == NULL)
2218 /* Handle an addend. */
2219 if ((GET_CODE (x) == PLUS || GET_CODE (x) == MINUS)
2220 && CONST_INT_P (XEXP (x, 1)))
2223 if (GET_CODE (x) == UNSPEC)
2225 switch (XINT (x, 1))
/* RELOC16 wrappers are always stripped ...  */
2227 case UNSPEC_RELOC16:
2228 orig = XVECEXP (x, 0, 0);
2229 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
/* ... RELOC32 wrappers only when the caller asked for it.  */
2232 case UNSPEC_RELOC32:
2233 if (unwrap_reloc32_p)
2235 orig = XVECEXP (x, 0, 0);
2236 *reloc_ptr = (enum m68k_reloc) INTVAL (XVECEXP (x, 0, 1));
2249 /* Unwrap symbol from UNSPEC_RELOC16 and, if unwrap_reloc32_p,
2250 UNSPEC_RELOC32 wrappers. */
/* Convenience wrapper that discards the relocation kind.  */
2253 m68k_unwrap_symbol (rtx orig, bool unwrap_reloc32_p)
2255 return m68k_unwrap_symbol_1 (orig, unwrap_reloc32_p, NULL);
2258 /* Helper for m68k_final_prescan_insn. */
/* for_each_rtx callback: rewrites (const (plus (unspec [sym]) addend))
   into (const (unspec [(plus sym addend)])) in place, so the @RELOC
   decoration is printed after the full symbol+addend expression.
   NOTE(review): early returns and local declarations are elided in
   this listing.  */
2261 m68k_final_prescan_insn_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2265 if (m68k_unwrap_symbol (x, true) != x)
2266 /* For rationale of the below, see comment in m68k_final_prescan_insn. */
2270 gcc_assert (GET_CODE (x) == CONST);
2273 if (GET_CODE (plus) == PLUS || GET_CODE (plus) == MINUS)
2278 unspec = XEXP (plus, 0);
2279 gcc_assert (GET_CODE (unspec) == UNSPEC);
2280 addend = XEXP (plus, 1);
2281 gcc_assert (CONST_INT_P (addend));
2283 /* We now have all the pieces, rearrange them. */
2285 /* Move symbol to plus. */
2286 XEXP (plus, 0) = XVECEXP (unspec, 0, 0);
2288 /* Move plus inside unspec. */
2289 XVECEXP (unspec, 0, 0) = plus;
2291 /* Move unspec to top level of const. */
2292 XEXP (x, 0) = unspec;
2301 /* Prescan insn before outputing assembler for it. */
2304 m68k_final_prescan_insn (rtx insn ATTRIBUTE_UNUSED,
2305 rtx *operands, int n_operands)
2309 /* Combine and, possibly, other optimizations may do good job
2311 (const (unspec [(symbol)]))
2313 (const (plus (unspec [(symbol)])
2315 The problem with this is emitting @TLS or @GOT decorations.
2316 The decoration is emitted when processing (unspec), so the
2317 result would be "#symbol@TLSLE+N" instead of "#symbol+N@TLSLE".
2319 It seems that the easiest solution to this is to convert such
2321 (const (unspec [(plus (symbol)
2323 Note, that the top level of operand remains intact, so we don't have
2324 to patch up anything outside of the operand. */
/* Apply the rearrangement in-place to every sub-rtx of every operand.  */
2326 for (i = 0; i < n_operands; ++i)
2332 for_each_rtx (&op, m68k_final_prescan_insn_1, NULL);
2336 /* Move X to a register and add REG_EQUAL note pointing to ORIG.
2337 If REG is non-null, use it; generate new pseudo otherwise. */
2340 m68k_move_to_reg (rtx x, rtx orig, rtx reg)
2344 if (reg == NULL_RTX)
2346 gcc_assert (can_create_pseudo_p ());
2347 reg = gen_reg_rtx (Pmode);
2350 insn = emit_move_insn (reg, x);
2351 /* Put a REG_EQUAL note on this insn, so that it can be optimized
/* The note lets later passes know REG equals ORIG (e.g. for CSE).  */
2353 set_unique_reg_note (insn, REG_EQUAL, orig);
2358 /* Does the same as m68k_wrap_symbol, but returns a memory reference to
/* Builds the GOT-slot address via m68k_wrap_symbol, then wraps it in a
   read-only MEM — GOT entries are constant at runtime.  */
2362 m68k_wrap_symbol_into_got_ref (rtx x, enum m68k_reloc reloc, rtx temp_reg)
2364 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), temp_reg);
2366 x = gen_rtx_MEM (Pmode, x);
2367 MEM_READONLY_P (x) = 1;
2372 /* Legitimize PIC addresses. If the address is already
2373 position-independent, we return ORIG. Newly generated
2374 position-independent addresses go to REG. If we need more
2375 than one register, we lose.
2377 An address is legitimized by making an indirect reference
2378 through the Global Offset Table with the name of the symbol
2381 The assembler and linker are responsible for placing the
2382 address of the symbol in the GOT. The function prologue
2383 is responsible for initializing a5 to the starting address
2386 The assembler is also responsible for translating a symbol name
2387 into a constant displacement from the start of the GOT.
2389 A quick example may make things a little clearer:
2391 When not generating PIC code to store the value 12345 into _foo
2392 we would generate the following code:
2396 When generating PIC two transformations are made. First, the compiler
2397 loads the address of foo into a register. So the first transformation makes:
2402 The code in movsi will intercept the lea instruction and call this
2403 routine which will transform the instructions into:
2405 movel a5@(_foo:w), a0
2409 That (in a nutshell) is how *all* symbol and label references are
/* NOTE(review): the "pic_ref = orig" default, several returns and braces
   are elided in this listing.  */
2413 legitimize_pic_address (rtx orig, enum machine_mode mode ATTRIBUTE_UNUSED,
2418 /* First handle a simple SYMBOL_REF or LABEL_REF */
2419 if (GET_CODE (orig) == SYMBOL_REF || GET_CODE (orig) == LABEL_REF)
/* Load the symbol's address from its GOT slot into REG.  */
2423 pic_ref = m68k_wrap_symbol_into_got_ref (orig, RELOC_GOT, reg);
2424 pic_ref = m68k_move_to_reg (pic_ref, orig, reg);
2426 else if (GET_CODE (orig) == CONST)
2430 /* Make sure this has not already been legitimized. */
2431 if (m68k_unwrap_symbol (orig, true) != orig)
2436 /* legitimize both operands of the PLUS */
2437 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS)
/* Reuse REG for the second operand only if the first did not
   consume it.  */
2439 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
2440 orig = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
2441 base == reg ? 0 : reg);
2443 if (GET_CODE (orig) == CONST_INT)
2444 pic_ref = plus_constant (base, INTVAL (orig));
2446 pic_ref = gen_rtx_PLUS (Pmode, base, orig);
2452 /* The __tls_get_addr symbol. */
/* Cached SYMBOL_REF, GC-rooted so it survives between functions.  */
2453 static GTY(()) rtx m68k_tls_get_addr;
2455 /* Return SYMBOL_REF for __tls_get_addr. */
/* Lazily initialized on first use.  */
2458 m68k_get_tls_get_addr (void)
2460 if (m68k_tls_get_addr == NULL_RTX)
2461 m68k_tls_get_addr = init_one_libfunc ("__tls_get_addr");
2463 return m68k_tls_get_addr;
2466 /* Return libcall result in A0 instead of usual D0. */
/* Flag read by the libcall-value hook; toggled around the
   __tls_get_addr call sequence below.  */
2467 static bool m68k_libcall_value_in_a0_p = false;
2469 /* Emit instruction sequence that calls __tls_get_addr. X is
2470 the TLS symbol we are referencing and RELOC is the symbol type to use
2471 (either TLSGD or TLSLDM). EQV is the REG_EQUAL note for the sequence
2472 emitted. A pseudo register with result of __tls_get_addr call is
/* NOTE(review): start_sequence/end_sequence and the final return are
   elided in this listing.  */
2476 m68k_call_tls_get_addr (rtx x, rtx eqv, enum m68k_reloc reloc)
2482 /* Emit the call sequence. */
2485 /* FIXME: Unfortunately, emit_library_call_value does not
2486 consider (plus (%a5) (const (unspec))) to be a good enough
2487 operand for push, so it forces it into a register. The bad
2488 thing about this is that combiner, due to copy propagation and other
2489 optimizations, sometimes can not later fix this. As a consequence,
2490 additional register may be allocated resulting in a spill.
2491 For reference, see args processing loops in
2492 calls.c:emit_library_call_value_1.
2493 For testcase, see gcc.target/m68k/tls-{gd, ld}.c */
/* Build the GOT-relative argument for the call.  */
2494 x = m68k_wrap_symbol (x, reloc, m68k_get_gp (), NULL_RTX);
2496 /* __tls_get_addr() is not a libcall, but emitting a libcall_value
2497 is the simpliest way of generating a call. The difference between
2498 __tls_get_addr() and libcall is that the result is returned in D0
2499 instead of A0. To workaround this, we use m68k_libcall_value_in_a0_p
2500 which temporarily switches returning the result to A0. */
2502 m68k_libcall_value_in_a0_p = true;
2503 a0 = emit_library_call_value (m68k_get_tls_get_addr (), NULL_RTX, LCT_PURE,
2504 Pmode, 1, x, Pmode);
2505 m68k_libcall_value_in_a0_p = false;
2507 insns = get_insns ();
/* Wrap the whole sequence in a libcall block so it can be CSEd via
   the EQV equivalence.  */
2510 gcc_assert (can_create_pseudo_p ());
2511 dest = gen_reg_rtx (Pmode);
2512 emit_libcall_block (insns, dest, a0, eqv);
/* NOTE(review): line-sampled listing; the return-type line is not visible.  */
2517 /* The __m68k_read_tp symbol.  */
2518 static GTY(()) rtx m68k_read_tp;
2520 /* Return SYMBOL_REF for __m68k_read_tp.  */
2523 m68k_get_m68k_read_tp (void)
/* Lazily create and cache the libfunc SYMBOL_REF on first use.  */
2525 if (m68k_read_tp == NULL_RTX)
2526 m68k_read_tp = init_one_libfunc ("__m68k_read_tp");
2528 return m68k_read_tp;
/* NOTE(review): line-sampled listing; the body below is incomplete.  */
2531 /* Emit instruction sequence that calls __m68k_read_tp.
2532 A pseudo register with result of __m68k_read_tp call is returned.  */
2535 m68k_call_m68k_read_tp (void)
2544 /* __m68k_read_tp() is not a libcall, but emitting a libcall_value
2545 is the simplest way of generating a call.  The difference between
2546 __m68k_read_tp() and libcall is that the result is returned in D0
2547 instead of A0.  To workaround this, we use m68k_libcall_value_in_a0_p
2548 which temporarily switches returning the result to A0.  */
2550 /* Emit the call sequence.  */
2551 m68k_libcall_value_in_a0_p = true;
2552 a0 = emit_library_call_value (m68k_get_m68k_read_tp (), NULL_RTX, LCT_PURE,
2554 m68k_libcall_value_in_a0_p = false;
2555 insns = get_insns ();
2558 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2559 share the m68k_read_tp result with other IE/LE model accesses.  */
2560 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx), UNSPEC_RELOC32);
/* Wrap the sequence in a libcall block so the result can be CSEd.  */
2562 gcc_assert (can_create_pseudo_p ());
2563 dest = gen_reg_rtx (Pmode);
2564 emit_libcall_block (insns, dest, a0, eqv);
/* NOTE(review): line-sampled listing; case bodies below are incomplete
   (returns / break statements not visible).  */
2569 /* Return a legitimized address for accessing TLS SYMBOL_REF X.
2570 For explanations on instructions sequences see TLS/NPTL ABI for m68k and
2574 m68k_legitimize_tls_address (rtx orig)
/* Dispatch on the TLS access model recorded on the SYMBOL_REF.  */
2576 switch (SYMBOL_REF_TLS_MODEL (orig))
2578 case TLS_MODEL_GLOBAL_DYNAMIC:
2579 orig = m68k_call_tls_get_addr (orig, orig, RELOC_TLSGD);
2582 case TLS_MODEL_LOCAL_DYNAMIC:
2588 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2589 share the LDM result with other LD model accesses.  */
2590 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2593 a0 = m68k_call_tls_get_addr (orig, eqv, RELOC_TLSLDM);
2595 x = m68k_wrap_symbol (orig, RELOC_TLSLDO, a0, NULL_RTX);
2597 if (can_create_pseudo_p ())
2598 x = m68k_move_to_reg (x, orig, NULL_RTX);
2604 case TLS_MODEL_INITIAL_EXEC:
/* IE: load the offset from the GOT and add the thread pointer.  */
2609 a0 = m68k_call_m68k_read_tp ();
2611 x = m68k_wrap_symbol_into_got_ref (orig, RELOC_TLSIE, NULL_RTX);
2612 x = gen_rtx_PLUS (Pmode, x, a0);
2614 if (can_create_pseudo_p ())
2615 x = m68k_move_to_reg (x, orig, NULL_RTX);
2621 case TLS_MODEL_LOCAL_EXEC:
/* LE: the symbol's offset is wrapped directly relative to the thread
   pointer returned by __m68k_read_tp.  */
2626 a0 = m68k_call_m68k_read_tp ();
2628 x = m68k_wrap_symbol (orig, RELOC_TLSLE, a0, NULL_RTX);
2630 if (can_create_pseudo_p ())
2631 x = m68k_move_to_reg (x, orig, NULL_RTX);
/* NOTE(review): line-sampled listing; the early `return false' lines of
   the guards are not visible.  */
2644 /* Return true if X is a TLS symbol.  */
2647 m68k_tls_symbol_p (rtx x)
2649 if (!TARGET_HAVE_TLS)
2652 if (GET_CODE (x) != SYMBOL_REF)
2655 return SYMBOL_REF_TLS_MODEL (x) != 0;
/* NOTE(review): line-sampled listing; the trailing return for the
   "keep recursing" case is not visible.  */
2658 /* Helper for m68k_tls_referenced_p.  Walks each sub-rtx via
   for_each_rtx; returns 1 on a TLS SYMBOL_REF, skips subtrees that are
   already-legitimate TLS references.  */
2661 m68k_tls_reference_p_1 (rtx *x_ptr, void *data ATTRIBUTE_UNUSED)
2663 /* Note: this is not the same as m68k_tls_symbol_p.  */
2664 if (GET_CODE (*x_ptr) == SYMBOL_REF)
2665 return SYMBOL_REF_TLS_MODEL (*x_ptr) != 0 ? 1 : 0;
2667 /* Don't recurse into legitimate TLS references.  */
2668 if (m68k_tls_reference_p (*x_ptr, true))
/* NOTE(review): line-sampled listing; guard/else lines are missing.  */
2674 /* If !LEGITIMATE_P, return true if X is a TLS symbol reference,
2675 though illegitimate one.
2676 If LEGITIMATE_P, return true if X is a legitimate TLS symbol reference.  */
2679 m68k_tls_reference_p (rtx x, bool legitimate_p)
2681 if (!TARGET_HAVE_TLS)
/* !LEGITIMATE_P path: scan all sub-rtxes for any TLS symbol.  */
2685 return for_each_rtx (&x, m68k_tls_reference_p_1, NULL) == 1 ? true : false;
/* LEGITIMATE_P path: X is legitimate iff unwrapping strips a TLS reloc.  */
2688 enum m68k_reloc reloc = RELOC_GOT;
2690 return (m68k_unwrap_symbol_1 (x, true, &reloc) != x
2691 && TLS_RELOC_P (reloc));
/* NOTE(review): line-sampled listing; the return statements selecting
   each method (MOVQ/NOTB/NOTW/NEGW/SWAP/MVZ/MVS/MOVL, presumably) are
   not visible between the conditions below.  */
/* True iff I fits in the signed 8-bit immediate of the moveq insn.  */
2697 #define USE_MOVQ(i)	((unsigned) ((i) + 128) <= 255)
2699 /* Return the type of move that should be used for integer I.  */
2702 m68k_const_method (HOST_WIDE_INT i)
2709 /* The ColdFire doesn't have byte or word operations.  */
2710 /* FIXME: This may not be useful for the m68060 either.  */
2711 if (!TARGET_COLDFIRE)
2713 /* if -256 < N < 256 but N is not in range for a moveq
2714 N^ff will be, so use moveq #N^ff, dreg; not.b dreg.  */
2715 if (USE_MOVQ (i ^ 0xff))
2717 /* Likewise, try with not.w */
2718 if (USE_MOVQ (i ^ 0xffff))
2720 /* This is the only value where neg.w is useful */
2725 /* Try also with swap.  */
2727 if (USE_MOVQ ((u >> 16) | (u << 16)))
2732 /* Try using MVZ/MVS with an immediate value to load constants.  */
2733 if (i >= 0 && i <= 65535)
2735 if (i >= -32768 && i <= 32767)
2739 /* Otherwise, use move.l */
/* NOTE(review): line-sampled listing; the case labels and return values
   of the switch are not visible.  */
2743 /* Return the cost of moving constant I into a data register.  */
2746 const_int_cost (HOST_WIDE_INT i)
2748 switch (m68k_const_method (i))
2751 /* Constants between -128 and 127 are cheap due to moveq.  */
2759 /* Constants easily generated by moveq + not.b/not.w/neg.w/swap.  */
/* NOTE(review): line-sampled listing; the outer switch on CODE, several
   case labels, and parts of the cost macros are not visible below.  */
/* Implement the RTX cost hook: store the estimated cost of expression X
   (appearing inside OUTER_CODE) into *TOTAL.  */
2769 m68k_rtx_costs (rtx x, int code, int outer_code, int *total,
2770 bool speed ATTRIBUTE_UNUSED)
2775 /* Constant zero is super cheap due to clr instruction.  */
2776 if (x == const0_rtx)
2779 *total = const_int_cost (INTVAL (x));
2789 /* Make 0.0 cheaper than other floating constants to
2790 encourage creating tstsf and tstdf insns.  */
2791 if (outer_code == COMPARE
2792 && (x == CONST0_RTX (SFmode) || x == CONST0_RTX (DFmode)))
2798 /* These are vaguely right for a 68020.  */
2799 /* The costs for long multiply have been adjusted to work properly
2800 in synth_mult on the 68020, relative to an average of the time
2801 for add and the time for shift, taking away a little more because
2802 sometimes move insns are needed.  */
2803 /* div?.w is relatively cheaper on 68000 counted in COSTS_N_INSNS
2808 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2809 : (TUNE_CFV2 && TUNE_MAC) ? 4 \
2811 : TARGET_COLDFIRE ? 3 : 13)
2816 : TUNE_68000_10 ? 5 \
2817 : (TUNE_CFV2 && TUNE_EMAC) ? 3 \
2818 : (TUNE_CFV2 && TUNE_MAC) ? 2 \
2820 : TARGET_COLDFIRE ? 2 : 8)
2823 (TARGET_CF_HWDIV ? 11 \
2824 : TUNE_68000_10 || TARGET_COLDFIRE ? 12 : 27)
2827 /* An lea costs about three times as much as a simple add.  */
2828 if (GET_MODE (x) == SImode
2829 && GET_CODE (XEXP (x, 1)) == REG
2830 && GET_CODE (XEXP (x, 0)) == MULT
2831 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
2832 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
2833 && (INTVAL (XEXP (XEXP (x, 0), 1)) == 2
2834 || INTVAL (XEXP (XEXP (x, 0), 1)) == 4
2835 || INTVAL (XEXP (XEXP (x, 0), 1)) == 8))
2837 /* lea an@(dx:l:i),am */
2838 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 2 : 3);
2848 *total = COSTS_N_INSNS(1);
/* Shift costs — presumably the ASHIFT/ASHIFTRT/LSHIFTRT cases; the case
   labels themselves are not visible.  TODO confirm against full source.  */
2853 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
2855 if (INTVAL (XEXP (x, 1)) < 16)
2856 *total = COSTS_N_INSNS (2) + INTVAL (XEXP (x, 1)) / 2;
2858 /* We're using clrw + swap for these cases.  */
2859 *total = COSTS_N_INSNS (4) + (INTVAL (XEXP (x, 1)) - 16) / 2;
2862 *total = COSTS_N_INSNS (10); /* Worst case.  */
2865 /* A shift by a big integer takes an extra instruction.  */
2866 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2867 && (INTVAL (XEXP (x, 1)) == 16))
2869 *total = COSTS_N_INSNS (2);	 /* clrw;swap */
2872 if (GET_CODE (XEXP (x, 1)) == CONST_INT
2873 && !(INTVAL (XEXP (x, 1)) > 0
2874 && INTVAL (XEXP (x, 1)) <= 8))
2876 *total = COSTS_N_INSNS (TARGET_COLDFIRE ? 1 : 3);	/* lsr #i,dn */
/* Multiply: widening word multiply is cheaper than full 32x32.  */
2882 if ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
2883 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
2884 && GET_MODE (x) == SImode)
2885 *total = COSTS_N_INSNS (MULW_COST);
2886 else if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2887 *total = COSTS_N_INSNS (MULW_COST);
2889 *total = COSTS_N_INSNS (MULL_COST);
/* Division cases.  */
2896 if (GET_MODE (x) == QImode || GET_MODE (x) == HImode)
2897 *total = COSTS_N_INSNS (DIVW_COST);	/* div.w */
2898 else if (TARGET_CF_HWDIV)
2899 *total = COSTS_N_INSNS (18);
2901 *total = COSTS_N_INSNS (43);	/* div.l */
2905 if (outer_code == COMPARE)
/* NOTE(review): line-sampled listing; the case labels of the switch and
   some declarations are not visible.  */
2914 /* Return an instruction to move CONST_INT OPERANDS[1] into data register
2918 output_move_const_into_data_reg (rtx *operands)
2922 i = INTVAL (operands[1]);
/* Pick the assembler template matching the method chosen by
   m68k_const_method; OPERANDS[1] is rewritten when the template expects
   a transformed immediate (xor'ed or swapped).  */
2923 switch (m68k_const_method (i))
2926 return "mvzw %1,%0";
2928 return "mvsw %1,%0";
2930 return "moveq %1,%0";
2933 operands[1] = GEN_INT (i ^ 0xff);
2934 return "moveq %1,%0\n\tnot%.b %0";
2937 operands[1] = GEN_INT (i ^ 0xffff);
2938 return "moveq %1,%0\n\tnot%.w %0";
2941 return "moveq #-128,%0\n\tneg%.w %0";
2946 operands[1] = GEN_INT ((u << 16) | (u >> 16));
2947 return "moveq %1,%0\n\tswap %0";
2950 return "move%.l %1,%0";
2956 /* Return true if I can be handled by ISA B's mov3q instruction.
   mov3q accepts exactly -1 and 1..7 as immediates.  */
2959 valid_mov3q_const (HOST_WIDE_INT i)
2961 return TARGET_ISAB && (i == -1 || IN_RANGE (i, 1, 7));
/* NOTE(review): line-sampled listing; the `src == 0' condition head and
   some return statements are not visible.  */
2964 /* Return an instruction to move CONST_INT OPERANDS[1] into OPERANDS[0].
2965 I is the value of OPERANDS[1].  */
2968 output_move_simode_const (rtx *operands)
2974 src = INTVAL (operands[1]);
/* First alternative (condition head not visible): a clr for zero into a
   data reg or memory, guarded against 68000 read-before-write clr on
   volatile memory.  */
2976 && (DATA_REG_P (dest) || MEM_P (dest))
2977 /* clr insns on 68000 read before writing.  */
2978 && ((TARGET_68010 || TARGET_COLDFIRE)
2979 || !(MEM_P (dest) && MEM_VOLATILE_P (dest))))
2981 else if (GET_MODE (dest) == SImode && valid_mov3q_const (src))
2982 return "mov3q%.l %1,%0";
2983 else if (src == 0 && ADDRESS_REG_P (dest))
2984 return "sub%.l %0,%0";
2985 else if (DATA_REG_P (dest))
2986 return output_move_const_into_data_reg (operands);
2987 else if (ADDRESS_REG_P (dest) && IN_RANGE (src, -0x8000, 0x7fff))
2989 if (valid_mov3q_const (src))
2990 return "mov3q%.l %1,%0";
2991 return "move%.w %1,%0";
/* Push of a word-range constant onto the stack.  */
2993 else if (MEM_P (dest)
2994 && GET_CODE (XEXP (dest, 0)) == PRE_DEC
2995 && REGNO (XEXP (XEXP (dest, 0), 0)) == STACK_POINTER_REGNUM
2996 && IN_RANGE (src, -0x8000, 0x7fff))
2998 if (valid_mov3q_const (src))
2999 return "mov3q%.l %1,%-";
3002 return "move%.l %1,%0";
/* NOTE(review): line-sampled listing; return type line and the return
   for the push-of-symbol case are not visible.  */
/* Return the assembler template for an SImode move.  */
3006 output_move_simode (rtx *operands)
3008 if (GET_CODE (operands[1]) == CONST_INT)
3009 return output_move_simode_const (operands);
3010 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3011 || GET_CODE (operands[1]) == CONST)
3012 && push_operand (operands[0], SImode))
/* Loading a symbolic address into an address register uses lea.  */
3014 else if ((GET_CODE (operands[1]) == SYMBOL_REF
3015 || GET_CODE (operands[1]) == CONST)
3016 && ADDRESS_REG_P (operands[0]))
3017 return "lea %a1,%0";
3018 return "move%.l %1,%0";
/* NOTE(review): line-sampled listing; some return statements (e.g. the
   clr for the zero case) are not visible.  */
/* Return the assembler template for an HImode move.  */
3022 output_move_himode (rtx *operands)
3024 if (GET_CODE (operands[1]) == CONST_INT)
3026 if (operands[1] == const0_rtx
3027 && (DATA_REG_P (operands[0])
3028 || GET_CODE (operands[0]) == MEM)
3029 /* clr insns on 68000 read before writing.  */
3030 && ((TARGET_68010 || TARGET_COLDFIRE)
3031 || !(GET_CODE (operands[0]) == MEM
3032 && MEM_VOLATILE_P (operands[0]))))
3034 else if (operands[1] == const0_rtx
3035 && ADDRESS_REG_P (operands[0]))
3036 return "sub%.l %0,%0";
3037 else if (DATA_REG_P (operands[0])
3038 && INTVAL (operands[1]) < 128
3039 && INTVAL (operands[1]) >= -128)
3040 return "moveq %1,%0";
3041 else if (INTVAL (operands[1]) < 0x8000
3042 && INTVAL (operands[1]) >= -0x8000)
3043 return "move%.w %1,%0";
/* Non-CONST_INT constants (symbols etc.) need a full long move.  */
3045 else if (CONSTANT_P (operands[1]))
3046 return "move%.l %1,%0";
3047 return "move%.w %1,%0";
/* NOTE(review): line-sampled listing; the clr/st return statements are
   not visible below.  */
/* Return the assembler template for a QImode move.  */
3051 output_move_qimode (rtx *operands)
3053 /* 68k family always modifies the stack pointer by at least 2, even for
3054 byte pushes.  The 5200 (ColdFire) does not do this.  */
3056 /* This case is generated by pushqi1 pattern now.  */
3057 gcc_assert (!(GET_CODE (operands[0]) == MEM
3058 && GET_CODE (XEXP (operands[0], 0)) == PRE_DEC
3059 && XEXP (XEXP (operands[0], 0), 0) == stack_pointer_rtx
3060 && ! ADDRESS_REG_P (operands[1])
3061 && ! TARGET_COLDFIRE));
3063 /* clr and st insns on 68000 read before writing.  */
3064 if (!ADDRESS_REG_P (operands[0])
3065 && ((TARGET_68010 || TARGET_COLDFIRE)
3066 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3068 if (operands[1] == const0_rtx)
/* 0xff into a data reg (or any non-ColdFire dest) can use st (set all
   bits) — return not visible in this listing.  */
3070 if ((!TARGET_COLDFIRE || DATA_REG_P (operands[0]))
3071 && GET_CODE (operands[1]) == CONST_INT
3072 && (INTVAL (operands[1]) & 255) == 255)
3078 if (GET_CODE (operands[1]) == CONST_INT
3079 && DATA_REG_P (operands[0])
3080 && INTVAL (operands[1]) < 128
3081 && INTVAL (operands[1]) >= -128)
3082 return "moveq %1,%0";
3083 if (operands[1] == const0_rtx && ADDRESS_REG_P (operands[0]))
3084 return "sub%.l %0,%0";
3085 if (GET_CODE (operands[1]) != CONST_INT && CONSTANT_P (operands[1]))
3086 return "move%.l %1,%0";
3087 /* 68k family (including the 5200 ColdFire) does not support byte moves to
3088 from address registers.  */
3089 if (ADDRESS_REG_P (operands[0]) || ADDRESS_REG_P (operands[1]))
3090 return "move%.w %1,%0";
3091 return "move%.b %1,%0";
/* NOTE(review): line-sampled listing; the clr return is not visible.  */
/* Return the assembler template for a strict_low_part HImode move.  */
3095 output_move_stricthi (rtx *operands)
3097 if (operands[1] == const0_rtx
3098 /* clr insns on 68000 read before writing.  */
3099 && ((TARGET_68010 || TARGET_COLDFIRE)
3100 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3102 return "move%.w %1,%0";
/* NOTE(review): line-sampled listing; the clr return is not visible.  */
/* Return the assembler template for a strict_low_part QImode move.  */
3106 output_move_strictqi (rtx *operands)
3108 if (operands[1] == const0_rtx
3109 /* clr insns on 68000 read before writing.  */
3110 && ((TARGET_68010 || TARGET_COLDFIRE)
3111 || !(GET_CODE (operands[0]) == MEM && MEM_VOLATILE_P (operands[0]))))
3113 return "move%.b %1,%0";
3116 /* Return the best assembler insn template
3117 for moving operands[1] into operands[0] as a fullword.
   Delegates CONST_INT sources to output_move_simode_const.  */
3120 singlemove_string (rtx *operands)
3122 if (GET_CODE (operands[1]) == CONST_INT)
3123 return output_move_simode_const (operands);
3124 return "move%.l %1,%0";
/* NOTE(review): line-sampled listing; braces, else-arms and several
   statements (including the final address-register restore and the
   size == 12 guards) are not visible between the numbered lines.  */
3128 /* Output assembler or rtl code to perform a doubleword move insn
3129 with operands OPERANDS.
3130 Pointers to 3 helper functions should be specified:
3131 HANDLE_REG_ADJUST to adjust a register by a small value,
3132 HANDLE_COMPADR to compute an address and
3133 HANDLE_MOVSI to move 4 bytes.  */
3136 handle_move_double (rtx operands[2],
3137 void (*handle_reg_adjust) (rtx, int),
3138 void (*handle_compadr) (rtx [2]),
3139 void (*handle_movsi) (rtx [2]))
/* Operand classification: register, offsettable mem, other mem,
   push (pre-dec), pop (post-inc), constant, or invalid.  */
3143 REGOP, OFFSOP, MEMOP, PUSHOP, POPOP, CNSTOP, RNDOP
3148 rtx addreg0 = 0, addreg1 = 0;
3149 int dest_overlapped_low = 0;
3150 int size = GET_MODE_SIZE (GET_MODE (operands[0]));
3155 /* First classify both operands.  */
3157 if (REG_P (operands[0]))
3159 else if (offsettable_memref_p (operands[0]))
3161 else if (GET_CODE (XEXP (operands[0], 0)) == POST_INC)
3163 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_DEC)
3165 else if (GET_CODE (operands[0]) == MEM)
3170 if (REG_P (operands[1]))
3172 else if (CONSTANT_P (operands[1]))
3174 else if (offsettable_memref_p (operands[1]))
3176 else if (GET_CODE (XEXP (operands[1], 0)) == POST_INC)
3178 else if (GET_CODE (XEXP (operands[1], 0)) == PRE_DEC)
3180 else if (GET_CODE (operands[1]) == MEM)
3185 /* Check for the cases that the operand constraints are not supposed
3186 to allow to happen.  Generating code for these cases is
3188 gcc_assert (optype0 != RNDOP && optype1 != RNDOP);
3190 /* If one operand is decrementing and one is incrementing
3191 decrement the former register explicitly
3192 and change that operand into ordinary indexing.  */
3194 if (optype0 == PUSHOP && optype1 == POPOP)
3196 operands[0] = XEXP (XEXP (operands[0], 0), 0);
3198 handle_reg_adjust (operands[0], -size);
3200 if (GET_MODE (operands[1]) == XFmode)
3201 operands[0] = gen_rtx_MEM (XFmode, operands[0]);
3202 else if (GET_MODE (operands[0]) == DFmode)
3203 operands[0] = gen_rtx_MEM (DFmode, operands[0]);
3205 operands[0] = gen_rtx_MEM (DImode, operands[0]);
3208 if (optype0 == POPOP && optype1 == PUSHOP)
3210 operands[1] = XEXP (XEXP (operands[1], 0), 0);
3212 handle_reg_adjust (operands[1], -size);
3214 if (GET_MODE (operands[1]) == XFmode)
3215 operands[1] = gen_rtx_MEM (XFmode, operands[1]);
3216 else if (GET_MODE (operands[1]) == DFmode)
3217 operands[1] = gen_rtx_MEM (DFmode, operands[1]);
3219 operands[1] = gen_rtx_MEM (DImode, operands[1]);
3223 /* If an operand is an unoffsettable memory ref, find a register
3224 we can increment temporarily to make it refer to the second word.  */
3226 if (optype0 == MEMOP)
3227 addreg0 = find_addr_reg (XEXP (operands[0], 0));
3229 if (optype1 == MEMOP)
3230 addreg1 = find_addr_reg (XEXP (operands[1], 0));
3232 /* Ok, we can do one word at a time.
3233 Normally we do the low-numbered word first,
3234 but if either operand is autodecrementing then we
3235 do the high-numbered word first.
3237 In either case, set up in LATEHALF the operands to use
3238 for the high-numbered word and in some cases alter the
3239 operands in OPERANDS to be suitable for the low-numbered word.  */
/* The following branch (guard not visible; presumably size == 12, i.e.
   XFmode) sets up middlehalf as well as latehalf.  */
3243 if (optype0 == REGOP)
3245 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 2);
3246 middlehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3248 else if (optype0 == OFFSOP)
3250 middlehalf[0] = adjust_address (operands[0], SImode, 4);
3251 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3255 middlehalf[0] = adjust_address (operands[0], SImode, 0);
3256 latehalf[0] = adjust_address (operands[0], SImode, 0);
3259 if (optype1 == REGOP)
3261 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 2);
3262 middlehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3264 else if (optype1 == OFFSOP)
3266 middlehalf[1] = adjust_address (operands[1], SImode, 4);
3267 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3269 else if (optype1 == CNSTOP)
3271 if (GET_CODE (operands[1]) == CONST_DOUBLE)
3276 REAL_VALUE_FROM_CONST_DOUBLE (r, operands[1]);
3277 REAL_VALUE_TO_TARGET_LONG_DOUBLE (r, l);
3278 operands[1] = GEN_INT (l[0]);
3279 middlehalf[1] = GEN_INT (l[1]);
3280 latehalf[1] = GEN_INT (l[2]);
3284 /* No non-CONST_DOUBLE constant should ever appear
3286 gcc_assert (!CONSTANT_P (operands[1]));
3291 middlehalf[1] = adjust_address (operands[1], SImode, 0);
3292 latehalf[1] = adjust_address (operands[1], SImode, 0);
3296 /* size is not 12: */
3298 if (optype0 == REGOP)
3299 latehalf[0] = gen_rtx_REG (SImode, REGNO (operands[0]) + 1);
3300 else if (optype0 == OFFSOP)
3301 latehalf[0] = adjust_address (operands[0], SImode, size - 4);
3303 latehalf[0] = adjust_address (operands[0], SImode, 0);
3305 if (optype1 == REGOP)
3306 latehalf[1] = gen_rtx_REG (SImode, REGNO (operands[1]) + 1);
3307 else if (optype1 == OFFSOP)
3308 latehalf[1] = adjust_address (operands[1], SImode, size - 4);
3309 else if (optype1 == CNSTOP)
3310 split_double (operands[1], &operands[1], &latehalf[1]);
3312 latehalf[1] = adjust_address (operands[1], SImode, 0);
3315 /* If insn is effectively movd N(sp),-(sp) then we will do the
3316 high word first.  We should use the adjusted operand 1 (which is N+4(sp))
3317 for the low word as well, to compensate for the first decrement of sp.  */
3318 if (optype0 == PUSHOP
3319 && REGNO (XEXP (XEXP (operands[0], 0), 0)) == STACK_POINTER_REGNUM
3320 && reg_overlap_mentioned_p (stack_pointer_rtx, operands[1]))
3321 operands[1] = middlehalf[1] = latehalf[1];
3323 /* For (set (reg:DI N) (mem:DI ... (reg:SI N) ...)),
3324 if the upper part of reg N does not appear in the MEM, arrange to
3325 emit the move late-half first.  Otherwise, compute the MEM address
3326 into the upper part of N and use that as a pointer to the memory
3328 if (optype0 == REGOP
3329 && (optype1 == OFFSOP || optype1 == MEMOP))
3331 rtx testlow = gen_rtx_REG (SImode, REGNO (operands[0]));
3333 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3334 && reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3336 /* If both halves of dest are used in the src memory address,
3337 compute the address into latehalf of dest.
3338 Note that this can't happen if the dest is two data regs.  */
3340 xops[0] = latehalf[0];
3341 xops[1] = XEXP (operands[1], 0);
3343 handle_compadr (xops);
3344 if (GET_MODE (operands[1]) == XFmode)
3346 operands[1] = gen_rtx_MEM (XFmode, latehalf[0]);
3347 middlehalf[1] = adjust_address (operands[1], DImode, size - 8);
3348 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
3352 operands[1] = gen_rtx_MEM (DImode, latehalf[0]);
3353 latehalf[1] = adjust_address (operands[1], DImode, size - 4);
/* Middle-half conflict branch (condition head not fully visible).  */
3357 && reg_overlap_mentioned_p (middlehalf[0],
3358 XEXP (operands[1], 0)))
3360 /* Check for two regs used by both source and dest.
3361 Note that this can't happen if the dest is all data regs.
3362 It can happen if the dest is d6, d7, a0.
3363 But in that case, latehalf is an addr reg, so
3364 the code at compadr does ok.  */
3366 if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0))
3367 || reg_overlap_mentioned_p (latehalf[0], XEXP (operands[1], 0)))
3370 /* JRV says this can't happen: */
3371 gcc_assert (!addreg0 && !addreg1);
3373 /* Only the middle reg conflicts; simply put it last.  */
3374 handle_movsi (operands);
3375 handle_movsi (latehalf);
3376 handle_movsi (middlehalf);
3380 else if (reg_overlap_mentioned_p (testlow, XEXP (operands[1], 0)))
3381 /* If the low half of dest is mentioned in the source memory
3382 address, arrange to emit the move late half first.  */
3383 dest_overlapped_low = 1;
3386 /* If one or both operands autodecrementing,
3387 do the two words, high-numbered first.  */
3389 /* Likewise, the first move would clobber the source of the second one,
3390 do them in the other order.  This happens only for registers;
3391 such overlap can't happen in memory unless the user explicitly
3392 sets it up, and that is an undefined circumstance.  */
3394 if (optype0 == PUSHOP || optype1 == PUSHOP
3395 || (optype0 == REGOP && optype1 == REGOP
3396 && ((middlehalf[1] && REGNO (operands[0]) == REGNO (middlehalf[1]))
3397 || REGNO (operands[0]) == REGNO (latehalf[1])))
3398 || dest_overlapped_low)
3400 /* Make any unoffsettable addresses point at high-numbered word.  */
3402 handle_reg_adjust (addreg0, size - 4);
3404 handle_reg_adjust (addreg1, size - 4);
/* High word first.  */
3407 handle_movsi (latehalf);
3409 /* Undo the adds we just did.  */
3411 handle_reg_adjust (addreg0, -4);
3413 handle_reg_adjust (addreg1, -4);
/* Middle word (three-word case).  */
3417 handle_movsi (middlehalf);
3420 handle_reg_adjust (addreg0, -4);
3422 handle_reg_adjust (addreg1, -4);
3425 /* Do low-numbered word.  */
3427 handle_movsi (operands);
3431 /* Normal case: do the two words, low-numbered first.  */
3433 handle_movsi (operands);
3435 /* Do the middle one of the three words for long double */
3439 handle_reg_adjust (addreg0, 4);
3441 handle_reg_adjust (addreg1, 4);
3443 handle_movsi (middlehalf);
3446 /* Make any unoffsettable addresses point at high-numbered word.  */
3448 handle_reg_adjust (addreg0, 4);
3450 handle_reg_adjust (addreg1, 4);
/* High word last.  */
3453 handle_movsi (latehalf);
3455 /* Undo the adds we just did.  */
3457 handle_reg_adjust (addreg0, -(size - 4));
3459 handle_reg_adjust (addreg1, -(size - 4));
3464 /* Output assembler code to adjust REG by N. */
3466 output_reg_adjust (rtx reg, int n)
3470 gcc_assert (GET_MODE (reg) == SImode
3471 && -12 <= n && n != 0 && n <= 12);
3476 s = "add%.l #12,%0";
3480 s = "addq%.l #8,%0";
3484 s = "addq%.l #4,%0";
3488 s = "sub%.l #12,%0";
3492 s = "subq%.l #8,%0";
3496 s = "subq%.l #4,%0";
3504 output_asm_insn (s, ®);