1 /* Subroutines for insn-output.c for Matsushita MN10300 series
2 Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
3 2005, 2006, 2007, 2008, 2009, 2010 Free Software Foundation, Inc.
4 Contributed by Jeff Law (law@cygnus.com).
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
29 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
33 #include "insn-attr.h"
41 #include "diagnostic-core.h"
44 #include "target-def.h"
47 /* This is used in the am33_2.0-linux-gnu port, in which global symbol
48 names are not prefixed by underscores, to tell whether to prefix a
49 label with a plus sign or not, so that the assembler can tell
50 symbol names from register names. */
51 int mn10300_protect_label;
53 /* The selected processor. */
54 enum processor_type mn10300_processor = PROCESSOR_DEFAULT;
/* Raw -mtune= string, saved by the option handler; parsed into
   mn10300_tune_cpu by mn10300_option_override below.  */
56 /* Processor type to select for tuning. */
57 static const char * mn10300_tune_string = NULL;
59 /* Selected processor type for tuning. */
60 enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
62 /* The size of the callee register save area. Right now we save everything
63 on entry since it costs us nothing in code size. It does cost us from a
64 speed standpoint, so we want to optimize this sooner or later. */
/* Registers 2, 3, 6 and 7 are saved individually (4 bytes each); the
   extended registers 14-17 are saved as one 16-byte group if ANY of
   them is live (movm cannot store them individually — see
   mn10300_print_reg_list / mn10300_get_live_callee_saved_regs).  */
65 #define REG_SAVE_BYTES (4 * df_regs_ever_live_p (2) \
66 + 4 * df_regs_ever_live_p (3) \
67 + 4 * df_regs_ever_live_p (6) \
68 + 4 * df_regs_ever_live_p (7) \
69 + 16 * (df_regs_ever_live_p (14) \
70 || df_regs_ever_live_p (15) \
71 || df_regs_ever_live_p (16) \
72 || df_regs_ever_live_p (17)))
74 /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
/* -fomit-frame-pointer is enabled at -O1 and above; the all-zero entry
   terminates the table.  */
75 static const struct default_options mn10300_option_optimization_table[] =
77 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
78 { OPT_LEVELS_NONE, 0, NULL, 0 }
/* Forward declarations for the condition-code helpers used by
   mn10300_print_operand.  */
86 static int cc_flags_for_mode(enum machine_mode);
87 static int cc_flags_for_code(enum rtx_code);
89 /* Implement TARGET_HANDLE_OPTION. */
/* Dispatch on the option CODE: the -mam33/-mam33-2/-mam34 switches
   select mn10300_processor according to VALUE, and -mtune= merely
   records its argument for later parsing in mn10300_option_override.
   NOTE(review): the switch statement itself is not visible here.  */
92 mn10300_handle_option (size_t code,
93 const char *arg ATTRIBUTE_UNUSED,
99 mn10300_processor = value ? PROCESSOR_AM33 : PROCESSOR_MN10300;
103 mn10300_processor = (value
105 : MIN (PROCESSOR_AM33, PROCESSOR_DEFAULT));
109 mn10300_processor = (value ? PROCESSOR_AM34 : PROCESSOR_DEFAULT);
113 mn10300_tune_string = arg;
121 /* Implement TARGET_OPTION_OVERRIDE. */
/* Final pass over the option state: clear the mult-bug workaround flag
   (presumably conditional on the selected processor — the guarding
   condition is not visible here), disable instruction scheduling,
   force -fsplit-wide-types, and translate the saved -mtune= string
   into mn10300_tune_cpu, diagnosing unknown values.  */
124 mn10300_option_override (void)
127 target_flags &= ~MASK_MULT_BUG;
130 /* Disable scheduling for the MN10300 as we do
131 not have timing information available for it. */
132 flag_schedule_insns = 0;
133 flag_schedule_insns_after_reload = 0;
135 /* Force enable splitting of wide types, as otherwise it is trivial
136 to run out of registers. Indeed, this works so well that register
137 allocation problems are now more common *without* optimization,
138 when this flag is not enabled by default. */
139 flag_split_wide_types = 1;
142 if (mn10300_tune_string)
144 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
145 mn10300_tune_cpu = PROCESSOR_MN10300;
146 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
147 mn10300_tune_cpu = PROCESSOR_AM33;
148 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
149 mn10300_tune_cpu = PROCESSOR_AM33_2;
150 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
151 mn10300_tune_cpu = PROCESSOR_AM34;
153 error ("-mtune= expects mn10300, am33, am33-2, or am34");
/* Emit the standard file prologue, then an .am33_2 or .am33 assembler
   directive to select the instruction-set level when targeting those
   processor variants (the TARGET_AM33_2 test is not visible here).  */
158 mn10300_file_start (void)
160 default_file_start ();
163 fprintf (asm_out_file, "\t.am33_2\n");
164 else if (TARGET_AM33)
165 fprintf (asm_out_file, "\t.am33\n");
168 /* Print operand X using operand code CODE to assembly language output file
/* FILE.  CODE selects one of several formatting modes: condition-code
   suffixes for branches, call-operand parenthesization, the low or
   high word of a 64-bit value, masked 8-bit and 5-bit immediates, and
   plain operands.  Only fragments of the dispatch are visible here.  */
172 mn10300_print_operand (FILE *file, rtx x, int code)
/* Condition-code printing: X is a comparison; its mode tells us which
   CC flags the comparison insn actually sets.  */
179 enum rtx_code cmp = GET_CODE (x);
180 enum machine_mode mode = GET_MODE (XEXP (x, 0));
185 cmp = reverse_condition (cmp);
186 have_flags = cc_flags_for_mode (mode);
197 /* bge is smaller than bnc. */
/* Prefer the signed mnemonic when the V flag is available since it
   encodes shorter; otherwise fall back to the carry/sign forms.  */
198 str = (have_flags & CC_FLAG_V ? "ge" : "nc");
201 str = (have_flags & CC_FLAG_V ? "lt" : "ns");
/* The requested condition must be computable from the flags the
   comparison provides.  */
249 gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
255 /* This is used for the operand to a call instruction;
256 if it's a REG, enclose it in parens, else output
257 the operand normally. */
261 mn10300_print_operand (file, x, 0);
265 mn10300_print_operand (file, x, 0);
269 switch (GET_CODE (x))
273 output_address (XEXP (x, 0));
/* FP register pair name: FP regs start at hard regno 18.  */
278 fprintf (file, "fd%d", REGNO (x) - 18);
286 /* These are the least significant word in a 64bit value. */
288 switch (GET_CODE (x))
292 output_address (XEXP (x, 0));
297 fprintf (file, "%s", reg_names[REGNO (x)]);
301 fprintf (file, "%s", reg_names[subreg_regno (x)]);
309 switch (GET_MODE (x))
/* CONST_DOUBLE: emit the target-format low word of the FP constant.  */
312 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
313 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
314 fprintf (file, "0x%lx", val[0]);
317 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
318 REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
319 fprintf (file, "0x%lx", val[0]);
323 mn10300_print_operand_address (file,
324 GEN_INT (CONST_DOUBLE_LOW (x)));
/* Integer 64-bit constant: print the low half.  */
335 split_double (x, &low, &high);
336 fprintf (file, "%ld", (long)INTVAL (low));
345 /* Similarly, but for the most significant word. */
347 switch (GET_CODE (x))
/* For memory, address the upper word at offset 4.  */
351 x = adjust_address (x, SImode, 4);
352 output_address (XEXP (x, 0));
357 fprintf (file, "%s", reg_names[REGNO (x) + 1]);
361 fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
369 switch (GET_MODE (x))
372 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
373 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
374 fprintf (file, "0x%lx", val[1]);
380 mn10300_print_operand_address (file,
381 GEN_INT (CONST_DOUBLE_HIGH (x)));
392 split_double (x, &low, &high);
393 fprintf (file, "%ld", (long)INTVAL (high));
/* Plain register address: print as (reg + 0) so the assembler sees an
   explicit displacement form.  */
404 if (REG_P (XEXP (x, 0)))
405 output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
407 output_address (XEXP (x, 0));
/* 8-bit immediate, one's-complemented before masking.  */
412 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
413 fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
417 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
418 fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
421 /* For shift counts. The hardware ignores the upper bits of
422 any immediate, but the assembler will flag an out of range
423 shift count as an error. So we mask off the high bits
424 of the immediate here. */
428 fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
/* Default case: print the operand according to its own code.  */
434 switch (GET_CODE (x))
438 output_address (XEXP (x, 0));
447 fprintf (file, "%s", reg_names[REGNO (x)]);
451 fprintf (file, "%s", reg_names[subreg_regno (x)]);
454 /* This will only be single precision.... */
460 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
461 REAL_VALUE_TO_TARGET_SINGLE (rv, val);
462 fprintf (file, "0x%lx", val);
472 mn10300_print_operand_address (file, x);
481 /* Output assembly language output for the address ADDR to FILE. */
/* Handles POST_INC, register, indexed (base + index) and symbolic
   addresses.  For indexed addresses the index is printed before the
   base, per MN10300 assembler syntax; the operands are swapped first
   if they arrived in the wrong order (swap not visible here).  */
484 mn10300_print_operand_address (FILE *file, rtx addr)
486 switch (GET_CODE (addr))
489 mn10300_print_operand (file, XEXP (addr, 0), 0);
494 mn10300_print_operand (file, XEXP (addr, 0), 0);
497 mn10300_print_operand (file, XEXP (addr, 1), 0);
501 mn10300_print_operand (file, addr, 0);
505 rtx base = XEXP (addr, 0);
506 rtx index = XEXP (addr, 1);
508 if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
/* After any swap, operands must be a valid index/base pair.  */
514 gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
516 gcc_assert (REG_OK_FOR_BASE_P (base));
518 mn10300_print_operand (file, index, 0);
520 mn10300_print_operand (file, base, 0);
524 output_addr_const (file, addr);
527 output_addr_const (file, addr);
532 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
534 Used for PIC-specific UNSPECs. */
/* For an UNSPEC wrapping a symbol, print the symbol followed by the
   relocation suffix selected by the unspec number (@GOT, @GOTOFF,
   @PLT, or none); UNSPEC_GOTSYM_OFF prints the GOT symbol name and
   the offset expression.  Non-UNSPEC rtx are presumably rejected
   (return path not visible here).  */
537 mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
539 if (GET_CODE (x) == UNSPEC)
544 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
545 output_addr_const (file, XVECEXP (x, 0, 0));
548 output_addr_const (file, XVECEXP (x, 0, 0));
549 fputs ("@GOT", file);
552 output_addr_const (file, XVECEXP (x, 0, 0));
553 fputs ("@GOTOFF", file);
556 output_addr_const (file, XVECEXP (x, 0, 0));
557 fputs ("@PLT", file);
559 case UNSPEC_GOTSYM_OFF:
560 assemble_name (file, GOT_SYMBOL_NAME);
562 output_addr_const (file, XVECEXP (x, 0, 0));
574 /* Count the number of FP registers that have to be saved. */
/* A register needs saving if it is live and not call-clobbered.
   (The counter increment inside the loop is not visible here.)  */
576 fp_regs_to_save (void)
583 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
584 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
590 /* Print a set of registers in the format required by "movm" and "ret".
591 Register K is saved if bit K of MASK is set. The data and address
592 registers can be stored individually, but the extended registers cannot.
593 We assume that the mask already takes that into account. For instance,
594 bits 14 to 17 must have the same value. */
597 mn10300_print_reg_list (FILE *file, int mask)
/* Print each individually-storable register by name ... */
605 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
606 if ((mask & (1 << i)) != 0)
610 fputs (reg_names [i], file);
/* ... then the extended registers as the single "exreg1" group; the
   mask must contain either all four of bits 14-17 or none.  */
614 if ((mask & 0x3c000) != 0)
616 gcc_assert ((mask & 0x3c000) == 0x3c000);
619 fputs ("exreg1", file);
626 /* If the MDR register is never clobbered, we can use the RETF instruction
627 which takes the address from the MDR register. This is 3 cycles faster
628 than having to load the address from the stack. */
/* Returns nonzero only for optimized, non-EH, MDR-clean leaf
   functions.  */
631 mn10300_can_use_retf_insn (void)
633 /* Don't bother if we're not optimizing. In this case we won't
634 have proper access to df_regs_ever_live_p. */
638 /* EH returns alter the saved return address; MDR is not current. */
639 if (crtl->calls_eh_return)
642 /* Obviously not if MDR is ever clobbered. */
643 if (df_regs_ever_live_p (MDR_REG))
646 /* ??? Careful not to use this during expand_epilogue etc. */
647 gcc_assert (!in_sequence_p ());
648 return leaf_function_p ();
/* The one-byte "rets" return is usable only when there is nothing at
   all to deallocate, i.e. the arg-pointer/stack-pointer offset is 0.  */
652 mn10300_can_use_rets_insn (void)
654 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
657 /* Returns the set of live, callee-saved registers as a bitmask. The
658 callee-saved extended registers cannot be stored individually, so
659 all of them will be included in the mask if any one of them is used. */
662 mn10300_get_live_callee_saved_regs (void)
668 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
669 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
/* Widen any partial extended-register use to the whole 14-17 group.  */
671 if ((mask & 0x3c000) != 0)
/* NOTE(review): line 680 below appears to belong to a separate small
   helper (it marks an rtx frame-related); the helper's signature is
   not visible in this listing.  */
680 RTX_FRAME_RELATED_P (r) = 1;
684 /* Generate an instruction that pushes several registers onto the stack.
685 Register K will be saved if bit K in MASK is set. The function does
686 nothing if MASK is zero.
688 To be compatible with the "movm" instruction, the lowest-numbered
689 register must be stored in the lowest slot. If MASK is the set
690 { R1,...,RN }, where R1...RN are ordered least first, the generated
691 instruction will have the form:
694 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
695 (set (mem:SI (plus:SI (reg:SI 9)
699 (set (mem:SI (plus:SI (reg:SI 9)
704 mn10300_gen_multiple_store (unsigned int mask)
706 /* The order in which registers are stored, from SP-4 through SP-N*4. */
707 static const unsigned int store_order[8] = {
708 /* e2, e3: never saved */
709 FIRST_EXTENDED_REGNUM + 4,
710 FIRST_EXTENDED_REGNUM + 5,
711 FIRST_EXTENDED_REGNUM + 6,
712 FIRST_EXTENDED_REGNUM + 7,
713 /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
714 FIRST_DATA_REGNUM + 2,
715 FIRST_DATA_REGNUM + 3,
716 FIRST_ADDRESS_REGNUM + 2,
717 FIRST_ADDRESS_REGNUM + 3,
718 /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
/* Walk the fixed store order, building one (set (mem ...) (reg ...))
   element per saved register and counting the slots used.  */
728 for (i = count = 0; i < ARRAY_SIZE(store_order); ++i)
730 unsigned regno = store_order[i];
732 if (((mask >> regno) & 1) == 0)
736 x = plus_constant (stack_pointer_rtx, count * -4);
737 x = gen_frame_mem (SImode, x);
738 x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
741 /* Remove the register from the mask so that... */
742 mask &= ~(1u << regno);
745 /* ... we can make sure that we didn't try to use a register
746 not listed in the store order. */
747 gcc_assert (mask == 0);
749 /* Create the instruction that updates the stack pointer. */
750 x = plus_constant (stack_pointer_rtx, count * -4);
751 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
754 /* We need one PARALLEL element to update the stack pointer and
755 an additional element for each register that is stored. */
756 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts))
/* Expand the function prologue: save callee-saved general registers
   with a single multiple-store, then (on AM33/2.0 with live FP regs)
   choose among several strategies for saving the FP registers so as
   to minimize code size, set up the frame pointer if needed, allocate
   the local frame, and load the PIC register when required.  */
761 mn10300_expand_prologue (void)
763 HOST_WIDE_INT size = mn10300_frame_size ();
765 /* If we use any of the callee-saved registers, save them now. */
766 mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs ());
768 if (TARGET_AM33_2 && fp_regs_to_save ())
770 int num_regs_to_save = fp_regs_to_save (), i;
776 save_sp_partial_merge,
780 unsigned int strategy_size = (unsigned)-1, this_strategy_size;
783 /* We have several different strategies to save FP registers.
784 We can store them using SP offsets, which is beneficial if
785 there are just a few registers to save, or we can use `a0' in
786 post-increment mode (`a0' is the only call-clobbered address
787 register that is never used to pass information to a
788 function). Furthermore, if we don't need a frame pointer, we
789 can merge the two SP adds into a single one, but this isn't
790 always beneficial; sometimes we can just split the two adds
791 so that we don't exceed a 16-bit constant size. The code
792 below will select which strategy to use, so as to generate
793 smallest code. Ties are broken in favor of shorter sequences
794 (in terms of number of instructions). */
/* Byte-size estimates for an add to an address register / to SP with
   the given displacement S.  */
796 #define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
797 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
798 #define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
799 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
801 /* We add 0 * (S) in two places to promote to the type of S,
802 so that all arms of the conditional have the same type. */
803 #define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
804 (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
805 : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
806 + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
/* Estimated size of N fmov-to-SP-offset insns starting at offset S.  */
808 #define SIZE_FMOV_SP_(S,N) \
809 (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
810 SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
811 (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
812 #define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
814 /* Consider alternative save_sp_merge only if we don't need the
815 frame pointer and size is nonzero. */
816 if (! frame_pointer_needed && size)
818 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
819 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
820 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
821 this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);
823 if (this_strategy_size < strategy_size)
825 strategy = save_sp_merge;
826 strategy_size = this_strategy_size;
830 /* Consider alternative save_sp_no_merge unconditionally. */
831 /* Insn: add -4 * num_regs_to_save, sp. */
832 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
833 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
834 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
837 /* Insn: add -size, sp. */
838 this_strategy_size += SIZE_ADD_SP (-size);
841 if (this_strategy_size < strategy_size)
843 strategy = save_sp_no_merge;
844 strategy_size = this_strategy_size;
847 /* Consider alternative save_sp_partial_merge only if we don't
848 need a frame pointer and size is reasonably large. */
849 if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
851 /* Insn: add -128, sp. */
852 this_strategy_size = SIZE_ADD_SP (-128);
853 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
854 this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
858 /* Insn: add 128-size, sp. */
859 this_strategy_size += SIZE_ADD_SP (128 - size);
862 if (this_strategy_size < strategy_size)
864 strategy = save_sp_partial_merge;
865 strategy_size = this_strategy_size;
869 /* Consider alternative save_a0_merge only if we don't need a
870 frame pointer, size is nonzero and the user hasn't
871 changed the calling conventions of a0. */
872 if (! frame_pointer_needed && size
873 && call_really_used_regs [FIRST_ADDRESS_REGNUM]
874 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
876 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
877 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
878 /* Insn: mov sp, a0. */
879 this_strategy_size++;
882 /* Insn: add size, a0. */
883 this_strategy_size += SIZE_ADD_AX (size);
885 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
886 this_strategy_size += 3 * num_regs_to_save;
888 if (this_strategy_size < strategy_size)
890 strategy = save_a0_merge;
891 strategy_size = this_strategy_size;
895 /* Consider alternative save_a0_no_merge if the user hasn't
896 changed the calling conventions of a0. */
897 if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
898 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
900 /* Insn: add -4 * num_regs_to_save, sp. */
901 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
902 /* Insn: mov sp, a0. */
903 this_strategy_size++;
904 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
905 this_strategy_size += 3 * num_regs_to_save;
908 /* Insn: add -size, sp. */
909 this_strategy_size += SIZE_ADD_SP (-size);
912 if (this_strategy_size < strategy_size)
914 strategy = save_a0_no_merge;
915 strategy_size = this_strategy_size;
919 /* Emit the initial SP add, common to all strategies. */
922 case save_sp_no_merge:
923 case save_a0_no_merge:
924 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
926 GEN_INT (-4 * num_regs_to_save))));
930 case save_sp_partial_merge:
931 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
934 xsize = 128 - 4 * num_regs_to_save;
940 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
942 GEN_INT (-(size + 4 * num_regs_to_save)))));
943 /* We'll have to adjust FP register saves according to the
946 /* Since we've already created the stack frame, don't do it
947 again at the end of the function. */
955 /* Now prepare register a0, if we have decided to use it. */
959 case save_sp_no_merge:
960 case save_sp_partial_merge:
965 case save_a0_no_merge:
966 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
967 F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
969 F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
970 reg = gen_rtx_POST_INC (SImode, reg);
977 /* Now actually save the FP registers. */
978 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
979 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
987 /* If we aren't using `a0', use an SP offset. */
990 addr = gen_rtx_PLUS (SImode,
995 addr = stack_pointer_rtx;
1000 F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
1001 gen_rtx_REG (SFmode, i))));
1005 /* Now put the frame pointer into the frame pointer register. */
1006 if (frame_pointer_needed)
1007 F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));
1009 /* Allocate stack for this frame. */
1011 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
/* Load the PIC register if PIC code references the GOT.  */
1015 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
1016 emit_insn (gen_load_pic ());
/* Expand the function epilogue: mirror of mn10300_expand_prologue.
   On AM33/2.0 with live FP registers, pick the cheapest strategy for
   restoring them (SP offsets before/after/partially after adjusting
   SP, or post-increment through a1), then release the local frame and
   emit a rets/ret that also pops the saved general registers.  */
1020 mn10300_expand_epilogue (void)
1022 HOST_WIDE_INT size = mn10300_frame_size ();
1023 int reg_save_bytes = REG_SAVE_BYTES;
1025 if (TARGET_AM33_2 && fp_regs_to_save ())
1027 int num_regs_to_save = fp_regs_to_save (), i;
1030 /* We have several options to restore FP registers. We could
1031 load them from SP offsets, but, if there are enough FP
1032 registers to restore, we win if we use a post-increment
1035 /* If we have a frame pointer, it's the best option, because we
1036 already know it has the value we want. */
1037 if (frame_pointer_needed)
1038 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1039 /* Otherwise, we may use `a1', since it's call-clobbered and
1040 it's never used for return values. But only do so if it's
1041 smaller than using SP offsets. */
1044 enum { restore_sp_post_adjust,
1045 restore_sp_pre_adjust,
1046 restore_sp_partial_adjust,
1047 restore_a1 } strategy;
1048 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1050 /* Consider using sp offsets before adjusting sp. */
1051 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1052 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1053 /* If size is too large, we'll have to adjust SP with an
1055 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1057 /* Insn: add size + 4 * num_regs_to_save, sp. */
1058 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1060 /* If we don't have to restore any non-FP registers,
1061 we'll be able to save one byte by using rets. */
1062 if (! reg_save_bytes)
1063 this_strategy_size--;
1065 if (this_strategy_size < strategy_size)
1067 strategy = restore_sp_post_adjust;
1068 strategy_size = this_strategy_size;
1071 /* Consider using sp offsets after adjusting sp. */
1072 /* Insn: add size, sp. */
1073 this_strategy_size = SIZE_ADD_SP (size);
1074 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1075 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1076 /* We're going to use ret to release the FP registers
1077 save area, so, no savings. */
1079 if (this_strategy_size < strategy_size)
1081 strategy = restore_sp_pre_adjust;
1082 strategy_size = this_strategy_size;
1085 /* Consider using sp offsets after partially adjusting sp.
1086 When size is close to 32Kb, we may be able to adjust SP
1087 with an imm16 add instruction while still using fmov
1089 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1091 /* Insn: add size + 4 * num_regs_to_save
1092 + reg_save_bytes - 252,sp. */
1093 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
1094 + reg_save_bytes - 252);
1095 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1096 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
1097 - 4 * num_regs_to_save,
1099 /* We're going to use ret to release the FP registers
1100 save area, so, no savings. */
1102 if (this_strategy_size < strategy_size)
1104 strategy = restore_sp_partial_adjust;
1105 strategy_size = this_strategy_size;
1109 /* Consider using a1 in post-increment mode, as long as the
1110 user hasn't changed the calling conventions of a1. */
1111 if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
1112 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1114 /* Insn: mov sp,a1. */
1115 this_strategy_size = 1;
1118 /* Insn: add size,a1. */
1119 this_strategy_size += SIZE_ADD_AX (size);
1121 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1122 this_strategy_size += 3 * num_regs_to_save;
1123 /* If size is large enough, we may be able to save a
1125 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1127 /* Insn: mov a1,sp. */
1128 this_strategy_size += 2;
1130 /* If we don't have to restore any non-FP registers,
1131 we'll be able to save one byte by using rets. */
1132 if (! reg_save_bytes)
1133 this_strategy_size--;
1135 if (this_strategy_size < strategy_size)
1137 strategy = restore_a1;
1138 strategy_size = this_strategy_size;
/* Emit the chosen strategy's SP/a1 setup code.  */
1144 case restore_sp_post_adjust:
1147 case restore_sp_pre_adjust:
1148 emit_insn (gen_addsi3 (stack_pointer_rtx,
1154 case restore_sp_partial_adjust:
1155 emit_insn (gen_addsi3 (stack_pointer_rtx,
1157 GEN_INT (size + 4 * num_regs_to_save
1158 + reg_save_bytes - 252)));
1159 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
1163 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1164 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1166 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1174 /* Adjust the selected register, if any, for post-increment. */
1176 reg = gen_rtx_POST_INC (SImode, reg);
1178 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
1179 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
1187 /* If we aren't using a post-increment register, use an
1189 addr = gen_rtx_PLUS (SImode,
1194 addr = stack_pointer_rtx;
1198 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1199 gen_rtx_MEM (SFmode, addr)));
1202 /* If we were using the restore_a1 strategy and the number of
1203 bytes to be released won't fit in the `ret' byte, copy `a1'
1204 to `sp', to avoid having to use `add' to adjust it. */
1205 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
1207 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1212 /* Maybe cut back the stack, except for the register save area.
1214 If the frame pointer exists, then use the frame pointer to
1217 If the stack size + register save area is more than 255 bytes,
1218 then the stack must be cut back here since the size + register
1219 save size is too big for a ret/retf instruction.
1221 Else leave it alone, it will be cut back as part of the
1222 ret/retf instruction, or there wasn't any stack to begin with.
1224 Under no circumstances should the register save area be
1225 deallocated here, that would leave a window where an interrupt
1226 could occur and trash the register save area. */
1227 if (frame_pointer_needed)
1229 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
1232 else if (size + reg_save_bytes > 255)
1234 emit_insn (gen_addsi3 (stack_pointer_rtx,
1240 /* Adjust the stack and restore callee-saved registers, if any. */
1241 if (mn10300_can_use_rets_insn ())
1242 emit_jump_insn (gen_rtx_RETURN (VOIDmode));
1244 emit_jump_insn (gen_return_ret (GEN_INT (size + REG_SAVE_BYTES)));
1247 /* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
1248 This function is for MATCH_PARALLEL and so assumes OP is known to be
1249 parallel. If OP is a multiple store, return a mask indicating which
1250 registers it saves. Return 0 otherwise. */
1253 mn10300_store_multiple_operation (rtx op,
1254 enum machine_mode mode ATTRIBUTE_UNUSED)
1262 count = XVECLEN (op, 0);
1266 /* Check that first instruction has the form (set (sp) (plus A B)) */
1267 elt = XVECEXP (op, 0, 0);
1268 if (GET_CODE (elt) != SET
1269 || (! REG_P (SET_DEST (elt)))
1270 || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
1271 || GET_CODE (SET_SRC (elt)) != PLUS)
1274 /* Check that A is the stack pointer and B is the expected stack size.
1275 For OP to match, each subsequent instruction should push a word onto
1276 the stack. We therefore expect the first instruction to create
1277 COUNT-1 stack slots. */
1278 elt = SET_SRC (elt);
1279 if ((! REG_P (XEXP (elt, 0)))
1280 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1281 || (! CONST_INT_P (XEXP (elt, 1)))
1282 || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4
/* Verify each store element and accumulate the saved-register mask.  */
1286 for (i = 1; i < count; i++)
1288 /* Check that element i is a (set (mem M) R). */
1289 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1290 Remember: the ordering is *not* monotonic. */
1291 elt = XVECEXP (op, 0, i);
1292 if (GET_CODE (elt) != SET
1293 || (! MEM_P (SET_DEST (elt)))
1294 || (! REG_P (SET_SRC (elt))))
1297 /* Remember which registers are to be saved. */
1298 last = REGNO (SET_SRC (elt));
1299 mask |= (1 << last);
1301 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1302 elt = XEXP (SET_DEST (elt), 0);
1303 if (GET_CODE (elt) != PLUS
1304 || (! REG_P (XEXP (elt, 0)))
1305 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1306 || (! CONST_INT_P (XEXP (elt, 1)))
1307 || INTVAL (XEXP (elt, 1)) != -i * 4)
1311 /* All or none of the callee-saved extended registers must be in the set. */
1312 if ((mask & 0x3c000) != 0
1313 && (mask & 0x3c000) != 0x3c000)
1319 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
/* SP can only be copied via an address register (or any general reg
   on AM33); pseudos are narrowed with LIMIT_RELOAD_CLASS for the mode.  */
1322 mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1324 if (x == stack_pointer_rtx && rclass != SP_REGS)
1325 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1328 && !HARD_REGISTER_P (x))
1329 || (GET_CODE (x) == SUBREG
1330 && REG_P (SUBREG_REG (x))
1331 && !HARD_REGISTER_P (SUBREG_REG (x))))
1332 return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1337 /* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
/* Same SP special case as the input-reload hook above.  */
1340 mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1342 if (x == stack_pointer_rtx && rclass != SP_REGS)
1343 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1347 /* Implement TARGET_SECONDARY_RELOAD. */
/* Decide whether moving X in MODE to/from class RCLASS needs an
   intermediate register (returned as a class) or a special reload
   pattern (recorded in SRI).  */
1350 mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1351 enum machine_mode mode, secondary_reload_info *sri)
1353 enum reg_class rclass = (enum reg_class) rclass_i;
1354 enum reg_class xclass = NO_REGS;
1355 unsigned int xregno = INVALID_REGNUM;
/* Resolve X to a hard register class when possible; pseudos are
   looked through with true_regnum.  */
1360 if (xregno >= FIRST_PSEUDO_REGISTER)
1361 xregno = true_regnum (x);
1362 if (xregno != INVALID_REGNUM)
1363 xclass = REGNO_REG_CLASS (xregno);
1368 /* Memory load/stores less than a full word wide can't have an
1369 address or stack pointer destination. They must use a data
1370 register as an intermediate register. */
1371 if (rclass != DATA_REGS
1372 && (mode == QImode || mode == HImode)
1373 && xclass == NO_REGS)
1376 /* We can only move SP to/from an address register. */
1378 && rclass == SP_REGS
1379 && xclass != ADDRESS_REGS)
1380 return ADDRESS_REGS;
1382 && xclass == SP_REGS
1383 && rclass != ADDRESS_REGS
1384 && rclass != SP_OR_ADDRESS_REGS)
1385 return ADDRESS_REGS;
1388 /* We can't directly load sp + const_int into a register;
1389 we must use an address register as an scratch. */
1391 && rclass != SP_REGS
1392 && rclass != SP_OR_ADDRESS_REGS
1393 && rclass != SP_OR_GENERAL_REGS
1394 && GET_CODE (x) == PLUS
1395 && (XEXP (x, 0) == stack_pointer_rtx
1396 || XEXP (x, 1) == stack_pointer_rtx))
1398 sri->icode = CODE_FOR_reload_plus_sp_const;
1402 /* We can only move MDR to/from a data register. */
1403 if (rclass == MDR_REGS && xclass != DATA_REGS)
1405 if (xclass == MDR_REGS && rclass != DATA_REGS)
1408 /* We can't load/store an FP register from a constant address. */
1410 && (rclass == FP_REGS || xclass == FP_REGS)
1411 && (xclass == NO_REGS || rclass == NO_REGS)
/* For a pseudo, look at its memory equivalence to detect a constant
   address.  */
1415 if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
1417 addr = reg_equiv_mem [xregno];
1419 addr = XEXP (addr, 0);
1424 if (addr && CONSTANT_ADDRESS_P (addr))
1425 return GENERAL_REGS;
1428 /* Otherwise assume no secondary reloads are needed. */
/* Size of the local frame: locals plus outgoing-argument area, plus a
   4-byte slot for the return pointer whenever the function makes
   outgoing calls.  */
1433 mn10300_frame_size (void)
1435 /* size includes the fixed stack space needed for function calls. */
1436 int size = get_frame_size () + crtl->outgoing_args_size;
1438 /* And space for the return pointer. */
1439 size += crtl->outgoing_args_size ? 4 : 0;
/* INITIAL_ELIMINATION_OFFSET worker: distance between register FROM
   (arg or frame pointer) and register TO (frame or stack pointer).  */
1445 mn10300_initial_offset (int from, int to)
1449 gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1450 gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1452 if (to == STACK_POINTER_REGNUM)
1453 diff = mn10300_frame_size ();
1455 /* The difference between the argument pointer and the frame pointer
1456 is the size of the callee register save area. */
1457 if (from == ARG_POINTER_REGNUM)
1459 diff += REG_SAVE_BYTES;
1460 diff += 4 * fp_regs_to_save ();
1466 /* Worker function for TARGET_RETURN_IN_MEMORY. */
1469 mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1471 /* Return values > 8 bytes in length in memory. */
/* Zero-sized and BLKmode values also go in memory.  */
1472 return (int_size_in_bytes (type) > 8
1473 || int_size_in_bytes (type) == 0
1474 || TYPE_MODE (type) == BLKmode);
1477 /* Flush the argument registers to the stack for a stdarg function;
1478 return the new argument pointer. */
/* Stores d0 and d1 (the two argument registers) at the internal arg
   pointer, tags the slots with the varargs alias set, and returns the
   arg pointer adjusted past any named words.  */
1480 mn10300_builtin_saveregs (void)
1483 tree fntype = TREE_TYPE (current_function_decl);
/* For non-stdarg (K&R varargs) functions, skip one word of named
   arguments.  */
1484 int argadj = ((!stdarg_p (fntype))
1485 ? UNITS_PER_WORD : 0);
1486 alias_set_type set = get_varargs_alias_set ();
1489 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
1491 offset = crtl->args.arg_offset_rtx;
1493 mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
1494 set_mem_alias_set (mem, set);
1495 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
1497 mem = gen_rtx_MEM (SImode,
1498 plus_constant (crtl->args.internal_arg_pointer, 4));
1499 set_mem_alias_set (mem, set);
1500 emit_move_insn (mem, gen_rtx_REG (SImode, 1));
1502 return copy_to_reg (expand_binop (Pmode, add_optab,
1503 crtl->args.internal_arg_pointer,
1504 offset, 0, 0, OPTAB_LIB_WIDEN));
1508 mn10300_va_start (tree valist, rtx nextarg)
1510 nextarg = expand_builtin_saveregs ();
1511 std_expand_builtin_va_start (valist, nextarg);
1514 /* Return true when a parameter should be passed by reference. */
1517 mn10300_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
1518 enum machine_mode mode, const_tree type,
1519 bool named ATTRIBUTE_UNUSED)
1521 unsigned HOST_WIDE_INT size;
1524 size = int_size_in_bytes (type);
1526 size = GET_MODE_SIZE (mode);
1528 return (size > 8 || size == 0);
1531 /* Return an RTX to represent where a value with mode MODE will be returned
1532 from a function. If the result is NULL_RTX, the argument is pushed. */
1535 mn10300_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
1536 const_tree type, bool named ATTRIBUTE_UNUSED)
1538 rtx result = NULL_RTX;
1541 /* We only support using 2 data registers as argument registers. */
1544 /* Figure out the size of the object to be passed. */
1545 if (mode == BLKmode)
1546 size = int_size_in_bytes (type);
1548 size = GET_MODE_SIZE (mode);
1550 cum->nbytes = (cum->nbytes + 3) & ~3;
1552 /* Don't pass this arg via a register if all the argument registers
1554 if (cum->nbytes > nregs * UNITS_PER_WORD)
1557 /* Don't pass this arg via a register if it would be split between
1558 registers and memory. */
1559 if (type == NULL_TREE
1560 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1563 switch (cum->nbytes / UNITS_PER_WORD)
1566 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
1569 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
1578 /* Update the data in CUM to advance over an argument
1579 of mode MODE and data type TYPE.
1580 (TYPE is null for libcalls where that information may not be available.) */
1583 mn10300_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
1584 const_tree type, bool named ATTRIBUTE_UNUSED)
1586 cum->nbytes += (mode != BLKmode
1587 ? (GET_MODE_SIZE (mode) + 3) & ~3
1588 : (int_size_in_bytes (type) + 3) & ~3);
1591 /* Return the number of bytes of registers to use for an argument passed
1592 partially in registers and partially in memory. */
1595 mn10300_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
1596 tree type, bool named ATTRIBUTE_UNUSED)
1600 /* We only support using 2 data registers as argument registers. */
1603 /* Figure out the size of the object to be passed. */
1604 if (mode == BLKmode)
1605 size = int_size_in_bytes (type);
1607 size = GET_MODE_SIZE (mode);
1609 cum->nbytes = (cum->nbytes + 3) & ~3;
1611 /* Don't pass this arg via a register if all the argument registers
1613 if (cum->nbytes > nregs * UNITS_PER_WORD)
1616 if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1619 /* Don't pass this arg via a register if it would be split between
1620 registers and memory. */
1621 if (type == NULL_TREE
1622 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1625 return nregs * UNITS_PER_WORD - cum->nbytes;
1628 /* Return the location of the function's value. This will be either
1629 $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1630 $d0 and $a0 if the -mreturn-pointer-on-do flag is set. Note that
1631 we only return the PARALLEL for outgoing values; we do not want
1632 callers relying on this extra copy. */
1635 mn10300_function_value (const_tree valtype,
1636 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1640 enum machine_mode mode = TYPE_MODE (valtype);
1642 if (! POINTER_TYPE_P (valtype))
1643 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1644 else if (! TARGET_PTR_A0D0 || ! outgoing
1645 || cfun->returns_struct)
1646 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1648 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1650 = gen_rtx_EXPR_LIST (VOIDmode,
1651 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1655 = gen_rtx_EXPR_LIST (VOIDmode,
1656 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1661 /* Implements TARGET_LIBCALL_VALUE. */
1664 mn10300_libcall_value (enum machine_mode mode,
1665 const_rtx fun ATTRIBUTE_UNUSED)
1667 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1670 /* Implements FUNCTION_VALUE_REGNO_P. */
1673 mn10300_function_value_regno_p (const unsigned int regno)
1675 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1678 /* Output an addition operation. */
1681 mn10300_output_add (rtx operands[3], bool need_flags)
1683 rtx dest, src1, src2;
1684 unsigned int dest_regnum, src1_regnum, src2_regnum;
1685 enum reg_class src1_class, src2_class, dest_class;
1691 dest_regnum = true_regnum (dest);
1692 src1_regnum = true_regnum (src1);
1694 dest_class = REGNO_REG_CLASS (dest_regnum);
1695 src1_class = REGNO_REG_CLASS (src1_regnum);
1697 if (GET_CODE (src2) == CONST_INT)
1699 gcc_assert (dest_regnum == src1_regnum);
1701 if (src2 == const1_rtx && !need_flags)
1703 if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1706 gcc_assert (!need_flags || dest_class != SP_REGS);
1709 else if (CONSTANT_P (src2))
1712 src2_regnum = true_regnum (src2);
1713 src2_class = REGNO_REG_CLASS (src2_regnum);
1715 if (dest_regnum == src1_regnum)
1717 if (dest_regnum == src2_regnum)
1720 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1721 this directly, as below, but when optimizing for space we can sometimes
1722 do better by using a mov+add. For MN103, we claimed that we could
1723 implement a three-operand add because the various move and add insns
1724 change sizes across register classes, and we can often do better than
1725 reload in choosing which operand to move. */
1726 if (TARGET_AM33 && optimize_insn_for_speed_p ())
1727 return "add %2,%1,%0";
1729 /* Catch cases where no extended register was used. */
1730 if (src1_class != EXTENDED_REGS
1731 && src2_class != EXTENDED_REGS
1732 && dest_class != EXTENDED_REGS)
1734 /* We have to copy one of the sources into the destination, then
1735 add the other source to the destination.
1737 Carefully select which source to copy to the destination; a
1738 naive implementation will waste a byte when the source classes
1739 are different and the destination is an address register.
1740 Selecting the lowest cost register copy will optimize this
1742 if (src1_class == dest_class)
1743 return "mov %1,%0\n\tadd %2,%0";
1745 return "mov %2,%0\n\tadd %1,%0";
1748 /* At least one register is an extended register. */
1750 /* The three operand add instruction on the am33 is a win iff the
1751 output register is an extended register, or if both source
1752 registers are extended registers. */
1753 if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1754 return "add %2,%1,%0";
1756 /* It is better to copy one of the sources to the destination, then
1757 perform a 2 address add. The destination in this case must be
1758 an address or data register and one of the sources must be an
1759 extended register and the remaining source must not be an extended
1762 The best code for this case is to copy the extended reg to the
1763 destination, then emit a two address add. */
1764 if (src1_class == EXTENDED_REGS)
1765 return "mov %1,%0\n\tadd %2,%0";
1767 return "mov %2,%0\n\tadd %1,%0";
1770 /* Return 1 if X contains a symbolic expression. We know these
1771 expressions will have one of a few well defined forms, so
1772 we need only check those forms. */
1775 mn10300_symbolic_operand (rtx op,
1776 enum machine_mode mode ATTRIBUTE_UNUSED)
1778 switch (GET_CODE (op))
1785 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1786 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
1787 && CONST_INT_P (XEXP (op, 1)));
1793 /* Try machine dependent ways of modifying an illegitimate address
1794 to be legitimate. If we find one, return the new valid address.
1795 This macro is used in only one place: `memory_address' in explow.c.
1797 OLDX is the address as it was before break_out_memory_refs was called.
1798 In some cases it is useful to look at this to decide what needs to be done.
1800 Normally it is always safe for this macro to do nothing. It exists to
1801 recognize opportunities to optimize the output.
1803 But on a few ports with segmented architectures and indexed addressing
1804 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
1807 mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1808 enum machine_mode mode ATTRIBUTE_UNUSED)
1810 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1811 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
1813 /* Uh-oh. We might have an address for x[n-100000]. This needs
1814 special handling to avoid creating an indexed memory address
1815 with x-100000 as the base. */
1816 if (GET_CODE (x) == PLUS
1817 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
1819 /* Ugly. We modify things here so that the address offset specified
1820 by the index expression is computed first, then added to x to form
1821 the entire address. */
1823 rtx regx1, regy1, regy2, y;
1825 /* Strip off any CONST. */
1827 if (GET_CODE (y) == CONST)
1830 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1832 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1833 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1834 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1835 regx1 = force_reg (Pmode,
1836 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1838 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1844 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
1845 @GOTOFF in `reg'. */
1848 mn10300_legitimize_pic_address (rtx orig, rtx reg)
1852 if (GET_CODE (orig) == LABEL_REF
1853 || (GET_CODE (orig) == SYMBOL_REF
1854 && (CONSTANT_POOL_ADDRESS_P (orig)
1855 || ! MN10300_GLOBAL_P (orig))))
1858 reg = gen_reg_rtx (Pmode);
1860 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1861 x = gen_rtx_CONST (SImode, x);
1862 emit_move_insn (reg, x);
1864 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
1866 else if (GET_CODE (orig) == SYMBOL_REF)
1869 reg = gen_reg_rtx (Pmode);
1871 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1872 x = gen_rtx_CONST (SImode, x);
1873 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1874 x = gen_const_mem (SImode, x);
1876 x = emit_move_insn (reg, x);
1881 set_unique_reg_note (x, REG_EQUAL, orig);
1885 /* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
1886 isn't protected by a PIC unspec; nonzero otherwise. */
1889 mn10300_legitimate_pic_operand_p (rtx x)
1894 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1897 if (GET_CODE (x) == UNSPEC
1898 && (XINT (x, 1) == UNSPEC_PIC
1899 || XINT (x, 1) == UNSPEC_GOT
1900 || XINT (x, 1) == UNSPEC_GOTOFF
1901 || XINT (x, 1) == UNSPEC_PLT
1902 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
1905 fmt = GET_RTX_FORMAT (GET_CODE (x));
1906 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1912 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1913 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
1916 else if (fmt[i] == 'e'
1917 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
1924 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1925 legitimate, and FALSE otherwise.
1927 On the mn10300, the value in the address register must be
1928 in the same memory space/segment as the effective address.
1930 This is problematical for reload since it does not understand
1931 that base+index != index+base in a memory reference.
1933 Note it is still possible to use reg+reg addressing modes,
1934 it's just much more difficult. For a discussion of a possible
1935 workaround and solution, see the comments in pa.c before the
1936 function record_unscaled_index_insn_codes. */
1939 mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1943 if (CONSTANT_ADDRESS_P (x))
1944 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
1946 if (RTX_OK_FOR_BASE_P (x, strict))
1949 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1951 if (GET_CODE (x) == POST_INC)
1952 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1953 if (GET_CODE (x) == POST_MODIFY)
1954 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1955 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1958 if (GET_CODE (x) != PLUS)
1962 index = XEXP (x, 1);
1968 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1969 addressing is hard to satisfy. */
1973 return (REGNO_GENERAL_P (REGNO (base), strict)
1974 && REGNO_GENERAL_P (REGNO (index), strict));
1977 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
1980 if (CONST_INT_P (index))
1981 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
1983 if (CONSTANT_ADDRESS_P (index))
1984 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
1990 mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
1992 if (regno >= FIRST_PSEUDO_REGISTER)
1998 regno = reg_renumber[regno];
2000 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2004 mn10300_legitimize_reload_address (rtx x,
2005 enum machine_mode mode ATTRIBUTE_UNUSED,
2006 int opnum, int type,
2007 int ind_levels ATTRIBUTE_UNUSED)
2009 bool any_change = false;
2011 /* See above re disabling reg+reg addressing for MN103. */
2015 if (GET_CODE (x) != PLUS)
2018 if (XEXP (x, 0) == stack_pointer_rtx)
2020 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2021 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2022 opnum, (enum reload_type) type);
2025 if (XEXP (x, 1) == stack_pointer_rtx)
2027 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2028 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2029 opnum, (enum reload_type) type);
2033 return any_change ? x : NULL_RTX;
2036 /* Used by LEGITIMATE_CONSTANT_P(). Returns TRUE if X is a valid
2037 constant. Note that some "constants" aren't valid, such as TLS
2038 symbols and unconverted GOT-based references, so we eliminate
2042 mn10300_legitimate_constant_p (rtx x)
2044 switch (GET_CODE (x))
2049 if (GET_CODE (x) == PLUS)
2051 if (! CONST_INT_P (XEXP (x, 1)))
2056 /* Only some unspecs are valid as "constants". */
2057 if (GET_CODE (x) == UNSPEC)
2059 switch (XINT (x, 1))
2071 /* We must have drilled down to a symbol. */
2072 if (! mn10300_symbolic_operand (x, Pmode))
/* Undo pic address legitimization for the benefit of debug info.  */

/* NOTE(review): this extract is missing several structural lines
   (function header keywords, braces, early `return orig_x;` arms and
   the assignments of ADDEND/NEED_MEM on the branch arms); only
   comments have been added here — verify against the complete file.  */

mn10300_delegitimize_address (rtx orig_x)

  /* ADDEND collects a register that was added alongside the PIC base;
     RET becomes the recovered symbol.  */
  rtx x = orig_x, ret, addend = NULL;

  /* Only (plus ... (const ...)) Pmode addresses can be delegitimized.  */
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)

  if (XEXP (x, 0) == pic_offset_table_rtx)
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)

      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      /* One of the inner operands must be the PIC register; the other
	 is remembered as ADDEND.  */
      if (x00 == pic_offset_table_rtx)
      else if (x01 == pic_offset_table_rtx)

  /* The remaining operand must be a CONST-wrapped PIC unspec.  */
  if (GET_CODE (x) != CONST)
  if (GET_CODE (x) != UNSPEC)

  ret = XVECEXP (x, 0, 0);
  /* @GOTOFF resolves directly to the symbol; @GOT implies a GOT load,
     so the original must have been a MEM.  */
  if (XINT (x, 1) == UNSPEC_GOTOFF)
  else if (XINT (x, 1) == UNSPEC_GOT)

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  /* Shape checks: MEM-ness must agree, and a GOT load cannot also
     carry a register addend.  */
  if (need_mem != MEM_P (orig_x))
  if (need_mem && addend)

  /* Re-attach any register addend stripped off above.  */
  ret = gen_rtx_PLUS (Pmode, addend, ret);
/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

/* NOTE(review): this extract is missing the switch's case labels,
   braces and several return statements; only comments added here —
   verify against the complete file.  Cost pairs read `speed : size'.  */

mn10300_address_cost (rtx x, bool speed)

  switch (GET_CODE (x))

      /* Bare symbolic addresses.  */
      /* We assume all of these require a 32-bit constant, even though
	 some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

      /* Register + displacement: cost depends on displacement width.  */
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))

      index = XEXP (x, 1);
      if (register_operand (index, SImode))

	  /* Attempt to minimize the number of registers in the address.
	     This is similar to what other ports do.  */
	  if (register_operand (base, SImode))

      index = XEXP (x, 0);

      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
	return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
	return speed ? 0 : 2;
      return speed ? 2 : 6;

  /* Fall back to generic rtx costing for anything unrecognized.  */
  return rtx_cost (x, MEM, speed);
/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

/* NOTE(review): this extract is missing braces, the `scratch = NO_REGS'
   initializations, the TARGET_AM33 split and several return statements;
   only comments added here — verify against the complete file.  */

mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t ifrom, reg_class_t ito)

  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
  if (from == FP_ACC_REGS)

  /* Diagnose invalid moves by costing them as two moves.  */
  /* Moves into SP/MDR/FP need an intermediate scratch class.  */
      scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
    else if (to == MDR_REGS)
      scratch = DATA_REGS;
    else if (to == FP_REGS && to != from)
      scratch = GENERAL_REGS;

  /* Likewise for moves out of SP/MDR/FP.  */
    if (from == SP_REGS)
      scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
    else if (from == MDR_REGS)
      scratch = DATA_REGS;
    else if (from == FP_REGS && to != from)
      scratch = GENERAL_REGS;

  /* An indirect move is priced as the two moves through SCRATCH.  */
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
	    + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))

      if (from == SP_REGS)
	return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  */

      return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
	  && (to == ADDRESS_REGS || to == DATA_REGS))

      if (to == EXTENDED_REGS)
	return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */

      /* The scale here is cycles * 2.  */

      if (from == FP_REGS)

      /* All legal moves between integral registers are single cycle.  */
2292 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2294 Given lack of the form of the address, this must be speed-relative,
2295 though we should never be less expensive than a size-relative register
2296 move cost above. This is not a problem. */
2299 mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2300 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
2302 enum reg_class rclass = (enum reg_class) iclass;
2304 if (rclass == FP_REGS)
/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

/* NOTE(review): this extract is missing the outer switch, most case
   labels, braces, the `total' declaration and the final store through
   PTOTAL; only comments added here — verify against the complete file.  */

mn10300_rtx_costs (rtx x, int code, int outer_code, int *ptotal, bool speed)

  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;

  /* Integer constants, costed by the width needed to encode them.  */
    if (outer_code == SET)

	/* 16-bit integer loads have latency 1, 32-bit loads 2.  */
	if (IN_RANGE (i, -32768, 32767))
	  total = COSTS_N_INSNS (1);
	  total = COSTS_N_INSNS (2);

	/* 16-bit integer operands don't affect latency;
	   24-bit and 32-bit operands add a cycle.  */
	if (IN_RANGE (i, -32768, 32767))
	  total = COSTS_N_INSNS (1);

    /* Size costing of constants, by encoding width.  */
    if (outer_code == SET)
      else if (IN_RANGE (i, -128, 127))
      else if (IN_RANGE (i, -32768, 32767))

	/* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
	if (IN_RANGE (i, -128, 127))
	else if (IN_RANGE (i, -32768, 32767))
	else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))

    /* Symbolic constants.  */
    /* We assume all of these require a 32-bit constant, even though
       some symbol and label references can be relaxed.  */

    switch (XINT (x, 1))
      case UNSPEC_GOTSYM_OFF:
	/* The PIC unspecs also resolve to a 32-bit constant.  */

	/* Assume any non-listed unspec is some sort of arithmetic.  */
	goto do_arith_costs;

    /* Notice the size difference of INC and INC4.  */
    if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))

	i = INTVAL (XEXP (x, 1));
	if (i == 1 || i == 4)

	    total = 1 + rtx_cost (XEXP (x, 0), PLUS, speed);

    goto do_arith_costs;

    total = (speed ? COSTS_N_INSNS (1) : 2);

    /* Notice the size difference of ASL2 and variants.  */
    if (!speed && CONST_INT_P (XEXP (x, 1)))
      switch (INTVAL (XEXP (x, 1)))

    total = (speed ? COSTS_N_INSNS (1) : 3);

    /* Multiplies are slower but compact.  */
    total = (speed ? COSTS_N_INSNS (3) : 2);

    /* Division/modulus.  */
    total = (speed ? COSTS_N_INSNS (39)
	     /* Include space to load+retrieve MDR.  */
	     : code == MOD || code == UMOD ? 6 : 4);

    /* Memory: address cost plus the access itself.  */
    total = mn10300_address_cost (XEXP (x, 0), speed);
      total = COSTS_N_INSNS (2 + total);

    /* Probably not implemented.  Assume external call.  */
    total = (speed ? COSTS_N_INSNS (10) : 7);
2481 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2482 may access it using GOTOFF instead of GOT. */
2485 mn10300_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
2491 symbol = XEXP (rtl, 0);
2492 if (GET_CODE (symbol) != SYMBOL_REF)
2496 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}
2512 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2515 mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2517 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2519 /* This is a strict alignment target, which means that we play
2520 some games to make sure that the locations at which we need
2521 to store <chain> and <disp> wind up at aligned addresses.
2524 0xfc 0xdd mov chain,a1
2526 0xf8 0xed 0x00 btst 0,d1
2530 Note that the two extra insns are effectively nops; they
2531 clobber the flags but do not affect the contents of D0 or D1. */
2533 disp = expand_binop (SImode, sub_optab, fnaddr,
2534 plus_constant (XEXP (m_tramp, 0), 11),
2535 NULL_RTX, 1, OPTAB_DIRECT);
2537 mem = adjust_address (m_tramp, SImode, 0);
2538 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2539 mem = adjust_address (m_tramp, SImode, 4);
2540 emit_move_insn (mem, chain_value);
2541 mem = adjust_address (m_tramp, SImode, 8);
2542 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2543 mem = adjust_address (m_tramp, SImode, 12);
2544 emit_move_insn (mem, disp);
2547 /* Output the assembler code for a C++ thunk function.
2548 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2549 is the decl for the target function. DELTA is an immediate constant
2550 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2551 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2552 additionally added to THIS. Finally jump to the entry point of
2556 mn10300_asm_output_mi_thunk (FILE * file,
2557 tree thunk_fndecl ATTRIBUTE_UNUSED,
2558 HOST_WIDE_INT delta,
2559 HOST_WIDE_INT vcall_offset,
2564 /* Get the register holding the THIS parameter. Handle the case
2565 where there is a hidden first argument for a returned structure. */
2566 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2567 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2569 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2571 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2574 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2578 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2580 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2581 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2582 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2583 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2584 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2587 fputs ("\tjmp ", file);
2588 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
2592 /* Return true if mn10300_output_mi_thunk would be able to output the
2593 assembler code for the thunk function specified by the arguments
2594 it is passed, and false otherwise. */
2597 mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2598 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2599 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2600 const_tree function ATTRIBUTE_UNUSED)
2606 mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
2608 if (REGNO_REG_CLASS (regno) == FP_REGS
2609 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2610 /* Do not store integer values in FP registers. */
2611 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
2613 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2616 if (REGNO_REG_CLASS (regno) == DATA_REGS
2617 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2618 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2619 return GET_MODE_SIZE (mode) <= 4;
2625 mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
2627 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2628 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2631 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2632 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2637 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2644 cc_flags_for_mode (enum machine_mode mode)
2649 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2651 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2653 return CC_FLAG_Z | CC_FLAG_N;
2662 cc_flags_for_code (enum rtx_code code)
2675 case GT: /* ~(Z|(N^V)) */
2676 case LE: /* Z|(N^V) */
2677 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2683 case GTU: /* ~(C | Z) */
2684 case LEU: /* C | Z */
2685 return CC_FLAG_Z | CC_FLAG_C;
2703 mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
2707 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2708 return CC_FLOATmode;
2710 req = cc_flags_for_code (code);
2712 if (req & CC_FLAG_V)
2714 if (req & CC_FLAG_C)
2720 is_load_insn (rtx insn)
2722 if (GET_CODE (PATTERN (insn)) != SET)
2725 return MEM_P (SET_SRC (PATTERN (insn)));
2729 is_store_insn (rtx insn)
2731 if (GET_CODE (PATTERN (insn)) != SET)
2734 return MEM_P (SET_DEST (PATTERN (insn)));
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

/* NOTE(review): this extract is missing braces and several `return
   cost;'-style statements between the condition lines; only comments
   added here — verify against the complete file.  */

mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)

  int timings = get_attr_timings (insn);

  /* Look through PARALLELs to the primary SET.  */
  if (GET_CODE (insn) == PARALLEL)
    insn = XVECEXP (insn, 0, 0);

  if (GET_CODE (dep) == PARALLEL)
    dep = XVECEXP (dep, 0, 0);

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && is_load_insn (dep)
      && is_store_insn (insn))

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! is_store_insn (insn)
	   && GET_CODE (PATTERN (dep)) == SET
	   && GET_CODE (PATTERN (insn)) == SET
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual
     where it says:

       "When the preceeding instruction is a CPU load or
	store instruction, a following FPU instruction
	cannot be executed until the CPU completes the
	latency period even though there are no register
	or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)

  /* Check that the instruction about to scheduled is an FPU instruction.  */
  if (GET_CODE (PATTERN (dep)) != SET)

  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)

  /* Now check to see if the previous instruction is a load or store.  */
  if (! is_load_insn (insn) && ! is_store_insn (insn))

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store preceeds an FPU
     instruction, but is this true ? For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)

  /* Extract the latency value from the timings attribute.  */
  return timings < 100 ? (timings % 10) : (timings % 100);
2815 mn10300_conditional_register_usage (void)
2821 for (i = FIRST_EXTENDED_REGNUM;
2822 i <= LAST_EXTENDED_REGNUM; i++)
2823 fixed_regs[i] = call_used_regs[i] = 1;
2827 for (i = FIRST_FP_REGNUM;
2828 i <= LAST_FP_REGNUM; i++)
2829 fixed_regs[i] = call_used_regs[i] = 1;
2832 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2833 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2836 /* Worker function for TARGET_MD_ASM_CLOBBERS.
2837 We do this in the mn10300 backend to maintain source compatibility
2838 with the old cc0-based compiler. */
2841 mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2842 tree inputs ATTRIBUTE_UNUSED,
2845 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2850 /* A helper function for splitting cbranch patterns after reload. */
2853 mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2857 flags = gen_rtx_REG (cmp_mode, CC_REG);
2858 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2859 x = gen_rtx_SET (VOIDmode, flags, x);
2862 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2863 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2864 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
2868 /* A helper function for matching parallels that set the flags. */
2871 mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
2874 enum machine_mode flags_mode;
2876 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2878 op1 = XVECEXP (PATTERN (insn), 0, 1);
2879 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2881 flags = SET_DEST (op1);
2882 flags_mode = GET_MODE (flags);
2884 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2886 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2889 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2890 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2897 mn10300_split_and_operand_count (rtx op)
2899 HOST_WIDE_INT val = INTVAL (op);
2904 /* High bit is set, look for bits clear at the bottom. */
2905 count = exact_log2 (-val);
2908 /* This is only size win if we can use the asl2 insn. Otherwise we
2909 would be replacing 1 6-byte insn with 2 3-byte insns. */
2910 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2916 /* High bit is clear, look for bits set at the bottom. */
2917 count = exact_log2 (val + 1);
2919 /* Again, this is only a size win with asl2. */
2920 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2926 /* Initialize the GCC target structure.  Each TARGET_* macro below
        overrides a default hook before TARGET_INITIALIZER expands into
        the targetm definition at the bottom of the file.  */
     /* Exception handling: this port uses setjmp/longjmp unwinding.  */
2928 #undef TARGET_EXCEPT_UNWIND_INFO
2929 #define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
     /* Assembler output syntax.  */
2931 #undef TARGET_ASM_ALIGNED_HI_OP
2932 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
     /* Address legitimization and cost hooks.  */
2934 #undef TARGET_LEGITIMIZE_ADDRESS
2935 #define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
2937 #undef TARGET_ADDRESS_COST
2938 #define TARGET_ADDRESS_COST mn10300_address_cost
2939 #undef TARGET_REGISTER_MOVE_COST
2940 #define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
2941 #undef TARGET_MEMORY_MOVE_COST
2942 #define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
2943 #undef TARGET_RTX_COSTS
2944 #define TARGET_RTX_COSTS mn10300_rtx_costs
     /* Assembly-file prologue output.  */
2946 #undef TARGET_ASM_FILE_START
2947 #define TARGET_ASM_FILE_START mn10300_file_start
2948 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
2949 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
2951 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
2952 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
     /* Command-line option handling and defaults.  */
2954 #undef TARGET_DEFAULT_TARGET_FLAGS
2955 #define TARGET_DEFAULT_TARGET_FLAGS MASK_MULT_BUG | MASK_PTR_A0D0
2956 #undef TARGET_HANDLE_OPTION
2957 #define TARGET_HANDLE_OPTION mn10300_handle_option
2958 #undef TARGET_OPTION_OVERRIDE
2959 #define TARGET_OPTION_OVERRIDE mn10300_option_override
2960 #undef TARGET_OPTION_OPTIMIZATION_TABLE
2961 #define TARGET_OPTION_OPTIMIZATION_TABLE mn10300_option_optimization_table
2963 #undef TARGET_ENCODE_SECTION_INFO
2964 #define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
     /* Calling conventions: argument passing, return values, varargs.  */
2966 #undef TARGET_PROMOTE_PROTOTYPES
2967 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
2968 #undef TARGET_RETURN_IN_MEMORY
2969 #define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
2970 #undef TARGET_PASS_BY_REFERENCE
2971 #define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
2972 #undef TARGET_CALLEE_COPIES
2973 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
2974 #undef TARGET_ARG_PARTIAL_BYTES
2975 #define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
2976 #undef TARGET_FUNCTION_ARG
2977 #define TARGET_FUNCTION_ARG mn10300_function_arg
2978 #undef TARGET_FUNCTION_ARG_ADVANCE
2979 #define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
2981 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
2982 #define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
2983 #undef TARGET_EXPAND_BUILTIN_VA_START
2984 #define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
2986 #undef TARGET_CASE_VALUES_THRESHOLD
2987 #define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
     /* Address validity and delegitimization.  */
2989 #undef TARGET_LEGITIMATE_ADDRESS_P
2990 #define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
2991 #undef TARGET_DELEGITIMIZE_ADDRESS
2992 #define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
     /* Reload: register-class preferences and secondary reloads.  */
2994 #undef TARGET_PREFERRED_RELOAD_CLASS
2995 #define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
2996 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
2997 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
2998   mn10300_preferred_output_reload_class
2999 #undef TARGET_SECONDARY_RELOAD
3000 #define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
     /* Trampolines for nested functions.  */
3002 #undef TARGET_TRAMPOLINE_INIT
3003 #define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
     /* Function and libcall return-value registers.  */
3005 #undef TARGET_FUNCTION_VALUE
3006 #define TARGET_FUNCTION_VALUE mn10300_function_value
3007 #undef TARGET_LIBCALL_VALUE
3008 #define TARGET_LIBCALL_VALUE mn10300_libcall_value
     /* MI thunks for C++ virtual-call adjustments.  */
3010 #undef TARGET_ASM_OUTPUT_MI_THUNK
3011 #define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3012 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3013 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
     /* Instruction scheduling.  */
3015 #undef TARGET_SCHED_ADJUST_COST
3016 #define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3018 #undef TARGET_CONDITIONAL_REGISTER_USAGE
3019 #define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
     /* Extra registers clobbered by asm statements (cf. the EPSW clobber
        built earlier in this file).  */
3021 #undef TARGET_MD_ASM_CLOBBERS
3022 #define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
     /* Tell the middle-end which hard register holds condition codes.  */
3024 #undef TARGET_FLAGS_REGNUM
3025 #define TARGET_FLAGS_REGNUM CC_REG
     /* The single target-hook vector for this backend, built from all of
        the TARGET_* overrides above.  */
3027 struct gcc_target targetm = TARGET_INITIALIZER;