1 /* Subroutines for insn-output.c for Matsushita MN10300 series
2 Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004,
3 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc.
4 Contributed by Jeff Law (law@cygnus.com).
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
24 #include "coretypes.h"
29 #include "hard-reg-set.h"
30 #include "insn-config.h"
31 #include "conditions.h"
33 #include "insn-attr.h"
41 #include "diagnostic-core.h"
43 #include "tm-constrs.h"
45 #include "target-def.h"
48 /* This is used in the am33_2.0-linux-gnu port, in which global symbol
49 names are not prefixed by underscores, to tell whether to prefix a
50 label with a plus sign or not, so that the assembler can tell
51 symbol names from register names. */
52 int mn10300_protect_label;
54 /* The selected processor. */
55 enum processor_type mn10300_processor = PROCESSOR_DEFAULT;
57 /* Processor type to select for tuning. */
58 static const char * mn10300_tune_string = NULL;
60 /* Selected processor type for tuning. */
61 enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;
63 /* The size of the callee register save area. Right now we save everything
64 on entry since it costs us nothing in code size. It does cost us from a
65 speed standpoint, so we want to optimize this sooner or later. */
66 #define REG_SAVE_BYTES (4 * df_regs_ever_live_p (2) \
67 + 4 * df_regs_ever_live_p (3) \
68 + 4 * df_regs_ever_live_p (6) \
69 + 4 * df_regs_ever_live_p (7) \
70 + 16 * (df_regs_ever_live_p (14) \
71 || df_regs_ever_live_p (15) \
72 || df_regs_ever_live_p (16) \
73 || df_regs_ever_live_p (17)))
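/* In the numbering used above, registers 2 and 3 are d2/d3, 6 and 7 are
   a2/a3, and 14 through 17 are the callee-saved extended registers, which
   movm can only push as a block; hence the single 16-byte term when any
   one of them is live.  */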
75 /* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */
76 static const struct default_options mn10300_option_optimization_table[] =
78 { OPT_LEVELS_1_PLUS, OPT_fomit_frame_pointer, NULL, 1 },
79 { OPT_LEVELS_NONE, 0, NULL, 0 }
87 static int cc_flags_for_mode (enum machine_mode);
88 static int cc_flags_for_code (enum rtx_code);
90 /* Implement TARGET_HANDLE_OPTION. */
93 mn10300_handle_option (size_t code,
94 const char *arg ATTRIBUTE_UNUSED,
100 mn10300_processor = value ? PROCESSOR_AM33 : PROCESSOR_MN10300;
104 mn10300_processor = (value
106 : MIN (PROCESSOR_AM33, PROCESSOR_DEFAULT));
110 mn10300_processor = (value ? PROCESSOR_AM34 : PROCESSOR_DEFAULT);
114 mn10300_tune_string = arg;
122 /* Implement TARGET_OPTION_OVERRIDE. */
125 mn10300_option_override (void)
128 target_flags &= ~MASK_MULT_BUG;
131 /* Disable scheduling for the MN10300 as we do
132 not have timing information available for it. */
133 flag_schedule_insns = 0;
134 flag_schedule_insns_after_reload = 0;
136 /* Force enable splitting of wide types, as otherwise it is trivial
137 to run out of registers. Indeed, this works so well that register
138 allocation problems are now more common *without* optimization,
139 when this flag is not enabled by default. */
140 flag_split_wide_types = 1;
143 if (mn10300_tune_string)
145 if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
146 mn10300_tune_cpu = PROCESSOR_MN10300;
147 else if (strcasecmp (mn10300_tune_string, "am33") == 0)
148 mn10300_tune_cpu = PROCESSOR_AM33;
149 else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
150 mn10300_tune_cpu = PROCESSOR_AM33_2;
151 else if (strcasecmp (mn10300_tune_string, "am34") == 0)
152 mn10300_tune_cpu = PROCESSOR_AM34;
154 error ("-mtune= expects mn10300, am33, am33-2, or am34");
159 mn10300_file_start (void)
161 default_file_start ();
164 fprintf (asm_out_file, "\t.am33_2\n");
165 else if (TARGET_AM33)
166 fprintf (asm_out_file, "\t.am33\n");
169 /* Note: This list must match the liw_op attribute in mn10300.md. */
171 static const char *liw_op_names[] =
173 "add", "cmp", "sub", "mov",
179 /* Print operand X using operand code CODE to assembly language output file
183 mn10300_print_operand (FILE *file, rtx x, int code)
189 unsigned int liw_op = UINTVAL (x);
191 gcc_assert (TARGET_ALLOW_LIW);
192 gcc_assert (liw_op < LIW_OP_MAX);
193 fputs (liw_op_names[liw_op], file);
200 enum rtx_code cmp = GET_CODE (x);
201 enum machine_mode mode = GET_MODE (XEXP (x, 0));
206 cmp = reverse_condition (cmp);
207 have_flags = cc_flags_for_mode (mode);
218 /* bge is smaller than bnc. */
219 str = (have_flags & CC_FLAG_V ? "ge" : "nc");
222 str = (have_flags & CC_FLAG_V ? "lt" : "ns");
270 gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
276 /* This is used for the operand to a call instruction;
277 if it's a REG, enclose it in parens, else output
278 the operand normally. */
282 mn10300_print_operand (file, x, 0);
286 mn10300_print_operand (file, x, 0);
290 switch (GET_CODE (x))
294 output_address (XEXP (x, 0));
299 fprintf (file, "fd%d", REGNO (x) - 18);
307 /* This is the least significant word of a 64-bit value. */
309 switch (GET_CODE (x))
313 output_address (XEXP (x, 0));
318 fprintf (file, "%s", reg_names[REGNO (x)]);
322 fprintf (file, "%s", reg_names[subreg_regno (x)]);
330 switch (GET_MODE (x))
333 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
334 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
335 fprintf (file, "0x%lx", val[0]);
338 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
339 REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
340 fprintf (file, "0x%lx", val[0]);
344 mn10300_print_operand_address (file,
345 GEN_INT (CONST_DOUBLE_LOW (x)));
356 split_double (x, &low, &high);
357 fprintf (file, "%ld", (long)INTVAL (low));
366 /* Similarly, but for the most significant word. */
368 switch (GET_CODE (x))
372 x = adjust_address (x, SImode, 4);
373 output_address (XEXP (x, 0));
378 fprintf (file, "%s", reg_names[REGNO (x) + 1]);
382 fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
390 switch (GET_MODE (x))
393 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
394 REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
395 fprintf (file, "0x%lx", val[1]);
401 mn10300_print_operand_address (file,
402 GEN_INT (CONST_DOUBLE_HIGH (x)));
413 split_double (x, &low, &high);
414 fprintf (file, "%ld", (long)INTVAL (high));
425 if (REG_P (XEXP (x, 0)))
426 output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
428 output_address (XEXP (x, 0));
433 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
434 fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
438 gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
439 fprintf (file, "%d", (int)(INTVAL (x) & 0xff));
442 /* For shift counts. The hardware ignores the upper bits of
443 any immediate, but the assembler will flag an out of range
444 shift count as an error. So we mask off the high bits
445 of the immediate here. */
449 fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
455 switch (GET_CODE (x))
459 output_address (XEXP (x, 0));
468 fprintf (file, "%s", reg_names[REGNO (x)]);
472 fprintf (file, "%s", reg_names[subreg_regno (x)]);
475 /* This will only be single precision.... */
481 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
482 REAL_VALUE_TO_TARGET_SINGLE (rv, val);
483 fprintf (file, "0x%lx", val);
493 mn10300_print_operand_address (file, x);
502 /* Output assembly language output for the address ADDR to FILE. */
505 mn10300_print_operand_address (FILE *file, rtx addr)
507 switch (GET_CODE (addr))
510 mn10300_print_operand (file, XEXP (addr, 0), 0);
515 mn10300_print_operand (file, XEXP (addr, 0), 0);
518 mn10300_print_operand (file, XEXP (addr, 1), 0);
522 mn10300_print_operand (file, addr, 0);
526 rtx base = XEXP (addr, 0);
527 rtx index = XEXP (addr, 1);
529 if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
535 gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
537 gcc_assert (REG_OK_FOR_BASE_P (base));
539 mn10300_print_operand (file, index, 0);
541 mn10300_print_operand (file, base, 0);
545 output_addr_const (file, addr);
548 output_addr_const (file, addr);
553 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.
555 Used for PIC-specific UNSPECs. */
558 mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
560 if (GET_CODE (x) == UNSPEC)
565 /* GLOBAL_OFFSET_TABLE or local symbols, no suffix. */
566 output_addr_const (file, XVECEXP (x, 0, 0));
569 output_addr_const (file, XVECEXP (x, 0, 0));
570 fputs ("@GOT", file);
573 output_addr_const (file, XVECEXP (x, 0, 0));
574 fputs ("@GOTOFF", file);
577 output_addr_const (file, XVECEXP (x, 0, 0));
578 fputs ("@PLT", file);
580 case UNSPEC_GOTSYM_OFF:
581 assemble_name (file, GOT_SYMBOL_NAME);
583 output_addr_const (file, XVECEXP (x, 0, 0));
595 /* Count the number of FP registers that have to be saved. */
597 fp_regs_to_save (void)
604 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
605 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
611 /* Print a set of registers in the format required by "movm" and "ret".
612 Register K is saved if bit K of MASK is set. The data and address
613 registers can be stored individually, but the extended registers cannot.
614 We assume that the mask already takes that into account. For instance,
615 bits 14 to 17 must have the same value. */
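/* For example, a mask selecting d2, d3 and the extended block would print
   as "[d2,d3,exreg1]" (an illustrative example; the exact names come from
   reg_names).  */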
618 mn10300_print_reg_list (FILE *file, int mask)
626 for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
627 if ((mask & (1 << i)) != 0)
631 fputs (reg_names [i], file);
635 if ((mask & 0x3c000) != 0)
637 gcc_assert ((mask & 0x3c000) == 0x3c000);
640 fputs ("exreg1", file);
647 /* If the MDR register is never clobbered, we can use the RETF instruction
648 which takes the address from the MDR register. This is 3 cycles faster
649 than having to load the address from the stack. */
652 mn10300_can_use_retf_insn (void)
654 /* Don't bother if we're not optimizing. In this case we won't
655 have proper access to df_regs_ever_live_p. */
659 /* EH returns alter the saved return address; MDR is not current. */
660 if (crtl->calls_eh_return)
663 /* Obviously not if MDR is ever clobbered. */
664 if (df_regs_ever_live_p (MDR_REG))
667 /* ??? Careful not to use this during expand_epilogue etc. */
668 gcc_assert (!in_sequence_p ());
669 return leaf_function_p ();
673 mn10300_can_use_rets_insn (void)
675 return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
678 /* Returns the set of live, callee-saved registers as a bitmask. The
679 callee-saved extended registers cannot be stored individually, so
680 all of them will be included in the mask if any one of them is used. */
683 mn10300_get_live_callee_saved_regs (void)
689 for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
690 if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
692 if ((mask & 0x3c000) != 0)
701 RTX_FRAME_RELATED_P (r) = 1;
705 /* Generate an instruction that pushes several registers onto the stack.
706 Register K will be saved if bit K in MASK is set. The function does
707 nothing if MASK is zero.
709 To be compatible with the "movm" instruction, the lowest-numbered
710 register must be stored in the lowest slot. If MASK is the set
711 { R1,...,RN }, where R1...RN are ordered least first, the generated
712 instruction will have the form:
715 (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
716 (set (mem:SI (plus:SI (reg:SI 9)
720 (set (mem:SI (plus:SI (reg:SI 9)
725 mn10300_gen_multiple_store (unsigned int mask)
727 /* The order in which registers are stored, from SP-4 through SP-N*4. */
728 static const unsigned int store_order[8] = {
729 /* e2, e3: never saved */
730 FIRST_EXTENDED_REGNUM + 4,
731 FIRST_EXTENDED_REGNUM + 5,
732 FIRST_EXTENDED_REGNUM + 6,
733 FIRST_EXTENDED_REGNUM + 7,
734 /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved. */
735 FIRST_DATA_REGNUM + 2,
736 FIRST_DATA_REGNUM + 3,
737 FIRST_ADDRESS_REGNUM + 2,
738 FIRST_ADDRESS_REGNUM + 3,
739 /* d0, d1, a0, a1, mdr, lir, lar: never saved. */
749 for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
751 unsigned regno = store_order[i];
753 if (((mask >> regno) & 1) == 0)
757 x = plus_constant (stack_pointer_rtx, count * -4);
758 x = gen_frame_mem (SImode, x);
759 x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
762 /* Remove the register from the mask so that... */
763 mask &= ~(1u << regno);
766 /* ... we can make sure that we didn't try to use a register
767 not listed in the store order. */
768 gcc_assert (mask == 0);
770 /* Create the instruction that updates the stack pointer. */
771 x = plus_constant (stack_pointer_rtx, count * -4);
772 x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
775 /* We need one PARALLEL element to update the stack pointer and
776 an additional element for each register that is stored. */
777 x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
782 mn10300_expand_prologue (void)
784 HOST_WIDE_INT size = mn10300_frame_size ();
786 /* If we use any of the callee-saved registers, save them now. */
787 mn10300_gen_multiple_store (mn10300_get_live_callee_saved_regs ());
789 if (TARGET_AM33_2 && fp_regs_to_save ())
791 int num_regs_to_save = fp_regs_to_save (), i;
797 save_sp_partial_merge,
801 unsigned int strategy_size = (unsigned)-1, this_strategy_size;
804 /* We have several different strategies to save FP registers.
805 We can store them using SP offsets, which is beneficial if
806 there are just a few registers to save, or we can use `a0' in
807 post-increment mode (`a0' is the only call-clobbered address
808 register that is never used to pass information to a
809 function). Furthermore, if we don't need a frame pointer, we
810 can merge the two SP adds into a single one, but this isn't
811 always beneficial; sometimes we can just split the two adds
812 so that we don't exceed a 16-bit constant size. The code
813 below will select which strategy to use, so as to generate
814 the smallest code. Ties are broken in favor of shorter sequences
815 (in terms of number of instructions). */
817 #define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
818 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
819 #define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
820 : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)
822 /* We add 0 * (S) in two places to promote to the type of S,
823 so that all arms of the conditional have the same type. */
824 #define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
825 (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
826 : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
827 + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
829 #define SIZE_FMOV_SP_(S,N) \
830 (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
831 SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
832 (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
833 #define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
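/* An illustrative example of the estimates above: with size == 64 and four
   FP registers to save, SIZE_ADD_SP (-(64 + 16)) is 3 bytes, since -80 fits
   in a signed 8-bit immediate, while SIZE_FMOV_SP (64, 4) is 16 bytes, i.e.
   four 4-byte "fmov fs#, (##, sp)" stores with single-byte displacements.  */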
835 /* Consider alternative save_sp_merge only if we don't need the
836 frame pointer and size is nonzero. */
837 if (! frame_pointer_needed && size)
839 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
840 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
841 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
842 this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);
844 if (this_strategy_size < strategy_size)
846 strategy = save_sp_merge;
847 strategy_size = this_strategy_size;
851 /* Consider alternative save_sp_no_merge unconditionally. */
852 /* Insn: add -4 * num_regs_to_save, sp. */
853 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
854 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
855 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
858 /* Insn: add -size, sp. */
859 this_strategy_size += SIZE_ADD_SP (-size);
862 if (this_strategy_size < strategy_size)
864 strategy = save_sp_no_merge;
865 strategy_size = this_strategy_size;
868 /* Consider alternative save_sp_partial_merge only if we don't
869 need a frame pointer and size is reasonably large. */
870 if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
872 /* Insn: add -128, sp. */
873 this_strategy_size = SIZE_ADD_SP (-128);
874 /* Insn: fmov fs#, (##, sp), for each fs# to be saved. */
875 this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
879 /* Insn: add 128-size, sp. */
880 this_strategy_size += SIZE_ADD_SP (128 - size);
883 if (this_strategy_size < strategy_size)
885 strategy = save_sp_partial_merge;
886 strategy_size = this_strategy_size;
890 /* Consider alternative save_a0_merge only if we don't need a
891 frame pointer, size is nonzero and the user hasn't
892 changed the calling conventions of a0. */
893 if (! frame_pointer_needed && size
894 && call_really_used_regs [FIRST_ADDRESS_REGNUM]
895 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
897 /* Insn: add -(size + 4 * num_regs_to_save), sp. */
898 this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
899 /* Insn: mov sp, a0. */
900 this_strategy_size++;
903 /* Insn: add size, a0. */
904 this_strategy_size += SIZE_ADD_AX (size);
906 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
907 this_strategy_size += 3 * num_regs_to_save;
909 if (this_strategy_size < strategy_size)
911 strategy = save_a0_merge;
912 strategy_size = this_strategy_size;
916 /* Consider alternative save_a0_no_merge if the user hasn't
917 changed the calling conventions of a0. */
918 if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
919 && ! fixed_regs[FIRST_ADDRESS_REGNUM])
921 /* Insn: add -4 * num_regs_to_save, sp. */
922 this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
923 /* Insn: mov sp, a0. */
924 this_strategy_size++;
925 /* Insn: fmov fs#, (a0+), for each fs# to be saved. */
926 this_strategy_size += 3 * num_regs_to_save;
929 /* Insn: add -size, sp. */
930 this_strategy_size += SIZE_ADD_SP (-size);
933 if (this_strategy_size < strategy_size)
935 strategy = save_a0_no_merge;
936 strategy_size = this_strategy_size;
940 /* Emit the initial SP add, common to all strategies. */
943 case save_sp_no_merge:
944 case save_a0_no_merge:
945 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
947 GEN_INT (-4 * num_regs_to_save))));
951 case save_sp_partial_merge:
952 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
955 xsize = 128 - 4 * num_regs_to_save;
961 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
963 GEN_INT (-(size + 4 * num_regs_to_save)))));
964 /* We'll have to adjust FP register saves according to the
967 /* Since we've already created the stack frame, don't do it
968 again at the end of the function. */
976 /* Now prepare register a0, if we have decided to use it. */
980 case save_sp_no_merge:
981 case save_sp_partial_merge:
986 case save_a0_no_merge:
987 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
988 F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
990 F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
991 reg = gen_rtx_POST_INC (SImode, reg);
998 /* Now actually save the FP registers. */
999 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
1000 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
1008 /* If we aren't using `a0', use an SP offset. */
1011 addr = gen_rtx_PLUS (SImode,
1016 addr = stack_pointer_rtx;
1021 F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
1022 gen_rtx_REG (SFmode, i))));
1026 /* Now put the frame pointer into the frame pointer register. */
1027 if (frame_pointer_needed)
1028 F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));
1030 /* Allocate stack for this frame. */
1032 F (emit_insn (gen_addsi3 (stack_pointer_rtx,
1036 if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
1037 emit_insn (gen_load_pic ());
1041 mn10300_expand_epilogue (void)
1043 HOST_WIDE_INT size = mn10300_frame_size ();
1044 int reg_save_bytes = REG_SAVE_BYTES;
1046 if (TARGET_AM33_2 && fp_regs_to_save ())
1048 int num_regs_to_save = fp_regs_to_save (), i;
1051 /* We have several options to restore FP registers. We could
1052 load them from SP offsets, but, if there are enough FP
1053 registers to restore, we win if we use a post-increment
1056 /* If we have a frame pointer, it's the best option, because we
1057 already know it has the value we want. */
1058 if (frame_pointer_needed)
1059 reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
1060 /* Otherwise, we may use `a1', since it's call-clobbered and
1061 it's never used for return values. But only do so if it's
1062 smaller than using SP offsets. */
1065 enum { restore_sp_post_adjust,
1066 restore_sp_pre_adjust,
1067 restore_sp_partial_adjust,
1068 restore_a1 } strategy;
1069 unsigned int this_strategy_size, strategy_size = (unsigned)-1;
1071 /* Consider using sp offsets before adjusting sp. */
1072 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1073 this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
1074 /* If size is too large, we'll have to adjust SP with an
1076 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1078 /* Insn: add size + 4 * num_regs_to_save, sp. */
1079 this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
1081 /* If we don't have to restore any non-FP registers,
1082 we'll be able to save one byte by using rets. */
1083 if (! reg_save_bytes)
1084 this_strategy_size--;
1086 if (this_strategy_size < strategy_size)
1088 strategy = restore_sp_post_adjust;
1089 strategy_size = this_strategy_size;
1092 /* Consider using sp offsets after adjusting sp. */
1093 /* Insn: add size, sp. */
1094 this_strategy_size = SIZE_ADD_SP (size);
1095 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1096 this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
1097 /* We're going to use ret to release the FP register
1098 save area, so there are no savings here. */
1100 if (this_strategy_size < strategy_size)
1102 strategy = restore_sp_pre_adjust;
1103 strategy_size = this_strategy_size;
1106 /* Consider using sp offsets after partially adjusting sp.
1107 When size is close to 32KB, we may be able to adjust SP
1108 with an imm16 add instruction while still using fmov
1110 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1112 /* Insn: add size + 4 * num_regs_to_save
1113 + reg_save_bytes - 252,sp. */
1114 this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
1115 + reg_save_bytes - 252);
1116 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1117 this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
1118 - 4 * num_regs_to_save,
1120 /* We're going to use ret to release the FP register
1121 save area, so there are no savings here. */
1123 if (this_strategy_size < strategy_size)
1125 strategy = restore_sp_partial_adjust;
1126 strategy_size = this_strategy_size;
1130 /* Consider using a1 in post-increment mode, as long as the
1131 user hasn't changed the calling conventions of a1. */
1132 if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
1133 && ! fixed_regs[FIRST_ADDRESS_REGNUM+1])
1135 /* Insn: mov sp,a1. */
1136 this_strategy_size = 1;
1139 /* Insn: add size,a1. */
1140 this_strategy_size += SIZE_ADD_AX (size);
1142 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1143 this_strategy_size += 3 * num_regs_to_save;
1144 /* If size is large enough, we may be able to save a
1146 if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
1148 /* Insn: mov a1,sp. */
1149 this_strategy_size += 2;
1151 /* If we don't have to restore any non-FP registers,
1152 we'll be able to save one byte by using rets. */
1153 if (! reg_save_bytes)
1154 this_strategy_size--;
1156 if (this_strategy_size < strategy_size)
1158 strategy = restore_a1;
1159 strategy_size = this_strategy_size;
1165 case restore_sp_post_adjust:
1168 case restore_sp_pre_adjust:
1169 emit_insn (gen_addsi3 (stack_pointer_rtx,
1175 case restore_sp_partial_adjust:
1176 emit_insn (gen_addsi3 (stack_pointer_rtx,
1178 GEN_INT (size + 4 * num_regs_to_save
1179 + reg_save_bytes - 252)));
1180 size = 252 - reg_save_bytes - 4 * num_regs_to_save;
1184 reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
1185 emit_insn (gen_movsi (reg, stack_pointer_rtx));
1187 emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
1195 /* Adjust the selected register, if any, for post-increment. */
1197 reg = gen_rtx_POST_INC (SImode, reg);
1199 for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
1200 if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
1208 /* If we aren't using a post-increment register, use an
1210 addr = gen_rtx_PLUS (SImode,
1215 addr = stack_pointer_rtx;
1219 emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
1220 gen_rtx_MEM (SFmode, addr)));
1223 /* If we were using the restore_a1 strategy and the number of
1224 bytes to be released won't fit in the `ret' byte, copy `a1'
1225 to `sp', to avoid having to use `add' to adjust it. */
1226 if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
1228 emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
1233 /* Maybe cut back the stack, except for the register save area.
1235 If the frame pointer exists, then use the frame pointer to
1238 If the stack size + register save area is more than 255 bytes,
1239 then the stack must be cut back here since the size + register
1240 save size is too big for a ret/retf instruction.
1242 Else leave it alone, it will be cut back as part of the
1243 ret/retf instruction, or there wasn't any stack to begin with.
1245 Under no circumstances should the register save area be
1246 deallocated here, that would leave a window where an interrupt
1247 could occur and trash the register save area. */
1248 if (frame_pointer_needed)
1250 emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
1253 else if (size + reg_save_bytes > 255)
1255 emit_insn (gen_addsi3 (stack_pointer_rtx,
1261 /* Adjust the stack and restore callee-saved registers, if any. */
1262 if (mn10300_can_use_rets_insn ())
1263 emit_jump_insn (gen_rtx_RETURN (VOIDmode));
1265 emit_jump_insn (gen_return_ret (GEN_INT (size + REG_SAVE_BYTES)));
1268 /* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
1269 This function is for MATCH_PARALLEL and so assumes OP is known to be
1270 parallel. If OP is a multiple store, return a mask indicating which
1271 registers it saves. Return 0 otherwise. */
1274 mn10300_store_multiple_operation (rtx op,
1275 enum machine_mode mode ATTRIBUTE_UNUSED)
1283 count = XVECLEN (op, 0);
1287 /* Check that first instruction has the form (set (sp) (plus A B)) */
1288 elt = XVECEXP (op, 0, 0);
1289 if (GET_CODE (elt) != SET
1290 || (! REG_P (SET_DEST (elt)))
1291 || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
1292 || GET_CODE (SET_SRC (elt)) != PLUS)
1295 /* Check that A is the stack pointer and B is the expected stack size.
1296 For OP to match, each subsequent instruction should push a word onto
1297 the stack. We therefore expect the first instruction to create
1298 COUNT-1 stack slots. */
1299 elt = SET_SRC (elt);
1300 if ((! REG_P (XEXP (elt, 0)))
1301 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1302 || (! CONST_INT_P (XEXP (elt, 1)))
1303 || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
1307 for (i = 1; i < count; i++)
1309 /* Check that element i is a (set (mem M) R). */
1310 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1311 Remember: the ordering is *not* monotonic. */
1312 elt = XVECEXP (op, 0, i);
1313 if (GET_CODE (elt) != SET
1314 || (! MEM_P (SET_DEST (elt)))
1315 || (! REG_P (SET_SRC (elt))))
1318 /* Remember which registers are to be saved. */
1319 last = REGNO (SET_SRC (elt));
1320 mask |= (1 << last);
1322 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1323 elt = XEXP (SET_DEST (elt), 0);
1324 if (GET_CODE (elt) != PLUS
1325 || (! REG_P (XEXP (elt, 0)))
1326 || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
1327 || (! CONST_INT_P (XEXP (elt, 1)))
1328 || INTVAL (XEXP (elt, 1)) != -i * 4)
1332 /* All or none of the callee-saved extended registers must be in the set. */
1333 if ((mask & 0x3c000) != 0
1334 && (mask & 0x3c000) != 0x3c000)
1340 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1343 mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
1345 if (x == stack_pointer_rtx && rclass != SP_REGS)
1346 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1349 && !HARD_REGISTER_P (x))
1350 || (GET_CODE (x) == SUBREG
1351 && REG_P (SUBREG_REG (x))
1352 && !HARD_REGISTER_P (SUBREG_REG (x))))
1353 return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
1358 /* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1361 mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
1363 if (x == stack_pointer_rtx && rclass != SP_REGS)
1364 return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
1368 /* Implement TARGET_SECONDARY_RELOAD. */
1371 mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
1372 enum machine_mode mode, secondary_reload_info *sri)
1374 enum reg_class rclass = (enum reg_class) rclass_i;
1375 enum reg_class xclass = NO_REGS;
1376 unsigned int xregno = INVALID_REGNUM;
1381 if (xregno >= FIRST_PSEUDO_REGISTER)
1382 xregno = true_regnum (x);
1383 if (xregno != INVALID_REGNUM)
1384 xclass = REGNO_REG_CLASS (xregno);
1389 /* Memory load/stores less than a full word wide can't have an
1390 address or stack pointer destination. They must use a data
1391 register as an intermediate register. */
1392 if (rclass != DATA_REGS
1393 && (mode == QImode || mode == HImode)
1394 && xclass == NO_REGS)
1397 /* We can only move SP to/from an address register. */
1399 && rclass == SP_REGS
1400 && xclass != ADDRESS_REGS)
1401 return ADDRESS_REGS;
1403 && xclass == SP_REGS
1404 && rclass != ADDRESS_REGS
1405 && rclass != SP_OR_ADDRESS_REGS)
1406 return ADDRESS_REGS;
1409 /* We can't directly load sp + const_int into a register;
1410 we must use an address register as a scratch. */
1412 && rclass != SP_REGS
1413 && rclass != SP_OR_ADDRESS_REGS
1414 && rclass != SP_OR_GENERAL_REGS
1415 && GET_CODE (x) == PLUS
1416 && (XEXP (x, 0) == stack_pointer_rtx
1417 || XEXP (x, 1) == stack_pointer_rtx))
1419 sri->icode = CODE_FOR_reload_plus_sp_const;
1423 /* We can only move MDR to/from a data register. */
1424 if (rclass == MDR_REGS && xclass != DATA_REGS)
1426 if (xclass == MDR_REGS && rclass != DATA_REGS)
1429 /* We can't load/store an FP register from a constant address. */
1431 && (rclass == FP_REGS || xclass == FP_REGS)
1432 && (xclass == NO_REGS || rclass == NO_REGS))
1436 if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
1438 addr = reg_equiv_mem [xregno];
1440 addr = XEXP (addr, 0);
1445 if (addr && CONSTANT_ADDRESS_P (addr))
1446 return GENERAL_REGS;
1449 /* Otherwise assume no secondary reloads are needed. */
1454 mn10300_frame_size (void)
1456 /* size includes the fixed stack space needed for function calls. */
1457 int size = get_frame_size () + crtl->outgoing_args_size;
1459 /* And space for the return pointer. */
1460 size += crtl->outgoing_args_size ? 4 : 0;
1466 mn10300_initial_offset (int from, int to)
1470 gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
1471 gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);
1473 if (to == STACK_POINTER_REGNUM)
1474 diff = mn10300_frame_size ();
1476 /* The difference between the argument pointer and the frame pointer
1477 is the size of the callee register save area. */
1478 if (from == ARG_POINTER_REGNUM)
1480 diff += REG_SAVE_BYTES;
1481 diff += 4 * fp_regs_to_save ();
1487 /* Worker function for TARGET_RETURN_IN_MEMORY. */
1490 mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
1492 /* Return values > 8 bytes in length in memory. */
1493 return (int_size_in_bytes (type) > 8
1494 || int_size_in_bytes (type) == 0
1495 || TYPE_MODE (type) == BLKmode);
1498 /* Flush the argument registers to the stack for a stdarg function;
1499 return the new argument pointer. */
1501 mn10300_builtin_saveregs (void)
1504 tree fntype = TREE_TYPE (current_function_decl);
1505 int argadj = ((!stdarg_p (fntype))
1506 ? UNITS_PER_WORD : 0);
1507 alias_set_type set = get_varargs_alias_set ();
1510 offset = plus_constant (crtl->args.arg_offset_rtx, argadj);
1512 offset = crtl->args.arg_offset_rtx;
1514 mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
1515 set_mem_alias_set (mem, set);
1516 emit_move_insn (mem, gen_rtx_REG (SImode, 0));
1518 mem = gen_rtx_MEM (SImode,
1519 plus_constant (crtl->args.internal_arg_pointer, 4));
1520 set_mem_alias_set (mem, set);
1521 emit_move_insn (mem, gen_rtx_REG (SImode, 1));
1523 return copy_to_reg (expand_binop (Pmode, add_optab,
1524 crtl->args.internal_arg_pointer,
1525 offset, 0, 0, OPTAB_LIB_WIDEN));
1529 mn10300_va_start (tree valist, rtx nextarg)
1531 nextarg = expand_builtin_saveregs ();
1532 std_expand_builtin_va_start (valist, nextarg);
1535 /* Return true when a parameter should be passed by reference. */
1538 mn10300_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
1539 enum machine_mode mode, const_tree type,
1540 bool named ATTRIBUTE_UNUSED)
1542 unsigned HOST_WIDE_INT size;
1545 size = int_size_in_bytes (type);
1547 size = GET_MODE_SIZE (mode);
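/* Anything wider than the two argument registers is passed by reference,
   as are zero-sized objects and variable-sized objects, whose size comes
   back as -1 and wraps to a huge unsigned value.  */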
1549 return (size > 8 || size == 0);
1552 /* Return an RTX to represent where a value with mode MODE will be returned
1553 from a function. If the result is NULL_RTX, the argument is pushed. */
1556 mn10300_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
1557 const_tree type, bool named ATTRIBUTE_UNUSED)
1559 rtx result = NULL_RTX;
1562 /* We only support using 2 data registers as argument registers. */
1565 /* Figure out the size of the object to be passed. */
1566 if (mode == BLKmode)
1567 size = int_size_in_bytes (type);
1569 size = GET_MODE_SIZE (mode);
1571 cum->nbytes = (cum->nbytes + 3) & ~3;
1573 /* Don't pass this arg via a register if all the argument registers
1575 if (cum->nbytes > nregs * UNITS_PER_WORD)
1578 /* Don't pass this arg via a register if it would be split between
1579 registers and memory. */
1580 if (type == NULL_TREE
1581 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1584 switch (cum->nbytes / UNITS_PER_WORD)
1587 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
1590 result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
1599 /* Update the data in CUM to advance over an argument
1600 of mode MODE and data type TYPE.
1601 (TYPE is null for libcalls where that information may not be available.) */
1604 mn10300_function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
1605 const_tree type, bool named ATTRIBUTE_UNUSED)
1607 cum->nbytes += (mode != BLKmode
1608 ? (GET_MODE_SIZE (mode) + 3) & ~3
1609 : (int_size_in_bytes (type) + 3) & ~3);
1612 /* Return the number of bytes of registers to use for an argument passed
1613 partially in registers and partially in memory. */
1616 mn10300_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
1617 tree type, bool named ATTRIBUTE_UNUSED)
1621 /* We only support using 2 data registers as argument registers. */
1624 /* Figure out the size of the object to be passed. */
1625 if (mode == BLKmode)
1626 size = int_size_in_bytes (type);
1628 size = GET_MODE_SIZE (mode);
1630 cum->nbytes = (cum->nbytes + 3) & ~3;
1632 /* Don't pass this arg via a register if all the argument registers
1634 if (cum->nbytes > nregs * UNITS_PER_WORD)
1637 if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
1640 /* Don't pass this arg via a register if it would be split between
1641 registers and memory. */
1642 if (type == NULL_TREE
1643 && cum->nbytes + size > nregs * UNITS_PER_WORD)
1646 return nregs * UNITS_PER_WORD - cum->nbytes;
1649 /* Return the location of the function's value. This will be either
1650 $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
1651 $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set. Note that
1652 we only return the PARALLEL for outgoing values; we do not want
1653 callers relying on this extra copy. */
1656 mn10300_function_value (const_tree valtype,
1657 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
1661 enum machine_mode mode = TYPE_MODE (valtype);
1663 if (! POINTER_TYPE_P (valtype))
1664 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1665 else if (! TARGET_PTR_A0D0 || ! outgoing
1666 || cfun->returns_struct)
1667 return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);
1669 rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
1671 = gen_rtx_EXPR_LIST (VOIDmode,
1672 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
1676 = gen_rtx_EXPR_LIST (VOIDmode,
1677 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
1682 /* Implements TARGET_LIBCALL_VALUE. */
1685 mn10300_libcall_value (enum machine_mode mode,
1686 const_rtx fun ATTRIBUTE_UNUSED)
1688 return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
1691 /* Implements FUNCTION_VALUE_REGNO_P. */
1694 mn10300_function_value_regno_p (const unsigned int regno)
1696 return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
1699 /* Output an addition operation. */
1702 mn10300_output_add (rtx operands[3], bool need_flags)
1704 rtx dest, src1, src2;
1705 unsigned int dest_regnum, src1_regnum, src2_regnum;
1706 enum reg_class src1_class, src2_class, dest_class;
1712 dest_regnum = true_regnum (dest);
1713 src1_regnum = true_regnum (src1);
1715 dest_class = REGNO_REG_CLASS (dest_regnum);
1716 src1_class = REGNO_REG_CLASS (src1_regnum);
1718 if (CONST_INT_P (src2))
1720 gcc_assert (dest_regnum == src1_regnum);
1722 if (src2 == const1_rtx && !need_flags)
1724 if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
1727 gcc_assert (!need_flags || dest_class != SP_REGS);
1730 else if (CONSTANT_P (src2))
1733 src2_regnum = true_regnum (src2);
1734 src2_class = REGNO_REG_CLASS (src2_regnum);
1736 if (dest_regnum == src1_regnum)
1738 if (dest_regnum == src2_regnum)
1741 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1742 this directly, as below, but when optimizing for space we can sometimes
1743 do better by using a mov+add. For MN103, we claimed that we could
1744 implement a three-operand add because the various move and add insns
1745 change sizes across register classes, and we can often do better than
1746 reload in choosing which operand to move. */
1747 if (TARGET_AM33 && optimize_insn_for_speed_p ())
1748 return "add %2,%1,%0";
1750 /* Catch cases where no extended register was used. */
1751 if (src1_class != EXTENDED_REGS
1752 && src2_class != EXTENDED_REGS
1753 && dest_class != EXTENDED_REGS)
1755 /* We have to copy one of the sources into the destination, then
1756 add the other source to the destination.
1758 Carefully select which source to copy to the destination; a
1759 naive implementation will waste a byte when the source classes
1760 are different and the destination is an address register.
1761 Selecting the lowest cost register copy will optimize this
1763 if (src1_class == dest_class)
1764 return "mov %1,%0\n\tadd %2,%0";
1766 return "mov %2,%0\n\tadd %1,%0";
1769 /* At least one register is an extended register. */
1771 /* The three operand add instruction on the am33 is a win iff the
1772 output register is an extended register, or if both source
1773 registers are extended registers. */
1774 if (dest_class == EXTENDED_REGS || src1_class == src2_class)
1775 return "add %2,%1,%0";
1777 /* It is better to copy one of the sources to the destination, then
1778 perform a 2 address add. The destination in this case must be
1779 an address or data register and one of the sources must be an
1780 extended register and the remaining source must not be an extended
1783 The best code for this case is to copy the extended reg to the
1784 destination, then emit a two address add. */
1785 if (src1_class == EXTENDED_REGS)
1786 return "mov %1,%0\n\tadd %2,%0";
1788 return "mov %2,%0\n\tadd %1,%0";
1791 /* Return 1 if X contains a symbolic expression. We know these
1792 expressions will have one of a few well defined forms, so
1793 we need only check those forms. */
1796 mn10300_symbolic_operand (rtx op,
1797 enum machine_mode mode ATTRIBUTE_UNUSED)
1799 switch (GET_CODE (op))
1806 return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
1807 || GET_CODE (XEXP (op, 0)) == LABEL_REF)
1808 && CONST_INT_P (XEXP (op, 1)));
1814 /* Try machine dependent ways of modifying an illegitimate address
1815 to be legitimate. If we find one, return the new valid address.
1816 This macro is used in only one place: `memory_address' in explow.c.
1818 OLDX is the address as it was before break_out_memory_refs was called.
1819 In some cases it is useful to look at this to decide what needs to be done.
1821 Normally it is always safe for this macro to do nothing. It exists to
1822 recognize opportunities to optimize the output.
1824 But on a few ports with segmented architectures and indexed addressing
1825 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
1828 mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1829 enum machine_mode mode ATTRIBUTE_UNUSED)
1831 if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1832 x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
1834 /* Uh-oh. We might have an address for x[n-100000]. This needs
1835 special handling to avoid creating an indexed memory address
1836 with x-100000 as the base. */
1837 if (GET_CODE (x) == PLUS
1838 && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
1840 /* Ugly. We modify things here so that the address offset specified
1841 by the index expression is computed first, then added to x to form
1842 the entire address. */
1844 rtx regx1, regy1, regy2, y;
1846 /* Strip off any CONST. */
1848 if (GET_CODE (y) == CONST)
1851 if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
1853 regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
1854 regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
1855 regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
1856 regx1 = force_reg (Pmode,
1857 gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
1859 return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
1865 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
1866 @GOTOFF in `reg'. */
1869 mn10300_legitimize_pic_address (rtx orig, rtx reg)
1873 if (GET_CODE (orig) == LABEL_REF
1874 || (GET_CODE (orig) == SYMBOL_REF
1875 && (CONSTANT_POOL_ADDRESS_P (orig)
1876 || ! MN10300_GLOBAL_P (orig))))
1879 reg = gen_reg_rtx (Pmode);
1881 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
1882 x = gen_rtx_CONST (SImode, x);
1883 emit_move_insn (reg, x);
1885 x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
1887 else if (GET_CODE (orig) == SYMBOL_REF)
1890 reg = gen_reg_rtx (Pmode);
1892 x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
1893 x = gen_rtx_CONST (SImode, x);
1894 x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
1895 x = gen_const_mem (SImode, x);
1897 x = emit_move_insn (reg, x);
1902 set_unique_reg_note (x, REG_EQUAL, orig);
1906 /* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
1907 isn't protected by a PIC unspec; nonzero otherwise. */
1910 mn10300_legitimate_pic_operand_p (rtx x)
1915 if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1918 if (GET_CODE (x) == UNSPEC
1919 && (XINT (x, 1) == UNSPEC_PIC
1920 || XINT (x, 1) == UNSPEC_GOT
1921 || XINT (x, 1) == UNSPEC_GOTOFF
1922 || XINT (x, 1) == UNSPEC_PLT
1923 || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
1926 fmt = GET_RTX_FORMAT (GET_CODE (x));
1927 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
1933 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1934 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
1937 else if (fmt[i] == 'e'
1938 && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
1945 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1946 legitimate, and FALSE otherwise.
1948 On the mn10300, the value in the address register must be
1949 in the same memory space/segment as the effective address.
1951 This is problematical for reload since it does not understand
1952 that base+index != index+base in a memory reference.
1954 Note it is still possible to use reg+reg addressing modes,
1955 it's just much more difficult. For a discussion of a possible
1956 workaround and solution, see the comments in pa.c before the
1957 function record_unscaled_index_insn_codes. */
1960 mn10300_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
1964 if (CONSTANT_ADDRESS_P (x))
1965 return !flag_pic || mn10300_legitimate_pic_operand_p (x);
1967 if (RTX_OK_FOR_BASE_P (x, strict))
1970 if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
1972 if (GET_CODE (x) == POST_INC)
1973 return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
1974 if (GET_CODE (x) == POST_MODIFY)
1975 return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
1976 && CONSTANT_ADDRESS_P (XEXP (x, 1)));
1979 if (GET_CODE (x) != PLUS)
1983 index = XEXP (x, 1);
1989 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
1990 addressing is hard to satisfy. */
1994 return (REGNO_GENERAL_P (REGNO (base), strict)
1995 && REGNO_GENERAL_P (REGNO (index), strict));
1998 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
2001 if (CONST_INT_P (index))
2002 return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);
2004 if (CONSTANT_ADDRESS_P (index))
2005 return !flag_pic || mn10300_legitimate_pic_operand_p (index);
2011 mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
2013 if (regno >= FIRST_PSEUDO_REGISTER)
2019 regno = reg_renumber[regno];
2020 if (regno == INVALID_REGNUM)
2023 return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
2027 mn10300_legitimize_reload_address (rtx x,
2028 enum machine_mode mode ATTRIBUTE_UNUSED,
2029 int opnum, int type,
2030 int ind_levels ATTRIBUTE_UNUSED)
2032 bool any_change = false;
2034 /* See above re disabling reg+reg addressing for MN103. */
2038 if (GET_CODE (x) != PLUS)
2041 if (XEXP (x, 0) == stack_pointer_rtx)
2043 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
2044 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2045 opnum, (enum reload_type) type);
2048 if (XEXP (x, 1) == stack_pointer_rtx)
2050 push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
2051 GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
2052 opnum, (enum reload_type) type);
2056 return any_change ? x : NULL_RTX;
2059 /* Used by LEGITIMATE_CONSTANT_P(). Returns TRUE if X is a valid
2060 constant. Note that some "constants" aren't valid, such as TLS
2061 symbols and unconverted GOT-based references, so we eliminate
2065 mn10300_legitimate_constant_p (rtx x)
2067 switch (GET_CODE (x))
2072 if (GET_CODE (x) == PLUS)
2074 if (! CONST_INT_P (XEXP (x, 1)))
2079 /* Only some unspecs are valid as "constants". */
2080 if (GET_CODE (x) == UNSPEC)
2082 switch (XINT (x, 1))
2094 /* We must have drilled down to a symbol. */
2095 if (! mn10300_symbolic_operand (x, Pmode))
2106 /* Undo pic address legitimization for the benefit of debug info. */
2109 mn10300_delegitimize_address (rtx orig_x)
2111 rtx x = orig_x, ret, addend = NULL;
2116 if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
2119 if (XEXP (x, 0) == pic_offset_table_rtx)
2121 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2122 some odd-looking "addresses" that were never valid in the first place.
2123 We need to look harder to avoid warnings being emitted. */
2124 else if (GET_CODE (XEXP (x, 0)) == PLUS)
2126 rtx x0 = XEXP (x, 0);
2127 rtx x00 = XEXP (x0, 0);
2128 rtx x01 = XEXP (x0, 1);
2130 if (x00 == pic_offset_table_rtx)
2132 else if (x01 == pic_offset_table_rtx)
2142 if (GET_CODE (x) != CONST)
2145 if (GET_CODE (x) != UNSPEC)
2148 ret = XVECEXP (x, 0, 0);
2149 if (XINT (x, 1) == UNSPEC_GOTOFF)
2151 else if (XINT (x, 1) == UNSPEC_GOT)
2156 gcc_assert (GET_CODE (ret) == SYMBOL_REF);
2157 if (need_mem != MEM_P (orig_x))
2159 if (need_mem && addend)
2162 ret = gen_rtx_PLUS (Pmode, addend, ret);
2166 /* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2167 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2168 with an address register. */
2171 mn10300_address_cost (rtx x, bool speed)
2176 switch (GET_CODE (x))
2181 /* We assume all of these require a 32-bit constant, even though
2182 some symbol and label references can be relaxed. */
2183 return speed ? 1 : 4;
2191 /* Assume any symbolic offset is a 32-bit constant. */
2192 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2193 if (IN_RANGE (i, -128, 127))
2194 return speed ? 0 : 1;
2197 if (IN_RANGE (i, -0x800000, 0x7fffff))
2203 index = XEXP (x, 1);
2204 if (register_operand (index, SImode))
2206 /* Attempt to minimize the number of registers in the address.
2207 This is similar to what other ports do. */
2208 if (register_operand (base, SImode))
2212 index = XEXP (x, 0);
2215 /* Assume any symbolic offset is a 32-bit constant. */
2216 i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
2217 if (IN_RANGE (i, -128, 127))
2218 return speed ? 0 : 1;
2219 if (IN_RANGE (i, -32768, 32767))
2220 return speed ? 0 : 2;
2221 return speed ? 2 : 6;
2224 return rtx_cost (x, MEM, speed);
2228 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2230 Recall that the base value of 2 is required by assumptions elsewhere
2231 in the body of the compiler, and that cost 2 is special-cased as an
2232 early exit from reload meaning no work is required. */
2235 mn10300_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2236 reg_class_t ifrom, reg_class_t ito)
2238 enum reg_class from = (enum reg_class) ifrom;
2239 enum reg_class to = (enum reg_class) ito;
2240 enum reg_class scratch, test;
2242 /* Simplify the following code by unifying the fp register classes. */
2243 if (to == FP_ACC_REGS)
2245 if (from == FP_ACC_REGS)
2248 /* Diagnose invalid moves by costing them as two moves. */
2253 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2254 else if (to == MDR_REGS)
2255 scratch = DATA_REGS;
2256 else if (to == FP_REGS && to != from)
2257 scratch = GENERAL_REGS;
2261 if (from == SP_REGS)
2262 scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
2263 else if (from == MDR_REGS)
2264 scratch = DATA_REGS;
2265 else if (from == FP_REGS && to != from)
2266 scratch = GENERAL_REGS;
2268 if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
2269 return (mn10300_register_move_cost (VOIDmode, from, scratch)
2270 + mn10300_register_move_cost (VOIDmode, scratch, to));
2272 /* From here on, all we need consider are legal combinations. */
2276 /* The scale here is bytes * 2. */
2278 if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
2281 if (from == SP_REGS)
2282 return (to == ADDRESS_REGS ? 2 : 6);
2284 /* For MN103, all remaining legal moves are two bytes. */
2289 return (from == ADDRESS_REGS ? 4 : 6);
2291 if ((from == ADDRESS_REGS || from == DATA_REGS)
2292 && (to == ADDRESS_REGS || to == DATA_REGS))
2295 if (to == EXTENDED_REGS)
2296 return (to == from ? 6 : 4);
2298 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2303 /* The scale here is cycles * 2. */
2307 if (from == FP_REGS)
2310 /* All legal moves between integral registers are single cycle. */
2315 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2317 Since the form of the address is not known here, this must be speed-relative,
2318 though we should never be less expensive than a size-relative register
2319 move cost above. This is not a problem. */
2322 mn10300_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
2323 reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
2325 enum reg_class rclass = (enum reg_class) iclass;
2327 if (rclass == FP_REGS)
2332 /* Implement the TARGET_RTX_COSTS hook.
2334 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2335 to represent cycles. Size-relative costs are in bytes. */
2338 mn10300_rtx_costs (rtx x, int code, int outer_code, int *ptotal, bool speed)
2340 /* This value is used for SYMBOL_REF etc where we want to pretend
2341 we have a full 32-bit constant. */
2342 HOST_WIDE_INT i = 0x12345678;
2352 if (outer_code == SET)
2354 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2355 if (IN_RANGE (i, -32768, 32767))
2356 total = COSTS_N_INSNS (1);
2358 total = COSTS_N_INSNS (2);
2362 /* 16-bit integer operands don't affect latency;
2363 24-bit and 32-bit operands add a cycle. */
2364 if (IN_RANGE (i, -32768, 32767))
2367 total = COSTS_N_INSNS (1);
2372 if (outer_code == SET)
2376 else if (IN_RANGE (i, -128, 127))
2378 else if (IN_RANGE (i, -32768, 32767))
2385 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2386 if (IN_RANGE (i, -128, 127))
2388 else if (IN_RANGE (i, -32768, 32767))
2390 else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
2402 /* We assume all of these require a 32-bit constant, even though
2403 some symbol and label references can be relaxed. */
2407 switch (XINT (x, 1))
2413 case UNSPEC_GOTSYM_OFF:
2414 /* The PIC unspecs also resolve to a 32-bit constant. */
2418 /* Assume any non-listed unspec is some sort of arithmetic. */
2419 goto do_arith_costs;
2423 /* Notice the size difference of INC and INC4. */
2424 if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
2426 i = INTVAL (XEXP (x, 1));
2427 if (i == 1 || i == 4)
2429 total = 1 + rtx_cost (XEXP (x, 0), PLUS, speed);
2433 goto do_arith_costs;
2447 total = (speed ? COSTS_N_INSNS (1) : 2);
2451 /* Notice the size difference of ASL2 and variants. */
2452 if (!speed && CONST_INT_P (XEXP (x, 1)))
2453 switch (INTVAL (XEXP (x, 1)))
2468 total = (speed ? COSTS_N_INSNS (1) : 3);
2472 total = (speed ? COSTS_N_INSNS (3) : 2);
2479 total = (speed ? COSTS_N_INSNS (39)
2480 /* Include space to load+retrieve MDR. */
2481 : code == MOD || code == UMOD ? 6 : 4);
2485 total = mn10300_address_cost (XEXP (x, 0), speed);
2487 total = COSTS_N_INSNS (2 + total);
2491 /* Probably not implemented. Assume external call. */
2492 total = (speed ? COSTS_N_INSNS (10) : 7);
2504 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2505 may access it using GOTOFF instead of GOT. */
2508 mn10300_encode_section_info (tree decl, rtx rtl, int first ATTRIBUTE_UNUSED)
2514 symbol = XEXP (rtl, 0);
2515 if (GET_CODE (symbol) != SYMBOL_REF)
2519 SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
2522 /* Dispatch tables on the mn10300 are extremely expensive in terms of code
2523 and readonly data size. So we crank up the case threshold value to
2524 encourage a series of if/else comparisons to implement many small switch
2525 statements. In theory, this value could be increased much more if we
2526 were solely optimizing for space, but we keep it "reasonable" to avoid
2527 serious code efficiency lossage. */
2530 mn10300_case_values_threshold (void)
2535 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2538 mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
2540 rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);
2542 /* This is a strict alignment target, which means that we play
2543 some games to make sure that the locations at which we need
2544 to store <chain> and <disp> wind up at aligned addresses.
2547 0xfc 0xdd mov chain,a1
2549 0xf8 0xed 0x00 btst 0,d1
2553 Note that the two extra insns are effectively nops; they
2554 clobber the flags but do not affect the contents of D0 or D1. */
2556 disp = expand_binop (SImode, sub_optab, fnaddr,
2557 plus_constant (XEXP (m_tramp, 0), 11),
2558 NULL_RTX, 1, OPTAB_DIRECT);
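/* Lay down the four words of the trampoline: the two constant words supply
   the fixed instruction bytes sketched above, while <chain> goes at offset
   4 and <disp> at offset 12, keeping both word-aligned as this strict
   alignment target requires.  */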
2560 mem = adjust_address (m_tramp, SImode, 0);
2561 emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
2562 mem = adjust_address (m_tramp, SImode, 4);
2563 emit_move_insn (mem, chain_value);
2564 mem = adjust_address (m_tramp, SImode, 8);
2565 emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
2566 mem = adjust_address (m_tramp, SImode, 12);
2567 emit_move_insn (mem, disp);
2570 /* Output the assembler code for a C++ thunk function.
2571 THUNK_DECL is the declaration for the thunk function itself, FUNCTION
2572 is the decl for the target function. DELTA is an immediate constant
2573 offset to be added to the THIS parameter. If VCALL_OFFSET is nonzero
2574 the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
2575 additionally added to THIS. Finally jump to the entry point of
2579 mn10300_asm_output_mi_thunk (FILE * file,
2580 tree thunk_fndecl ATTRIBUTE_UNUSED,
2581 HOST_WIDE_INT delta,
2582 HOST_WIDE_INT vcall_offset,
2587 /* Get the register holding the THIS parameter. Handle the case
2588 where there is a hidden first argument for a returned structure. */
2589 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
2590 _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
2592 _this = reg_names [FIRST_ARGUMENT_REGNUM];
2594 fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);
2597 fprintf (file, "\tadd %d, %s\n", (int) delta, _this);
2601 const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];
2603 fprintf (file, "\tmov %s, %s\n", _this, scratch);
2604 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2605 fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
2606 fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
2607 fprintf (file, "\tadd %s, %s\n", scratch, _this);
2610 fputs ("\tjmp ", file);
2611 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
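/* As a concrete illustration of the code emitted above (the register names
   are whatever reg_names[] holds for the first argument and second address
   register; d0 and a1 are assumed here, as is '#' for ASM_COMMENT_START),
   a thunk with DELTA == 4 and VCALL_OFFSET == 8 jumping to "foo" would come
   out roughly as:

       # Thunk Entry Point:
       add 4, d0
       mov d0, a1
       mov (a1), a1
       add 8, a1
       mov (a1), a1
       add a1, d0
       jmp foo
*/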
2615 /* Return true if mn10300_asm_output_mi_thunk would be able to output the
2616 assembler code for the thunk function specified by the arguments
2617 it is passed, and false otherwise. */
2620 mn10300_can_output_mi_thunk (const_tree thunk_fndecl ATTRIBUTE_UNUSED,
2621 HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
2622 HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
2623 const_tree function ATTRIBUTE_UNUSED)
2629 mn10300_hard_regno_mode_ok (unsigned int regno, enum machine_mode mode)
2631 if (REGNO_REG_CLASS (regno) == FP_REGS
2632 || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
2633 /* Do not store integer values in FP registers. */
2634 return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);
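/* So, for example, SFmode is acceptable in an even-numbered FP register but
   never in an odd-numbered one, and SImode is never placed in an FP register
   at all.  */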
2636 if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
2639 if (REGNO_REG_CLASS (regno) == DATA_REGS
2640 || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
2641 || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
2642 return GET_MODE_SIZE (mode) <= 4;
2648 mn10300_modes_tieable (enum machine_mode mode1, enum machine_mode mode2)
2650 if (GET_MODE_CLASS (mode1) == MODE_FLOAT
2651 && GET_MODE_CLASS (mode2) != MODE_FLOAT)
2654 if (GET_MODE_CLASS (mode2) == MODE_FLOAT
2655 && GET_MODE_CLASS (mode1) != MODE_FLOAT)
2660 || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
2667 cc_flags_for_mode (enum machine_mode mode)
2672 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
2674 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
2676 return CC_FLAG_Z | CC_FLAG_N;
2685 cc_flags_for_code (enum rtx_code code)
2698 case GT: /* ~(Z|(N^V)) */
2699 case LE: /* Z|(N^V) */
2700 return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;
2706 case GTU: /* ~(C | Z) */
2707 case LEU: /* C | Z */
2708 return CC_FLAG_Z | CC_FLAG_C;
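/* Worked example of the requirements above: a signed comparison such as GT
   needs the Z, N and V flags, while an unsigned LEU needs only Z and C (an
   equality test presumably needs nothing beyond Z).  mn10300_select_cc_mode
   below uses these masks to choose a condition-code mode that provides at
   least the flags the comparison consumes.  */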
2726 mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
2730 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
2731 return CC_FLOATmode;
2733 req = cc_flags_for_code (code);
2735 if (req & CC_FLAG_V)
2737 if (req & CC_FLAG_C)
2743 is_load_insn (rtx insn)
2745 if (GET_CODE (PATTERN (insn)) != SET)
2748 return MEM_P (SET_SRC (PATTERN (insn)));
2752 is_store_insn (rtx insn)
2754 if (GET_CODE (PATTERN (insn)) != SET)
2757 return MEM_P (SET_DEST (PATTERN (insn)));
2760 /* Update scheduling costs for situations that cannot be
2761 described using the attributes and DFA machinery.
2762 DEP is the insn being scheduled.
2763 INSN is the previous insn.
2764 COST is the current cycle cost for DEP. */
2767 mn10300_adjust_sched_cost (rtx insn, rtx link, rtx dep, int cost)
2769 int timings = get_attr_timings (insn);
2774 if (GET_CODE (insn) == PARALLEL)
2775 insn = XVECEXP (insn, 0, 0);
2777 if (GET_CODE (dep) == PARALLEL)
2778 dep = XVECEXP (dep, 0, 0);
2780 /* For the AM34 a load instruction that follows a
2781 store instruction incurs an extra cycle of delay. */
2782 if (mn10300_tune_cpu == PROCESSOR_AM34
2783 && is_load_insn (dep)
2784 && is_store_insn (insn))
2787 /* For the AM34 a non-store, non-branch FPU insn that follows
2788 another FPU insn incurs a one cycle throughput increase. */
2789 else if (mn10300_tune_cpu == PROCESSOR_AM34
2790 && ! is_store_insn (insn)
2792 && GET_CODE (PATTERN (dep)) == SET
2793 && GET_CODE (PATTERN (insn)) == SET
2794 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) == MODE_FLOAT
2795 && GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
2798 /* Resolve the conflict described in section 1-7-4 of
2799 Chapter 3 of the MN103E Series Instruction Manual
2802 "When the preceeding instruction is a CPU load or
2803 store instruction, a following FPU instruction
2804 cannot be executed until the CPU completes the
2805 latency period even though there are no register
2806 or flag dependencies between them." */
2808 /* Only the AM33-2 (and later) CPUs have FPU instructions. */
2809 if (! TARGET_AM33_2)
2812 /* If a data dependence already exists then the cost is correct. */
2813 if (REG_NOTE_KIND (link) == 0)
2816 /* Check that the instruction about to be scheduled is an FPU instruction. */
2817 if (GET_CODE (PATTERN (dep)) != SET)
2820 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (dep)))) != MODE_FLOAT)
2823 /* Now check to see if the previous instruction is a load or store. */
2824 if (! is_load_insn (insn) && ! is_store_insn (insn))
2827 /* XXX: Verify: The text of 1-7-4 implies that the restriction
2828 only applies when an INTEGER load/store precedes an FPU
2829 instruction, but is this true? For now we assume that it is. */
2830 if (GET_MODE_CLASS (GET_MODE (SET_SRC (PATTERN (insn)))) != MODE_INT)
2833 /* Extract the latency value from the timings attribute. */
2834 return timings < 100 ? (timings % 10) : (timings % 100);
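/* For instance, with the decoding above a "timings" attribute value of 22
   yields a latency of 2 cycles and a value of 1310 yields a latency of 10;
   the remaining leading digits are assumed to encode the throughput half of
   the attribute.  */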
2838 mn10300_conditional_register_usage (void)
2844 for (i = FIRST_EXTENDED_REGNUM;
2845 i <= LAST_EXTENDED_REGNUM; i++)
2846 fixed_regs[i] = call_used_regs[i] = 1;
2850 for (i = FIRST_FP_REGNUM;
2851 i <= LAST_FP_REGNUM; i++)
2852 fixed_regs[i] = call_used_regs[i] = 1;
2855 fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
2856 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
2859 /* Worker function for TARGET_MD_ASM_CLOBBERS.
2860 We do this in the mn10300 backend to maintain source compatibility
2861 with the old cc0-based compiler. */
2864 mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
2865 tree inputs ATTRIBUTE_UNUSED,
2868 clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
2873 /* A helper function for splitting cbranch patterns after reload. */
2876 mn10300_split_cbranch (enum machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
2880 flags = gen_rtx_REG (cmp_mode, CC_REG);
2881 x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
2882 x = gen_rtx_SET (VOIDmode, flags, x);
2885 x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
2886 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
2887 x = gen_rtx_SET (VOIDmode, pc_rtx, x);
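/* A sketch of the transformation performed here: a post-reload
   compare-and-branch of roughly the form

       (set (pc) (if_then_else (lt (reg:SI x) (reg:SI y))
                               (label_ref L) (pc)))

   becomes the two insns constructed above,

       (set (reg CC_REG) (compare (reg:SI x) (reg:SI y)))
       (set (pc) (if_then_else (lt (reg CC_REG) (const_int 0))
                               (label_ref L) (pc)))

   with CMP_MODE as the mode of the flags register.  (The exact shape of the
   pre-split pattern is an assumption.)  */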
2891 /* A helper function for matching parallels that set the flags. */
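/* The PARALLEL handled here is expected to look roughly like

       (parallel [(set (reg) (operation ...))
                  (set (reg CC_REG) (compare ...))])

   i.e. its second element must be a flags-setting COMPARE, and the mode of
   the flags register must supply at least the flag bits that CC_MODE needs
   (a sketch inferred from the checks below).  */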
2894 mn10300_match_ccmode (rtx insn, enum machine_mode cc_mode)
2897 enum machine_mode flags_mode;
2899 gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);
2901 op1 = XVECEXP (PATTERN (insn), 0, 1);
2902 gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);
2904 flags = SET_DEST (op1);
2905 flags_mode = GET_MODE (flags);
2907 if (GET_MODE (SET_SRC (op1)) != flags_mode)
2909 if (GET_MODE_CLASS (flags_mode) != MODE_CC)
2912 /* Ensure that the mode of FLAGS is compatible with CC_MODE. */
2913 if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
2920 mn10300_split_and_operand_count (rtx op)
2922 HOST_WIDE_INT val = INTVAL (op);
2927 /* High bit is set, look for bits clear at the bottom. */
2928 count = exact_log2 (-val);
2931 /* This is only a size win if we can use the asl2 insn. Otherwise we
2932 would be replacing one 6-byte insn with two 3-byte insns. */
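/* For example, a mask of 0xfffffffc (high bit set, low two bits clear)
   gives a count of 2, which is within both limits below, while a mask of
   0xffffff00 gives a count of 8, exceeds them, and presumably leaves the
   AND unsplit.  */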
2933 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2939 /* High bit is clear, look for bits set at the bottom. */
2940 count = exact_log2 (val + 1);
2942 /* Again, this is only a size win with asl2. */
2943 if (count > (optimize_insn_for_speed_p () ? 2 : 4))
2952 enum attr_liw_op op;
2957 /* Decide if the given insn is a candidate for LIW bundling. If it is then
2958 extract the operands and LIW attributes from the insn and use them to fill
2959 in the liw_data structure. Return true upon success or false if the insn
2960 cannot be bundled. */
2963 extract_bundle (rtx insn, struct liw_data * pdata)
2965 bool allow_consts = true;
2968 gcc_assert (pdata != NULL);
2970 if (insn == NULL_RTX)
2972 /* Make sure that we are dealing with a simple SET insn. */
2973 p = single_set (insn);
2977 /* Make sure that it could go into one of the LIW pipelines. */
2978 pdata->slot = get_attr_liw (insn);
2979 if (pdata->slot == LIW_BOTH)
2982 pdata->op = get_attr_liw_op (insn);
2989 pdata->dest = SET_DEST (p);
2990 pdata->src = SET_SRC (p);
2993 pdata->dest = XEXP (SET_SRC (p), 0);
2994 pdata->src = XEXP (SET_SRC (p), 1);
3001 /* The AND, OR and XOR long instruction words only accept register arguments. */
3002 allow_consts = false;
3005 pdata->dest = SET_DEST (p);
3006 pdata->src = XEXP (SET_SRC (p), 1);
3010 if (! REG_P (pdata->dest))
3013 if (REG_P (pdata->src))
3016 return allow_consts && satisfies_constraint_O (pdata->src);
3019 /* Make sure that it is OK to execute LIW1 and LIW2 in parallel. GCC generated
3020 the instructions with the assumption that LIW1 would be executed before LIW2
3021 so we must check for overlaps between their sources and destinations. */
3024 check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
3026 /* Check for slot conflicts. */
3027 if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
3030 /* If either operation is a compare, then "dest" is really an input; the real
3031 destination is CC_REG. So these instructions need different checks. */
3033 /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
3034 check its values prior to any changes made by OP. */
3035 if (pliw1->op == LIW_OP_CMP)
3037 /* Two sequential comparisons mean dead code, which ought to
3038 have been eliminated given that bundling only happens with
3039 optimization. We cannot bundle them in any case. */
3040 gcc_assert (pliw1->op != pliw2->op);
3044 /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
3045 is the destination of OP, as the CMP will look at the old value, not the new
3047 if (pliw2->op == LIW_OP_CMP)
3049 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3052 if (REG_P (pliw2->src))
3053 return REGNO (pliw2->src) != REGNO (pliw1->dest);
3058 /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
3059 same destination register. */
3060 if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
3063 /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
3064 of OP1 is the source of OP2. The exception is when OP1 is a MOVE instruction, in which case
3065 we can replace the source in OP2 with the source of OP1. */
3066 if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
3068 if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
3070 if (! REG_P (pliw1->src)
3071 && (pliw2->op == LIW_OP_AND
3072 || pliw2->op == LIW_OP_OR
3073 || pliw2->op == LIW_OP_XOR))
3076 pliw2->src = pliw1->src;
3082 /* Everything else is OK. */
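/* To illustrate the rules above (the register numbers are purely for the
   sake of the example): "mov d0,d1 ; add d2,d3" touches disjoint registers
   and may be bundled, and "cmp d0,d1 ; add d2,d3" may be bundled because
   the comparison reads its operands before the ADD changes anything, but
   "add d0,d1 ; cmp d1,d2" must not be bundled, since the CMP would then
   observe the value of d1 from before the addition.  */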
3086 /* Combine pairs of insns into LIW bundles. */
3089 mn10300_bundle_liw (void)
3093 for (r = get_insns (); r != NULL_RTX; r = next_nonnote_nondebug_insn (r))
3096 struct liw_data liw1, liw2;
3099 if (! extract_bundle (insn1, & liw1))
3102 insn2 = next_nonnote_nondebug_insn (insn1);
3103 if (! extract_bundle (insn2, & liw2))
3106 /* Check for source/destination overlap. */
3107 if (! check_liw_constraints (& liw1, & liw2))
3110 if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
3112 struct liw_data temp;
3119 delete_insn (insn2);
3121 if (liw1.op == LIW_OP_CMP)
3122 insn2 = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
3124 else if (liw2.op == LIW_OP_CMP)
3125 insn2 = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
3128 insn2 = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
3129 GEN_INT (liw1.op), GEN_INT (liw2.op));
3131 insn2 = emit_insn_after (insn2, insn1);
3132 delete_insn (insn1);
3138 mn10300_reorg (void)
3142 if (TARGET_ALLOW_LIW)
3143 mn10300_bundle_liw ();
3147 /* Initialize the GCC target structure. */
3149 #undef TARGET_MACHINE_DEPENDENT_REORG
3150 #define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg
3152 #undef TARGET_EXCEPT_UNWIND_INFO
3153 #define TARGET_EXCEPT_UNWIND_INFO sjlj_except_unwind_info
3155 #undef TARGET_ASM_ALIGNED_HI_OP
3156 #define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"
3158 #undef TARGET_LEGITIMIZE_ADDRESS
3159 #define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address
3161 #undef TARGET_ADDRESS_COST
3162 #define TARGET_ADDRESS_COST mn10300_address_cost
3163 #undef TARGET_REGISTER_MOVE_COST
3164 #define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
3165 #undef TARGET_MEMORY_MOVE_COST
3166 #define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
3167 #undef TARGET_RTX_COSTS
3168 #define TARGET_RTX_COSTS mn10300_rtx_costs
3170 #undef TARGET_ASM_FILE_START
3171 #define TARGET_ASM_FILE_START mn10300_file_start
3172 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
3173 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
3175 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
3176 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra
3178 #undef TARGET_DEFAULT_TARGET_FLAGS
3179 #define TARGET_DEFAULT_TARGET_FLAGS MASK_MULT_BUG | MASK_PTR_A0D0 | MASK_ALLOW_LIW
3180 #undef TARGET_HANDLE_OPTION
3181 #define TARGET_HANDLE_OPTION mn10300_handle_option
3182 #undef TARGET_OPTION_OVERRIDE
3183 #define TARGET_OPTION_OVERRIDE mn10300_option_override
3184 #undef TARGET_OPTION_OPTIMIZATION_TABLE
3185 #define TARGET_OPTION_OPTIMIZATION_TABLE mn10300_option_optimization_table
3187 #undef TARGET_ENCODE_SECTION_INFO
3188 #define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info
3190 #undef TARGET_PROMOTE_PROTOTYPES
3191 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
3192 #undef TARGET_RETURN_IN_MEMORY
3193 #define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
3194 #undef TARGET_PASS_BY_REFERENCE
3195 #define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
3196 #undef TARGET_CALLEE_COPIES
3197 #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
3198 #undef TARGET_ARG_PARTIAL_BYTES
3199 #define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
3200 #undef TARGET_FUNCTION_ARG
3201 #define TARGET_FUNCTION_ARG mn10300_function_arg
3202 #undef TARGET_FUNCTION_ARG_ADVANCE
3203 #define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance
3205 #undef TARGET_EXPAND_BUILTIN_SAVEREGS
3206 #define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
3207 #undef TARGET_EXPAND_BUILTIN_VA_START
3208 #define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start
3210 #undef TARGET_CASE_VALUES_THRESHOLD
3211 #define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold
3213 #undef TARGET_LEGITIMATE_ADDRESS_P
3214 #define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
3215 #undef TARGET_DELEGITIMIZE_ADDRESS
3216 #define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
3218 #undef TARGET_PREFERRED_RELOAD_CLASS
3219 #define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
3220 #undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
3221 #define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
3222 mn10300_preferred_output_reload_class
3223 #undef TARGET_SECONDARY_RELOAD
3224 #define TARGET_SECONDARY_RELOAD mn10300_secondary_reload
3226 #undef TARGET_TRAMPOLINE_INIT
3227 #define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init
3229 #undef TARGET_FUNCTION_VALUE
3230 #define TARGET_FUNCTION_VALUE mn10300_function_value
3231 #undef TARGET_LIBCALL_VALUE
3232 #define TARGET_LIBCALL_VALUE mn10300_libcall_value
3234 #undef TARGET_ASM_OUTPUT_MI_THUNK
3235 #define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
3236 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
3237 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk
3239 #undef TARGET_SCHED_ADJUST_COST
3240 #define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost
3242 #undef TARGET_CONDITIONAL_REGISTER_USAGE
3243 #define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage
3245 #undef TARGET_MD_ASM_CLOBBERS
3246 #define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers
3248 #undef TARGET_FLAGS_REGNUM
3249 #define TARGET_FLAGS_REGNUM CC_REG
3251 struct gcc_target targetm = TARGET_INITIALIZER;