1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
168 /* For MIPS DSP ASE */
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
174 MIPS_SI_FTYPE_PTR_SI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
281 /* Structure to be filled in by compute_frame_size with register
282 save masks, and offsets for the current function. */
284 struct mips_frame_info GTY(())
286 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
287 HOST_WIDE_INT var_size; /* # bytes that variables take up */
288 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
289 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
290 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
291 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
292 unsigned int mask; /* mask of saved gp registers */
293 unsigned int fmask; /* mask of saved fp registers */
294 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
295 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
296 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
297 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
298 bool initialized; /* true if frame size already calculated */
299 int num_gp; /* number of gp registers saved */
300 int num_fp; /* number of fp registers saved */
303 struct machine_function GTY(()) {
304 /* Pseudo-reg holding the value of $28 in a mips16 function which
305 refers to GP relative global variables. */
306 rtx mips16_gp_pseudo_rtx;
308 /* The number of extra stack bytes taken up by register varargs.
309 This area is allocated by the callee at the very top of the frame. */
312 /* Current frame information, calculated by compute_frame_size. */
313 struct mips_frame_info frame;
315 /* The register to use as the global pointer within this function. */
316 unsigned int global_pointer;
318 /* True if mips_adjust_insn_length should ignore an instruction's
320 bool ignore_hazard_length_p;
322 /* True if the whole function is suitable for .set noreorder and
324 bool all_noreorder_p;
326 /* True if the function is known to have an instruction that needs $gp. */
329 /* True if we have emitted an instruction to initialize
330 mips16_gp_pseudo_rtx. */
331 bool initialized_mips16_gp_pseudo_p;
334 /* Information about a single argument. */
337 /* True if the argument is passed in a floating-point register, or
338 would have been if we hadn't run out of registers. */
341 /* The number of words passed in registers, rounded up. */
342 unsigned int reg_words;
344 /* For EABI, the offset of the first register from GP_ARG_FIRST or
345 FP_ARG_FIRST. For other ABIs, the offset of the first register from
346 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
347 comment for details).
349 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
351 unsigned int reg_offset;
353 /* The number of words that must be passed on the stack, rounded up. */
354 unsigned int stack_words;
356 /* The offset from the start of the stack overflow area of the argument's
357 first stack word. Only meaningful when STACK_WORDS is nonzero. */
358 unsigned int stack_offset;
362 /* Information about an address described by mips_address_type.
368 REG is the base register and OFFSET is the constant offset.
371 REG is the register that contains the high part of the address,
372 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
373 is the type of OFFSET's symbol.
376 SYMBOL_TYPE is the type of symbol being referenced. */
378 struct mips_address_info
380 enum mips_address_type type;
383 enum mips_symbol_type symbol_type;
387 /* One stage in a constant building sequence. These sequences have
391 A = A CODE[1] VALUE[1]
392 A = A CODE[2] VALUE[2]
395 where A is an accumulator, each CODE[i] is a binary rtl operation
396 and each VALUE[i] is a constant integer. */
397 struct mips_integer_op {
399 unsigned HOST_WIDE_INT value;
403 /* The largest number of operations needed to load an integer constant.
404 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
405 When the lowest bit is clear, we can try, but reject a sequence with
406 an extra SLL at the end. */
407 #define MIPS_MAX_INTEGER_OPS 7
409 /* Information about a MIPS16e SAVE or RESTORE instruction. */
410 struct mips16e_save_restore_info {
411 /* The number of argument registers saved by a SAVE instruction.
412 0 for RESTORE instructions. */
415 /* Bit X is set if the instruction saves or restores GPR X. */
418 /* The total number of bytes to allocate. */
422 /* Global variables for machine-dependent things. */
424 /* Threshold for data being put into the small data/bss area, instead
425 of the normal data area. */
426 int mips_section_threshold = -1;
428 /* Count the number of .file directives, so that .loc is up to date. */
429 int num_source_filenames = 0;
431 /* Name of the file containing the current function. */
432 const char *current_function_file = "";
434 /* Count the number of sdb related labels are generated (to find block
435 start and end boundaries). */
436 int sdb_label_count = 0;
438 /* Next label # for each statement for Silicon Graphics IRIS systems. */
441 /* Map GCC register number to debugger register number. */
442 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
443 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
445 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
451 /* The next branch instruction is a branch likely, not branch normal. */
452 int mips_branch_likely;
454 /* The operands passed to the last cmpMM expander. */
457 /* The target cpu for code generation. */
458 enum processor_type mips_arch;
459 const struct mips_cpu_info *mips_arch_info;
461 /* The target cpu for optimization and scheduling. */
462 enum processor_type mips_tune;
463 const struct mips_cpu_info *mips_tune_info;
465 /* Which instruction set architecture to use. */
468 /* The architecture selected by -mipsN. */
469 static const struct mips_cpu_info *mips_isa_info;
471 /* Which ABI to use. */
472 int mips_abi = MIPS_ABI_DEFAULT;
474 /* Cost information to use. */
475 const struct mips_rtx_cost_data *mips_cost;
477 /* Remember the ambient target flags, excluding mips16. */
478 static int mips_base_target_flags;
479 /* The mips16 command-line target flags only. */
480 static bool mips_base_mips16;
481 /* Similar copies of option settings. */
482 static int mips_flag_delayed_branch; /* flag_delayed_branch */
483 static int mips_base_schedule_insns; /* flag_schedule_insns */
484 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
485 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
486 static int mips_base_align_loops; /* align_loops */
487 static int mips_base_align_jumps; /* align_jumps */
488 static int mips_base_align_functions; /* align_functions */
490 /* The -mtext-loads setting. */
491 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
493 /* If TRUE, we split addresses into their high and low parts in the RTL. */
494 int mips_split_addresses;
496 /* Mode used for saving/restoring general purpose registers. */
497 static enum machine_mode gpr_mode;
499 /* Array giving truth value on whether or not a given hard register
500 can support a given mode. */
501 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
503 /* List of all MIPS punctuation characters used by print_operand. */
504 char mips_print_operand_punct[256];
506 static GTY (()) int mips_output_filename_first_time = 1;
508 /* mips_split_p[X] is true if symbols of type X can be split by
509 mips_split_symbol(). */
510 bool mips_split_p[NUM_SYMBOL_TYPES];
512 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
513 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
514 if they are matched by a special .md file pattern. */
515 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
517 /* Likewise for HIGHs. */
518 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
520 /* Map hard register number to register class */
521 const enum reg_class mips_regno_to_class[] =
523 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
524 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
525 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
526 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
527 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
528 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
529 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
530 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
531 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
532 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
533 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
534 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
535 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
536 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
537 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
538 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
539 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
540 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
541 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
542 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
543 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
544 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
545 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
546 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
547 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
548 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
549 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
550 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
551 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
552 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
553 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
554 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
555 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
556 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
557 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
558 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
559 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
560 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
561 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
562 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
563 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
564 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
565 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
566 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
567 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
568 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
569 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
572 /* Table of machine dependent attributes. */
573 const struct attribute_spec mips_attribute_table[] =
575 { "long_call", 0, 0, false, true, true, NULL },
576 { "far", 0, 0, false, true, true, NULL },
577 { "near", 0, 0, false, true, true, NULL },
578 /* Switch MIPS16 ASE on and off per-function. We would really like
579 to make these type attributes, but GCC doesn't provide the hooks
580 we need to support the right conversion rules. As declaration
581 attributes, they affect code generation but don't carry other
583 { "mips16", 0, 0, true, false, false, NULL },
584 { "nomips16", 0, 0, true, false, false, NULL },
585 { NULL, 0, 0, false, false, false, NULL }
588 /* A table describing all the processors gcc knows about. Names are
589 matched in the order listed. The first mention of an ISA level is
590 taken as the canonical name for that ISA.
592 To ease comparison, please keep this table in the same order
593 as gas's mips_cpu_info_table[]. Please also make sure that
594 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
595 options correctly. */
596 const struct mips_cpu_info mips_cpu_info_table[] = {
597 /* Entries for generic ISAs */
598 { "mips1", PROCESSOR_R3000, 1, 0 },
599 { "mips2", PROCESSOR_R6000, 2, 0 },
600 { "mips3", PROCESSOR_R4000, 3, 0 },
601 { "mips4", PROCESSOR_R8000, 4, 0 },
602 /* Prefer not to use branch-likely instructions for generic MIPS32rX
603 and MIPS64rX code. The instructions were officially deprecated
604 in revisions 2 and earlier, but revision 3 is likely to downgrade
605 that to a recommendation to avoid the instructions in code that
606 isn't tuned to a specific processor. */
607 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
608 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
609 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
612 { "r3000", PROCESSOR_R3000, 1, 0 },
613 { "r2000", PROCESSOR_R3000, 1, 0 }, /* = r3000 */
614 { "r3900", PROCESSOR_R3900, 1, 0 },
617 { "r6000", PROCESSOR_R6000, 2, 0 },
620 { "r4000", PROCESSOR_R4000, 3, 0 },
621 { "vr4100", PROCESSOR_R4100, 3, 0 },
622 { "vr4111", PROCESSOR_R4111, 3, 0 },
623 { "vr4120", PROCESSOR_R4120, 3, 0 },
624 { "vr4130", PROCESSOR_R4130, 3, 0 },
625 { "vr4300", PROCESSOR_R4300, 3, 0 },
626 { "r4400", PROCESSOR_R4000, 3, 0 }, /* = r4000 */
627 { "r4600", PROCESSOR_R4600, 3, 0 },
628 { "orion", PROCESSOR_R4600, 3, 0 }, /* = r4600 */
629 { "r4650", PROCESSOR_R4650, 3, 0 },
632 { "r8000", PROCESSOR_R8000, 4, 0 },
633 { "vr5000", PROCESSOR_R5000, 4, 0 },
634 { "vr5400", PROCESSOR_R5400, 4, 0 },
635 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
636 { "rm7000", PROCESSOR_R7000, 4, 0 },
637 { "rm9000", PROCESSOR_R9000, 4, 0 },
640 { "4kc", PROCESSOR_4KC, 32, 0 },
641 { "4km", PROCESSOR_4KC, 32, 0 }, /* = 4kc */
642 { "4kp", PROCESSOR_4KP, 32, 0 },
643 { "4ksc", PROCESSOR_4KC, 32, 0 },
645 /* MIPS32 Release 2 */
646 { "m4k", PROCESSOR_M4K, 33, 0 },
647 { "4kec", PROCESSOR_4KC, 33, 0 },
648 { "4kem", PROCESSOR_4KC, 33, 0 },
649 { "4kep", PROCESSOR_4KP, 33, 0 },
650 { "4ksd", PROCESSOR_4KC, 33, 0 },
652 { "24kc", PROCESSOR_24KC, 33, 0 },
653 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
654 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
655 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
656 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
657 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
659 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP */
660 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
661 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
662 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
663 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
664 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
666 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP */
667 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
668 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
669 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
670 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
671 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
673 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2 */
674 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
675 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
676 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
677 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
678 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
679 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
682 { "5kc", PROCESSOR_5KC, 64, 0 },
683 { "5kf", PROCESSOR_5KF, 64, 0 },
684 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
685 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
686 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
687 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
690 /* Default costs. If these are used for a processor we should look
691 up the actual costs. */
692 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
693 COSTS_N_INSNS (7), /* fp_mult_sf */ \
694 COSTS_N_INSNS (8), /* fp_mult_df */ \
695 COSTS_N_INSNS (23), /* fp_div_sf */ \
696 COSTS_N_INSNS (36), /* fp_div_df */ \
697 COSTS_N_INSNS (10), /* int_mult_si */ \
698 COSTS_N_INSNS (10), /* int_mult_di */ \
699 COSTS_N_INSNS (69), /* int_div_si */ \
700 COSTS_N_INSNS (69), /* int_div_di */ \
701 2, /* branch_cost */ \
702 4 /* memory_latency */
704 /* Need to replace these with the costs of calling the appropriate
706 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
707 COSTS_N_INSNS (256), /* fp_mult_sf */ \
708 COSTS_N_INSNS (256), /* fp_mult_df */ \
709 COSTS_N_INSNS (256), /* fp_div_sf */ \
710 COSTS_N_INSNS (256) /* fp_div_df */
712 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
714 COSTS_N_INSNS (1), /* fp_add */
715 COSTS_N_INSNS (1), /* fp_mult_sf */
716 COSTS_N_INSNS (1), /* fp_mult_df */
717 COSTS_N_INSNS (1), /* fp_div_sf */
718 COSTS_N_INSNS (1), /* fp_div_df */
719 COSTS_N_INSNS (1), /* int_mult_si */
720 COSTS_N_INSNS (1), /* int_mult_di */
721 COSTS_N_INSNS (1), /* int_div_si */
722 COSTS_N_INSNS (1), /* int_div_di */
724 4 /* memory_latency */
727 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
730 COSTS_N_INSNS (2), /* fp_add */
731 COSTS_N_INSNS (4), /* fp_mult_sf */
732 COSTS_N_INSNS (5), /* fp_mult_df */
733 COSTS_N_INSNS (12), /* fp_div_sf */
734 COSTS_N_INSNS (19), /* fp_div_df */
735 COSTS_N_INSNS (12), /* int_mult_si */
736 COSTS_N_INSNS (12), /* int_mult_di */
737 COSTS_N_INSNS (35), /* int_div_si */
738 COSTS_N_INSNS (35), /* int_div_di */
740 4 /* memory_latency */
745 COSTS_N_INSNS (6), /* int_mult_si */
746 COSTS_N_INSNS (6), /* int_mult_di */
747 COSTS_N_INSNS (36), /* int_div_si */
748 COSTS_N_INSNS (36), /* int_div_di */
750 4 /* memory_latency */
754 COSTS_N_INSNS (36), /* int_mult_si */
755 COSTS_N_INSNS (36), /* int_mult_di */
756 COSTS_N_INSNS (37), /* int_div_si */
757 COSTS_N_INSNS (37), /* int_div_di */
759 4 /* memory_latency */
763 COSTS_N_INSNS (4), /* int_mult_si */
764 COSTS_N_INSNS (11), /* int_mult_di */
765 COSTS_N_INSNS (36), /* int_div_si */
766 COSTS_N_INSNS (68), /* int_div_di */
768 4 /* memory_latency */
771 COSTS_N_INSNS (4), /* fp_add */
772 COSTS_N_INSNS (4), /* fp_mult_sf */
773 COSTS_N_INSNS (5), /* fp_mult_df */
774 COSTS_N_INSNS (17), /* fp_div_sf */
775 COSTS_N_INSNS (32), /* fp_div_df */
776 COSTS_N_INSNS (4), /* int_mult_si */
777 COSTS_N_INSNS (11), /* int_mult_di */
778 COSTS_N_INSNS (36), /* int_div_si */
779 COSTS_N_INSNS (68), /* int_div_di */
781 4 /* memory_latency */
784 COSTS_N_INSNS (4), /* fp_add */
785 COSTS_N_INSNS (4), /* fp_mult_sf */
786 COSTS_N_INSNS (5), /* fp_mult_df */
787 COSTS_N_INSNS (17), /* fp_div_sf */
788 COSTS_N_INSNS (32), /* fp_div_df */
789 COSTS_N_INSNS (4), /* int_mult_si */
790 COSTS_N_INSNS (7), /* int_mult_di */
791 COSTS_N_INSNS (42), /* int_div_si */
792 COSTS_N_INSNS (72), /* int_div_di */
794 4 /* memory_latency */
798 COSTS_N_INSNS (5), /* int_mult_si */
799 COSTS_N_INSNS (5), /* int_mult_di */
800 COSTS_N_INSNS (41), /* int_div_si */
801 COSTS_N_INSNS (41), /* int_div_di */
803 4 /* memory_latency */
806 COSTS_N_INSNS (8), /* fp_add */
807 COSTS_N_INSNS (8), /* fp_mult_sf */
808 COSTS_N_INSNS (10), /* fp_mult_df */
809 COSTS_N_INSNS (34), /* fp_div_sf */
810 COSTS_N_INSNS (64), /* fp_div_df */
811 COSTS_N_INSNS (5), /* int_mult_si */
812 COSTS_N_INSNS (5), /* int_mult_di */
813 COSTS_N_INSNS (41), /* int_div_si */
814 COSTS_N_INSNS (41), /* int_div_di */
816 4 /* memory_latency */
819 COSTS_N_INSNS (4), /* fp_add */
820 COSTS_N_INSNS (4), /* fp_mult_sf */
821 COSTS_N_INSNS (5), /* fp_mult_df */
822 COSTS_N_INSNS (17), /* fp_div_sf */
823 COSTS_N_INSNS (32), /* fp_div_df */
824 COSTS_N_INSNS (5), /* int_mult_si */
825 COSTS_N_INSNS (5), /* int_mult_di */
826 COSTS_N_INSNS (41), /* int_div_si */
827 COSTS_N_INSNS (41), /* int_div_di */
829 4 /* memory_latency */
833 COSTS_N_INSNS (5), /* int_mult_si */
834 COSTS_N_INSNS (5), /* int_mult_di */
835 COSTS_N_INSNS (41), /* int_div_si */
836 COSTS_N_INSNS (41), /* int_div_di */
838 4 /* memory_latency */
841 COSTS_N_INSNS (8), /* fp_add */
842 COSTS_N_INSNS (8), /* fp_mult_sf */
843 COSTS_N_INSNS (10), /* fp_mult_df */
844 COSTS_N_INSNS (34), /* fp_div_sf */
845 COSTS_N_INSNS (64), /* fp_div_df */
846 COSTS_N_INSNS (5), /* int_mult_si */
847 COSTS_N_INSNS (5), /* int_mult_di */
848 COSTS_N_INSNS (41), /* int_div_si */
849 COSTS_N_INSNS (41), /* int_div_di */
851 4 /* memory_latency */
854 COSTS_N_INSNS (4), /* fp_add */
855 COSTS_N_INSNS (4), /* fp_mult_sf */
856 COSTS_N_INSNS (5), /* fp_mult_df */
857 COSTS_N_INSNS (17), /* fp_div_sf */
858 COSTS_N_INSNS (32), /* fp_div_df */
859 COSTS_N_INSNS (5), /* int_mult_si */
860 COSTS_N_INSNS (5), /* int_mult_di */
861 COSTS_N_INSNS (41), /* int_div_si */
862 COSTS_N_INSNS (41), /* int_div_di */
864 4 /* memory_latency */
867 COSTS_N_INSNS (6), /* fp_add */
868 COSTS_N_INSNS (6), /* fp_mult_sf */
869 COSTS_N_INSNS (7), /* fp_mult_df */
870 COSTS_N_INSNS (25), /* fp_div_sf */
871 COSTS_N_INSNS (48), /* fp_div_df */
872 COSTS_N_INSNS (5), /* int_mult_si */
873 COSTS_N_INSNS (5), /* int_mult_di */
874 COSTS_N_INSNS (41), /* int_div_si */
875 COSTS_N_INSNS (41), /* int_div_di */
877 4 /* memory_latency */
883 COSTS_N_INSNS (2), /* fp_add */
884 COSTS_N_INSNS (4), /* fp_mult_sf */
885 COSTS_N_INSNS (5), /* fp_mult_df */
886 COSTS_N_INSNS (12), /* fp_div_sf */
887 COSTS_N_INSNS (19), /* fp_div_df */
888 COSTS_N_INSNS (2), /* int_mult_si */
889 COSTS_N_INSNS (2), /* int_mult_di */
890 COSTS_N_INSNS (35), /* int_div_si */
891 COSTS_N_INSNS (35), /* int_div_di */
893 4 /* memory_latency */
896 COSTS_N_INSNS (3), /* fp_add */
897 COSTS_N_INSNS (5), /* fp_mult_sf */
898 COSTS_N_INSNS (6), /* fp_mult_df */
899 COSTS_N_INSNS (15), /* fp_div_sf */
900 COSTS_N_INSNS (16), /* fp_div_df */
901 COSTS_N_INSNS (17), /* int_mult_si */
902 COSTS_N_INSNS (17), /* int_mult_di */
903 COSTS_N_INSNS (38), /* int_div_si */
904 COSTS_N_INSNS (38), /* int_div_di */
906 6 /* memory_latency */
909 COSTS_N_INSNS (6), /* fp_add */
910 COSTS_N_INSNS (7), /* fp_mult_sf */
911 COSTS_N_INSNS (8), /* fp_mult_df */
912 COSTS_N_INSNS (23), /* fp_div_sf */
913 COSTS_N_INSNS (36), /* fp_div_df */
914 COSTS_N_INSNS (10), /* int_mult_si */
915 COSTS_N_INSNS (10), /* int_mult_di */
916 COSTS_N_INSNS (69), /* int_div_si */
917 COSTS_N_INSNS (69), /* int_div_di */
919 6 /* memory_latency */
931 /* The only costs that appear to be updated here are
932 integer multiplication. */
934 COSTS_N_INSNS (4), /* int_mult_si */
935 COSTS_N_INSNS (6), /* int_mult_di */
936 COSTS_N_INSNS (69), /* int_div_si */
937 COSTS_N_INSNS (69), /* int_div_di */
939 4 /* memory_latency */
951 COSTS_N_INSNS (6), /* fp_add */
952 COSTS_N_INSNS (4), /* fp_mult_sf */
953 COSTS_N_INSNS (5), /* fp_mult_df */
954 COSTS_N_INSNS (23), /* fp_div_sf */
955 COSTS_N_INSNS (36), /* fp_div_df */
956 COSTS_N_INSNS (5), /* int_mult_si */
957 COSTS_N_INSNS (5), /* int_mult_di */
958 COSTS_N_INSNS (36), /* int_div_si */
959 COSTS_N_INSNS (36), /* int_div_di */
961 4 /* memory_latency */
964 COSTS_N_INSNS (6), /* fp_add */
965 COSTS_N_INSNS (5), /* fp_mult_sf */
966 COSTS_N_INSNS (6), /* fp_mult_df */
967 COSTS_N_INSNS (30), /* fp_div_sf */
968 COSTS_N_INSNS (59), /* fp_div_df */
969 COSTS_N_INSNS (3), /* int_mult_si */
970 COSTS_N_INSNS (4), /* int_mult_di */
971 COSTS_N_INSNS (42), /* int_div_si */
972 COSTS_N_INSNS (74), /* int_div_di */
974 4 /* memory_latency */
977 COSTS_N_INSNS (6), /* fp_add */
978 COSTS_N_INSNS (5), /* fp_mult_sf */
979 COSTS_N_INSNS (6), /* fp_mult_df */
980 COSTS_N_INSNS (30), /* fp_div_sf */
981 COSTS_N_INSNS (59), /* fp_div_df */
982 COSTS_N_INSNS (5), /* int_mult_si */
983 COSTS_N_INSNS (9), /* int_mult_di */
984 COSTS_N_INSNS (42), /* int_div_si */
985 COSTS_N_INSNS (74), /* int_div_di */
987 4 /* memory_latency */
990 /* The only costs that are changed here are
991 integer multiplication. */
992 COSTS_N_INSNS (6), /* fp_add */
993 COSTS_N_INSNS (7), /* fp_mult_sf */
994 COSTS_N_INSNS (8), /* fp_mult_df */
995 COSTS_N_INSNS (23), /* fp_div_sf */
996 COSTS_N_INSNS (36), /* fp_div_df */
997 COSTS_N_INSNS (5), /* int_mult_si */
998 COSTS_N_INSNS (9), /* int_mult_di */
999 COSTS_N_INSNS (69), /* int_div_si */
1000 COSTS_N_INSNS (69), /* int_div_di */
1001 1, /* branch_cost */
1002 4 /* memory_latency */
1008 /* The only costs that are changed here are
1009 integer multiplication. */
1010 COSTS_N_INSNS (6), /* fp_add */
1011 COSTS_N_INSNS (7), /* fp_mult_sf */
1012 COSTS_N_INSNS (8), /* fp_mult_df */
1013 COSTS_N_INSNS (23), /* fp_div_sf */
1014 COSTS_N_INSNS (36), /* fp_div_df */
1015 COSTS_N_INSNS (3), /* int_mult_si */
1016 COSTS_N_INSNS (8), /* int_mult_di */
1017 COSTS_N_INSNS (69), /* int_div_si */
1018 COSTS_N_INSNS (69), /* int_div_di */
1019 1, /* branch_cost */
1020 4 /* memory_latency */
1023 /* These costs are the same as the SB-1A below. */
1024 COSTS_N_INSNS (4), /* fp_add */
1025 COSTS_N_INSNS (4), /* fp_mult_sf */
1026 COSTS_N_INSNS (4), /* fp_mult_df */
1027 COSTS_N_INSNS (24), /* fp_div_sf */
1028 COSTS_N_INSNS (32), /* fp_div_df */
1029 COSTS_N_INSNS (3), /* int_mult_si */
1030 COSTS_N_INSNS (4), /* int_mult_di */
1031 COSTS_N_INSNS (36), /* int_div_si */
1032 COSTS_N_INSNS (68), /* int_div_di */
1033 1, /* branch_cost */
1034 4 /* memory_latency */
1037 /* These costs are the same as the SB-1 above. */
1038 COSTS_N_INSNS (4), /* fp_add */
1039 COSTS_N_INSNS (4), /* fp_mult_sf */
1040 COSTS_N_INSNS (4), /* fp_mult_df */
1041 COSTS_N_INSNS (24), /* fp_div_sf */
1042 COSTS_N_INSNS (32), /* fp_div_df */
1043 COSTS_N_INSNS (3), /* int_mult_si */
1044 COSTS_N_INSNS (4), /* int_mult_di */
1045 COSTS_N_INSNS (36), /* int_div_si */
1046 COSTS_N_INSNS (68), /* int_div_di */
1047 1, /* branch_cost */
1048 4 /* memory_latency */
1055 /* Use a hash table to keep track of implicit mips16/nomips16 attributes
1056 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1058 struct mflip_mips16_entry GTY (()) {
1062 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1064 /* Hash table callbacks for mflip_mips16_htab. */
1067 mflip_mips16_htab_hash (const void *entry)
1069 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1073 mflip_mips16_htab_eq (const void *entry, const void *name)
1075 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1076 (const char *) name) == 0;
1079 static GTY(()) int mips16_flipper;
1081 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1082 for -mflip-mips16. Return true if it should use "mips16" and false if
1083 it should use "nomips16". */
1086 mflip_mips16_use_mips16_p (tree decl)
1088 struct mflip_mips16_entry *entry;
1093 /* Use the opposite of the command-line setting for anonymous decls. */
1094 if (!DECL_NAME (decl))
1095 return !mips_base_mips16;
1097 if (!mflip_mips16_htab)
1098 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1099 mflip_mips16_htab_eq, NULL);
1101 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1102 hash = htab_hash_string (name);
1103 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1104 entry = (struct mflip_mips16_entry *) *slot;
1107 mips16_flipper = !mips16_flipper;
1108 entry = GGC_NEW (struct mflip_mips16_entry);
1110 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1113 return entry->mips16_p;
1116 /* Predicates to test for presence of "near" and "far"/"long_call"
1117 attributes on the given TYPE. */
1120 mips_near_type_p (const_tree type)
1122 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1126 mips_far_type_p (const_tree type)
1128 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1129 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1132 /* Similar predicates for "mips16"/"nomips16" attributes. */
1135 mips_mips16_decl_p (const_tree decl)
1137 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1141 mips_nomips16_decl_p (const_tree decl)
1143 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1146 /* Return true if function DECL is a MIPS16 function. Return the ambient
1147 setting if DECL is null. */
1150 mips_use_mips16_mode_p (tree decl)
1154 /* Nested functions must use the same frame pointer as their
1155 parent and must therefore use the same ISA mode. */
1156 tree parent = decl_function_context (decl);
1159 if (mips_mips16_decl_p (decl))
1161 if (mips_nomips16_decl_p (decl))
1164 return mips_base_mips16;
1167 /* Return 0 if the attributes for two types are incompatible, 1 if they
1168 are compatible, and 2 if they are nearly compatible (which causes a
1169 warning to be generated). */
1172 mips_comp_type_attributes (const_tree type1, const_tree type2)
1174 /* Check for mismatch of non-default calling convention. */
1175 if (TREE_CODE (type1) != FUNCTION_TYPE)
1178 /* Disallow mixed near/far attributes. */
1179 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1181 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1187 /* Implement TARGET_INSERT_ATTRIBUTES. */
1190 mips_insert_attributes (tree decl, tree *attributes)
1193 bool mips16_p, nomips16_p;
1195 /* Check for "mips16" and "nomips16" attributes. */
1196 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1197 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1198 if (TREE_CODE (decl) != FUNCTION_DECL)
1201 error ("%qs attribute only applies to functions", "mips16");
1203 error ("%qs attribute only applies to functions", "nomips16");
1207 mips16_p |= mips_mips16_decl_p (decl);
1208 nomips16_p |= mips_nomips16_decl_p (decl);
1209 if (mips16_p || nomips16_p)
1211 /* DECL cannot be simultaneously mips16 and nomips16. */
1212 if (mips16_p && nomips16_p)
1213 error ("%qs cannot have both %<mips16%> and "
1214 "%<nomips16%> attributes",
1215 IDENTIFIER_POINTER (DECL_NAME (decl)));
1217 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1219 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1220 "mips16" attribute, arbitrarily pick one. We must pick the same
1221 setting for duplicate declarations of a function. */
1222 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1223 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1228 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1231 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1233 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1234 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1235 error ("%qs redeclared with conflicting %qs attributes",
1236 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1237 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1238 error ("%qs redeclared with conflicting %qs attributes",
1239 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1241 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1242 DECL_ATTRIBUTES (newdecl));
1245 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1246 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1249 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1251 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1253 *base_ptr = XEXP (x, 0);
1254 *offset_ptr = INTVAL (XEXP (x, 1));
1263 static unsigned int mips_build_integer (struct mips_integer_op *,
1264 unsigned HOST_WIDE_INT);
1266 /* Subroutine of mips_build_integer (with the same interface).
1267 Assume that the final action in the sequence should be a left shift. */
1270 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1272 unsigned int i, shift;
1274 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1275 since signed numbers are easier to load than unsigned ones. */
1277 while ((value & 1) == 0)
1278 value /= 2, shift++;
1280 i = mips_build_integer (codes, value);
1281 codes[i].code = ASHIFT;
1282 codes[i].value = shift;
1287 /* As for mips_build_shift, but assume that the final action will be
1288 an IOR or PLUS operation. */
1291 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1293 unsigned HOST_WIDE_INT high;
1296 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1297 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1299 /* The constant is too complex to load with a simple lui/ori pair
1300 so our goal is to clear as many trailing zeros as possible.
1301 In this case, we know bit 16 is set and that the low 16 bits
1302 form a negative number. If we subtract that number from VALUE,
1303 we will clear at least the lowest 17 bits, maybe more. */
1304 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1305 codes[i].code = PLUS;
1306 codes[i].value = CONST_LOW_PART (value);
1310 i = mips_build_integer (codes, high);
1311 codes[i].code = IOR;
1312 codes[i].value = value & 0xffff;
1318 /* Fill CODES with a sequence of rtl operations to load VALUE.
1319 Return the number of operations needed. */
1322 mips_build_integer (struct mips_integer_op *codes,
1323 unsigned HOST_WIDE_INT value)
1325 if (SMALL_OPERAND (value)
1326 || SMALL_OPERAND_UNSIGNED (value)
1327 || LUI_OPERAND (value))
1329 /* The value can be loaded with a single instruction. */
1330 codes[0].code = UNKNOWN;
1331 codes[0].value = value;
1334 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1336 /* Either the constant is a simple LUI/ORI combination or its
1337 lowest bit is set. We don't want to shift in this case. */
1338 return mips_build_lower (codes, value);
1340 else if ((value & 0xffff) == 0)
1342 /* The constant will need at least three actions. The lowest
1343 16 bits are clear, so the final action will be a shift. */
1344 return mips_build_shift (codes, value);
1348 /* The final action could be a shift, add or inclusive OR.
1349 Rather than use a complex condition to select the best
1350 approach, try both mips_build_shift and mips_build_lower
1351 and pick the one that gives the shortest sequence.
1352 Note that this case is only used once per constant. */
1353 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1354 unsigned int cost, alt_cost;
1356 cost = mips_build_shift (codes, value);
1357 alt_cost = mips_build_lower (alt_codes, value);
1358 if (alt_cost < cost)
1360 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1367 /* Return true if X is a thread-local symbol. */
1370 mips_tls_operand_p (rtx x)
1372 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1375 /* Return true if SYMBOL_REF X is associated with a global symbol
1376 (in the STB_GLOBAL sense). */
1379 mips_global_symbol_p (const_rtx x)
1381 const_tree const decl = SYMBOL_REF_DECL (x);
1384 return !SYMBOL_REF_LOCAL_P (x);
1386 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1387 or weak symbols. Relocations in the object file will be against
1388 the target symbol, so it's that symbol's binding that matters here. */
1389 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1392 /* Return true if SYMBOL_REF X binds locally. */
1395 mips_symbol_binds_local_p (const_rtx x)
1397 return (SYMBOL_REF_DECL (x)
1398 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1399 : SYMBOL_REF_LOCAL_P (x));
1402 /* Return true if rtx constants of mode MODE should be put into a small
1406 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1408 return (!TARGET_EMBEDDED_DATA
1409 && TARGET_LOCAL_SDATA
1410 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1413 /* Return true if X should not be moved directly into register $25.
1414 We need this because many versions of GAS will treat "la $25,foo" as
1415 part of a call sequence and so allow a global "foo" to be lazily bound. */
1418 mips_dangerous_for_la25_p (rtx x)
1420 return (!TARGET_EXPLICIT_RELOCS
1422 && GET_CODE (x) == SYMBOL_REF
1423 && mips_global_symbol_p (x));
1426 /* Return the method that should be used to access SYMBOL_REF or
1427 LABEL_REF X in context CONTEXT. */
1429 static enum mips_symbol_type
1430 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1433 return SYMBOL_GOT_DISP;
1435 if (GET_CODE (x) == LABEL_REF)
1437 /* LABEL_REFs are used for jump tables as well as text labels.
1438 Only return SYMBOL_PC_RELATIVE if we know the label is in
1439 the text section. */
1440 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1441 return SYMBOL_PC_RELATIVE;
1442 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1443 return SYMBOL_GOT_PAGE_OFST;
1444 return SYMBOL_ABSOLUTE;
1447 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1449 if (SYMBOL_REF_TLS_MODEL (x))
1452 if (CONSTANT_POOL_ADDRESS_P (x))
1454 if (TARGET_MIPS16_TEXT_LOADS)
1455 return SYMBOL_PC_RELATIVE;
1457 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1458 return SYMBOL_PC_RELATIVE;
1460 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1461 return SYMBOL_GP_RELATIVE;
1464 /* Do not use small-data accesses for weak symbols; they may end up
1467 && SYMBOL_REF_SMALL_P (x)
1468 && !SYMBOL_REF_WEAK (x))
1469 return SYMBOL_GP_RELATIVE;
1471 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1474 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1476 /* There are three cases to consider:
1478 - o32 PIC (either with or without explicit relocs)
1479 - n32/n64 PIC without explicit relocs
1480 - n32/n64 PIC with explicit relocs
1482 In the first case, both local and global accesses will use an
1483 R_MIPS_GOT16 relocation. We must correctly predict which of
1484 the two semantics (local or global) the assembler and linker
1485 will apply. The choice depends on the symbol's binding rather
1486 than its visibility.
1488 In the second case, the assembler will not use R_MIPS_GOT16
1489 relocations, but it chooses between local and global accesses
1490 in the same way as for o32 PIC.
1492 In the third case we have more freedom since both forms of
1493 access will work for any kind of symbol. However, there seems
1494 little point in doing things differently. */
1495 if (mips_global_symbol_p (x))
1496 return SYMBOL_GOT_DISP;
1498 return SYMBOL_GOT_PAGE_OFST;
1501 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1502 return SYMBOL_FORCE_TO_MEM;
1503 return SYMBOL_ABSOLUTE;
1506 /* Classify symbolic expression X, given that it appears in context
1509 static enum mips_symbol_type
1510 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1514 split_const (x, &x, &offset);
1515 if (UNSPEC_ADDRESS_P (x))
1516 return UNSPEC_ADDRESS_TYPE (x);
1518 return mips_classify_symbol (x, context);
1521 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1522 is the alignment (in bytes) of SYMBOL_REF X. */
1525 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1527 /* If for some reason we can't get the alignment for the
1528 symbol, initializing this to one means we will only accept
1530 HOST_WIDE_INT align = 1;
1533 /* Get the alignment of the symbol we're referring to. */
1534 t = SYMBOL_REF_DECL (x);
1536 align = DECL_ALIGN_UNIT (t);
1538 return offset >= 0 && offset < align;
1541 /* Return true if X is a symbolic constant that can be used in context
1542 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1545 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1546 enum mips_symbol_type *symbol_type)
1550 split_const (x, &x, &offset);
1551 if (UNSPEC_ADDRESS_P (x))
1553 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1554 x = UNSPEC_ADDRESS (x);
1556 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1558 *symbol_type = mips_classify_symbol (x, context);
1559 if (*symbol_type == SYMBOL_TLS)
1565 if (offset == const0_rtx)
1568 /* Check whether a nonzero offset is valid for the underlying
1570 switch (*symbol_type)
1572 case SYMBOL_ABSOLUTE:
1573 case SYMBOL_FORCE_TO_MEM:
1574 case SYMBOL_32_HIGH:
1575 case SYMBOL_64_HIGH:
1578 /* If the target has 64-bit pointers and the object file only
1579 supports 32-bit symbols, the values of those symbols will be
1580 sign-extended. In this case we can't allow an arbitrary offset
1581 in case the 32-bit value X + OFFSET has a different sign from X. */
1582 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1583 return offset_within_block_p (x, INTVAL (offset));
1585 /* In other cases the relocations can handle any offset. */
1588 case SYMBOL_PC_RELATIVE:
1589 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1590 In this case, we no longer have access to the underlying constant,
1591 but the original symbol-based access was known to be valid. */
1592 if (GET_CODE (x) == LABEL_REF)
1597 case SYMBOL_GP_RELATIVE:
1598 /* Make sure that the offset refers to something within the
1599 same object block. This should guarantee that the final
1600 PC- or GP-relative offset is within the 16-bit limit. */
1601 return offset_within_block_p (x, INTVAL (offset));
1603 case SYMBOL_GOT_PAGE_OFST:
1604 case SYMBOL_GOTOFF_PAGE:
1605 /* If the symbol is global, the GOT entry will contain the symbol's
1606 address, and we will apply a 16-bit offset after loading it.
1607 If the symbol is local, the linker should provide enough local
1608 GOT entries for a 16-bit offset, but larger offsets may lead
1610 return SMALL_INT (offset);
1614 /* There is no carry between the HI and LO REL relocations, so the
1615 offset is only valid if we know it won't lead to such a carry. */
1616 return mips_offset_within_alignment_p (x, INTVAL (offset));
1618 case SYMBOL_GOT_DISP:
1619 case SYMBOL_GOTOFF_DISP:
1620 case SYMBOL_GOTOFF_CALL:
1621 case SYMBOL_GOTOFF_LOADGP:
1624 case SYMBOL_GOTTPREL:
1632 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1633 single instruction. We rely on the fact that, in the worst case,
1634 all instructions involved in a MIPS16 address calculation are usually
1638 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1642 case SYMBOL_ABSOLUTE:
1643 /* When using 64-bit symbols, we need 5 preparatory instructions,
1646 lui $at,%highest(symbol)
1647 daddiu $at,$at,%higher(symbol)
1649 daddiu $at,$at,%hi(symbol)
1652 The final address is then $at + %lo(symbol). With 32-bit
1653 symbols we just need a preparatory lui for normal mode and
1654 a preparatory "li; sll" for MIPS16. */
1655 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1657 case SYMBOL_GP_RELATIVE:
1658 /* Treat GP-relative accesses as taking a single instruction on
1659 MIPS16 too; the copy of $gp can often be shared. */
1662 case SYMBOL_PC_RELATIVE:
1663 /* PC-relative constants can be only be used with addiupc,
1665 if (mode == MAX_MACHINE_MODE
1666 || GET_MODE_SIZE (mode) == 4
1667 || GET_MODE_SIZE (mode) == 8)
1670 /* The constant must be loaded using addiupc first. */
1673 case SYMBOL_FORCE_TO_MEM:
1674 /* LEAs will be converted into constant-pool references by
1676 if (mode == MAX_MACHINE_MODE)
1679 /* The constant must be loaded from the constant pool. */
1682 case SYMBOL_GOT_DISP:
1683 /* The constant will have to be loaded from the GOT before it
1684 is used in an address. */
1685 if (mode != MAX_MACHINE_MODE)
1690 case SYMBOL_GOT_PAGE_OFST:
1691 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1692 the local/global classification is accurate. See override_options
1695 The worst cases are:
1697 (1) For local symbols when generating o32 or o64 code. The assembler
1703 ...and the final address will be $at + %lo(symbol).
1705 (2) For global symbols when -mxgot. The assembler will use:
1707 lui $at,%got_hi(symbol)
1710 ...and the final address will be $at + %got_lo(symbol). */
1713 case SYMBOL_GOTOFF_PAGE:
1714 case SYMBOL_GOTOFF_DISP:
1715 case SYMBOL_GOTOFF_CALL:
1716 case SYMBOL_GOTOFF_LOADGP:
1717 case SYMBOL_32_HIGH:
1718 case SYMBOL_64_HIGH:
1724 case SYMBOL_GOTTPREL:
1727 /* A 16-bit constant formed by a single relocation, or a 32-bit
1728 constant formed from a high 16-bit relocation and a low 16-bit
1729 relocation. Use mips_split_p to determine which. */
1730 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1733 /* We don't treat a bare TLS symbol as a constant. */
1739 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1740 to load symbols of type TYPE into a register. Return 0 if the given
1741 type of symbol cannot be used as an immediate operand.
1743 Otherwise, return the number of instructions needed to load or store
1744 values of mode MODE to or from addresses of type TYPE. Return 0 if
1745 the given type of symbol is not valid in addresses.
1747 In both cases, treat extended MIPS16 instructions as two instructions. */
1750 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1752 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1755 /* Return true if X can not be forced into a constant pool. */
1758 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1760 return mips_tls_operand_p (*x);
1763 /* Return true if X can not be forced into a constant pool. */
1766 mips_cannot_force_const_mem (rtx x)
1772 /* As an optimization, reject constants that mips_legitimize_move
1775 Suppose we have a multi-instruction sequence that loads constant C
1776 into register R. If R does not get allocated a hard register, and
1777 R is used in an operand that allows both registers and memory
1778 references, reload will consider forcing C into memory and using
1779 one of the instruction's memory alternatives. Returning false
1780 here will force it to use an input reload instead. */
1781 if (GET_CODE (x) == CONST_INT)
1784 split_const (x, &base, &offset);
1785 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1789 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1795 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1796 constants when we're using a per-function constant pool. */
1799 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1800 const_rtx x ATTRIBUTE_UNUSED)
1802 return !TARGET_MIPS16_PCREL_LOADS;
1805 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1808 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1810 if (!HARD_REGISTER_NUM_P (regno))
1814 regno = reg_renumber[regno];
1817 /* These fake registers will be eliminated to either the stack or
1818 hard frame pointer, both of which are usually valid base registers.
1819 Reload deals with the cases where the eliminated form isn't valid. */
1820 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1823 /* In mips16 mode, the stack pointer can only address word and doubleword
1824 values, nothing smaller. There are two problems here:
1826 (a) Instantiating virtual registers can introduce new uses of the
1827 stack pointer. If these virtual registers are valid addresses,
1828 the stack pointer should be too.
1830 (b) Most uses of the stack pointer are not made explicit until
1831 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1832 We don't know until that stage whether we'll be eliminating to the
1833 stack pointer (which needs the restriction) or the hard frame
1834 pointer (which doesn't).
1836 All in all, it seems more consistent to only enforce this restriction
1837 during and after reload. */
1838 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1839 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1841 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1845 /* Return true if X is a valid base register for the given mode.
1846 Allow only hard registers if STRICT. */
1849 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1851 if (!strict && GET_CODE (x) == SUBREG)
1855 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1859 /* Return true if X is a valid address for machine mode MODE. If it is,
1860 fill in INFO appropriately. STRICT is true if we should only accept
1861 hard base registers. */
1864 mips_classify_address (struct mips_address_info *info, rtx x,
1865 enum machine_mode mode, int strict)
1867 switch (GET_CODE (x))
1871 info->type = ADDRESS_REG;
1873 info->offset = const0_rtx;
1874 return mips_valid_base_register_p (info->reg, mode, strict);
1877 info->type = ADDRESS_REG;
1878 info->reg = XEXP (x, 0);
1879 info->offset = XEXP (x, 1);
1880 return (mips_valid_base_register_p (info->reg, mode, strict)
1881 && const_arith_operand (info->offset, VOIDmode));
1884 info->type = ADDRESS_LO_SUM;
1885 info->reg = XEXP (x, 0);
1886 info->offset = XEXP (x, 1);
1887 /* We have to trust the creator of the LO_SUM to do something vaguely
1888 sane. Target-independent code that creates a LO_SUM should also
1889 create and verify the matching HIGH. Target-independent code that
1890 adds an offset to a LO_SUM must prove that the offset will not
1891 induce a carry. Failure to do either of these things would be
1892 a bug, and we are not required to check for it here. The MIPS
1893 backend itself should only create LO_SUMs for valid symbolic
1894 constants, with the high part being either a HIGH or a copy
1897 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1898 return (mips_valid_base_register_p (info->reg, mode, strict)
1899 && mips_symbol_insns (info->symbol_type, mode) > 0
1900 && mips_lo_relocs[info->symbol_type] != 0);
1903 /* Small-integer addresses don't occur very often, but they
1904 are legitimate if $0 is a valid base register. */
1905 info->type = ADDRESS_CONST_INT;
1906 return !TARGET_MIPS16 && SMALL_INT (x);
1911 info->type = ADDRESS_SYMBOLIC;
1912 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1914 && mips_symbol_insns (info->symbol_type, mode) > 0
1915 && !mips_split_p[info->symbol_type]);
1922 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1923 returns a nonzero value if X is a legitimate address for a memory
1924 operand of the indicated MODE. STRICT is nonzero if this function
1925 is called during reload. */
1928 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1930 struct mips_address_info addr;
1932 return mips_classify_address (&addr, x, mode, strict);
1935 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1938 mips_stack_address_p (rtx x, enum machine_mode mode)
1940 struct mips_address_info addr;
1942 return (mips_classify_address (&addr, x, mode, false)
1943 && addr.type == ADDRESS_REG
1944 && addr.reg == stack_pointer_rtx);
1947 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
1948 address instruction. */
1951 mips_lwxs_address_p (rtx addr)
1954 && GET_CODE (addr) == PLUS
1955 && REG_P (XEXP (addr, 1)))
1957 rtx offset = XEXP (addr, 0);
1958 if (GET_CODE (offset) == MULT
1959 && REG_P (XEXP (offset, 0))
1960 && GET_CODE (XEXP (offset, 1)) == CONST_INT
1961 && INTVAL (XEXP (offset, 1)) == 4)
1967 /* Return true if a value at OFFSET bytes from BASE can be accessed
1968 using an unextended mips16 instruction. MODE is the mode of the
1971 Usually the offset in an unextended instruction is a 5-bit field.
1972 The offset is unsigned and shifted left once for HIs, twice
1973 for SIs, and so on. An exception is SImode accesses off the
1974 stack pointer, which have an 8-bit immediate field. */
1977 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1980 && GET_CODE (offset) == CONST_INT
1981 && INTVAL (offset) >= 0
1982 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1984 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1985 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1986 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1992 /* Return the number of instructions needed to load or store a value
1993 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
1994 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
1995 otherwise assume that a single load or store is enough.
1997 For mips16 code, count extended instructions as two instructions. */
2000 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
2002 struct mips_address_info addr;
2005 /* BLKmode is used for single unaligned loads and stores and should
2006 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
2007 meaningless, so we have to single it out as a special case one way
2009 if (mode != BLKmode && might_split_p)
2010 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2014 if (mips_classify_address (&addr, x, mode, false))
2019 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
2023 case ADDRESS_LO_SUM:
2024 return (TARGET_MIPS16 ? factor * 2 : factor);
2026 case ADDRESS_CONST_INT:
2029 case ADDRESS_SYMBOLIC:
2030 return factor * mips_symbol_insns (addr.symbol_type, mode);
2036 /* Likewise for constant X. */
2039 mips_const_insns (rtx x)
2041 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2042 enum mips_symbol_type symbol_type;
2045 switch (GET_CODE (x))
2048 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2050 || !mips_split_p[symbol_type])
2053 /* This is simply an lui for normal mode. It is an extended
2054 "li" followed by an extended "sll" for MIPS16. */
2055 return TARGET_MIPS16 ? 4 : 1;
2059 /* Unsigned 8-bit constants can be loaded using an unextended
2060 LI instruction. Unsigned 16-bit constants can be loaded
2061 using an extended LI. Negative constants must be loaded
2062 using LI and then negated. */
2063 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2064 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2065 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2066 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2069 return mips_build_integer (codes, INTVAL (x));
2073 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2079 /* See if we can refer to X directly. */
2080 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2081 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2083 /* Otherwise try splitting the constant into a base and offset.
2084 16-bit offsets can be added using an extra addiu. Larger offsets
2085 must be calculated separately and then added to the base. */
2086 split_const (x, &x, &offset);
2089 int n = mips_const_insns (x);
2092 if (SMALL_INT (offset))
2095 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2102 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2111 /* Return the number of instructions needed to implement INSN,
2112 given that it loads from or stores to MEM. Count extended
2113 mips16 instructions as two instructions. */
2116 mips_load_store_insns (rtx mem, rtx insn)
2118 enum machine_mode mode;
2122 gcc_assert (MEM_P (mem));
2123 mode = GET_MODE (mem);
2125 /* Try to prove that INSN does not need to be split. */
2126 might_split_p = true;
2127 if (GET_MODE_BITSIZE (mode) == 64)
2129 set = single_set (insn);
2130 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2131 might_split_p = false;
2134 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2138 /* Return the number of instructions needed for an integer division. */
2141 mips_idiv_insns (void)
2146 if (TARGET_CHECK_ZERO_DIV)
2148 if (GENERATE_DIVIDE_TRAPS)
2154 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2159 /* Emit a move from SRC to DEST. Assume that the move expanders can
2160 handle all moves if !can_create_pseudo_p (). The distinction is
2161 important because, unlike emit_move_insn, the move expanders know
2162 how to force Pmode objects into the constant pool even when the
2163 constant pool address is not itself legitimate. */
2166 mips_emit_move (rtx dest, rtx src)
2168 return (can_create_pseudo_p ()
2169 ? emit_move_insn (dest, src)
2170 : emit_move_insn_1 (dest, src));
2173 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2176 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2178 emit_insn (gen_rtx_SET (VOIDmode, target,
2179 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2182 /* Copy VALUE to a register and return that register. If new psuedos
2183 are allowed, copy it into a new register, otherwise use DEST. */
2186 mips_force_temporary (rtx dest, rtx value)
2188 if (can_create_pseudo_p ())
2189 return force_reg (Pmode, value);
2192 mips_emit_move (copy_rtx (dest), value);
2197 /* If we can access small data directly (using gp-relative relocation
2198 operators) return the small data pointer, otherwise return null.
2200 For each mips16 function which refers to GP relative symbols, we
2201 use a pseudo register, initialized at the start of the function, to
2202 hold the $gp value. */
2205 mips16_gp_pseudo_reg (void)
2207 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2208 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2210 /* Don't initialize the pseudo register if we are being called from
2211 the tree optimizers' cost-calculation routines. */
2212 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2213 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2217 /* We want to initialize this to a value which gcc will believe
2219 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2221 push_topmost_sequence ();
2222 /* We need to emit the initialization after the FUNCTION_BEG
2223 note, so that it will be integrated. */
2224 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
2226 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
2228 if (scan == NULL_RTX)
2229 scan = get_insns ();
2230 insn = emit_insn_after (insn, scan);
2231 pop_topmost_sequence ();
2233 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2236 return cfun->machine->mips16_gp_pseudo_rtx;
2239 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2240 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2241 constant in that context and can be split into a high part and a LO_SUM.
2242 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2243 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2245 TEMP is as for mips_force_temporary and is used to load the high
2246 part into a register. */
2249 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2251 enum mips_symbol_context context;
2252 enum mips_symbol_type symbol_type;
2255 context = (mode == MAX_MACHINE_MODE
2256 ? SYMBOL_CONTEXT_LEA
2257 : SYMBOL_CONTEXT_MEM);
2258 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2259 || mips_symbol_insns (symbol_type, mode) == 0
2260 || !mips_split_p[symbol_type])
2265 if (symbol_type == SYMBOL_GP_RELATIVE)
2267 if (!can_create_pseudo_p ())
2269 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2273 high = mips16_gp_pseudo_reg ();
2277 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2278 high = mips_force_temporary (temp, high);
2280 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2286 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2287 and add CONST_INT OFFSET to the result. */
2290 mips_unspec_address_offset (rtx base, rtx offset,
2291 enum mips_symbol_type symbol_type)
2293 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2294 UNSPEC_ADDRESS_FIRST + symbol_type);
2295 if (offset != const0_rtx)
2296 base = gen_rtx_PLUS (Pmode, base, offset);
2297 return gen_rtx_CONST (Pmode, base);
2300 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2301 type SYMBOL_TYPE. */
2304 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2308 split_const (address, &base, &offset);
2309 return mips_unspec_address_offset (base, offset, symbol_type);
2313 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2314 high part to BASE and return the result. Just return BASE otherwise.
2315 TEMP is available as a temporary register if needed.
2317 The returned expression can be used as the first operand to a LO_SUM. */
2320 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2321 enum mips_symbol_type symbol_type)
2323 if (mips_split_p[symbol_type])
2325 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2326 addr = mips_force_temporary (temp, addr);
2327 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2333 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2334 mips_force_temporary; it is only needed when OFFSET is not a
2338 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2340 if (!SMALL_OPERAND (offset))
2345 /* Load the full offset into a register so that we can use
2346 an unextended instruction for the address itself. */
2347 high = GEN_INT (offset);
2352 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2353 high = GEN_INT (CONST_HIGH_PART (offset));
2354 offset = CONST_LOW_PART (offset);
2356 high = mips_force_temporary (temp, high);
2357 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2359 return plus_constant (reg, offset);
2362 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2363 referencing, and TYPE is the symbol type to use (either global
2364 dynamic or local dynamic). V0 is an RTX for the return value
2365 location. The entire insn sequence is returned. */
2367 static GTY(()) rtx mips_tls_symbol;
2370 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2372 rtx insn, loc, tga, a0;
2374 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2376 if (!mips_tls_symbol)
2377 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2379 loc = mips_unspec_address (sym, type);
2383 emit_insn (gen_rtx_SET (Pmode, a0,
2384 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2385 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2386 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2387 CONST_OR_PURE_CALL_P (insn) = 1;
2388 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2389 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2390 insn = get_insns ();
2397 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2398 return value will be a valid address and move_operand (either a REG
2402 mips_legitimize_tls_address (rtx loc)
2404 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2405 enum tls_model model;
2409 sorry ("MIPS16 TLS");
2410 return gen_reg_rtx (Pmode);
2413 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2414 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2416 model = SYMBOL_REF_TLS_MODEL (loc);
2417 /* Only TARGET_ABICALLS code can have more than one module; other
2418 code must be be static and should not use a GOT. All TLS models
2419 reduce to local exec in this situation. */
2420 if (!TARGET_ABICALLS)
2421 model = TLS_MODEL_LOCAL_EXEC;
2425 case TLS_MODEL_GLOBAL_DYNAMIC:
2426 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2427 dest = gen_reg_rtx (Pmode);
2428 emit_libcall_block (insn, dest, v0, loc);
2431 case TLS_MODEL_LOCAL_DYNAMIC:
2432 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2433 tmp1 = gen_reg_rtx (Pmode);
2435 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2436 share the LDM result with other LD model accesses. */
2437 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2439 emit_libcall_block (insn, tmp1, v0, eqv);
2441 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2442 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2443 mips_unspec_address (loc, SYMBOL_DTPREL));
2446 case TLS_MODEL_INITIAL_EXEC:
2447 tmp1 = gen_reg_rtx (Pmode);
2448 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2449 if (Pmode == DImode)
2451 emit_insn (gen_tls_get_tp_di (v1));
2452 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2456 emit_insn (gen_tls_get_tp_si (v1));
2457 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2459 dest = gen_reg_rtx (Pmode);
2460 emit_insn (gen_add3_insn (dest, tmp1, v1));
2463 case TLS_MODEL_LOCAL_EXEC:
2464 if (Pmode == DImode)
2465 emit_insn (gen_tls_get_tp_di (v1));
2467 emit_insn (gen_tls_get_tp_si (v1));
2469 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2470 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2471 mips_unspec_address (loc, SYMBOL_TPREL));
2481 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2482 be legitimized in a way that the generic machinery might not expect,
2483 put the new address in *XLOC and return true. MODE is the mode of
2484 the memory being accessed. */
2487 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2489 if (mips_tls_operand_p (*xloc))
2491 *xloc = mips_legitimize_tls_address (*xloc);
2495 /* See if the address can split into a high part and a LO_SUM. */
2496 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2499 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2501 /* Handle REG + CONSTANT using mips_add_offset. */
2504 reg = XEXP (*xloc, 0);
2505 if (!mips_valid_base_register_p (reg, mode, 0))
2506 reg = copy_to_mode_reg (Pmode, reg);
2507 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2515 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2518 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2520 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2521 enum machine_mode mode;
2522 unsigned int i, cost;
2525 mode = GET_MODE (dest);
2526 cost = mips_build_integer (codes, value);
2528 /* Apply each binary operation to X. Invariant: X is a legitimate
2529 source operand for a SET pattern. */
2530 x = GEN_INT (codes[0].value);
2531 for (i = 1; i < cost; i++)
2533 if (!can_create_pseudo_p ())
2535 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2539 x = force_reg (mode, x);
2540 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2543 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2547 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2548 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2552 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2556 /* Split moves of big integers into smaller pieces. */
2557 if (splittable_const_int_operand (src, mode))
2559 mips_move_integer (dest, dest, INTVAL (src));
2563 /* Split moves of symbolic constants into high/low pairs. */
2564 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2566 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2570 if (mips_tls_operand_p (src))
2572 mips_emit_move (dest, mips_legitimize_tls_address (src));
2576 /* If we have (const (plus symbol offset)), and that expression cannot
2577 be forced into memory, load the symbol first and add in the offset.
2578 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2579 forced into memory, as it usually produces better code. */
2580 split_const (src, &base, &offset);
2581 if (offset != const0_rtx
2582 && (targetm.cannot_force_const_mem (src)
2583 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2585 base = mips_force_temporary (dest, base);
2586 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2590 src = force_const_mem (mode, src);
2592 /* When using explicit relocs, constant pool references are sometimes
2593 not legitimate addresses. */
2594 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2595 mips_emit_move (dest, src);
2599 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2600 sequence that is valid. */
2603 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2605 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2607 mips_emit_move (dest, force_reg (mode, src));
2611 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2612 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2613 && REG_P (src) && MD_REG_P (REGNO (src))
2614 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2616 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2617 if (GET_MODE_SIZE (mode) <= 4)
2618 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2619 gen_rtx_REG (SImode, REGNO (src)),
2620 gen_rtx_REG (SImode, other_regno)));
2622 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2623 gen_rtx_REG (DImode, REGNO (src)),
2624 gen_rtx_REG (DImode, other_regno)));
2628 /* We need to deal with constants that would be legitimate
2629 immediate_operands but not legitimate move_operands. */
2630 if (CONSTANT_P (src) && !move_operand (src, mode))
2632 mips_legitimize_const_move (mode, dest, src);
2633 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2639 /* Return true if X in context CONTEXT is a small data address that can
2640 be rewritten as a LO_SUM. */
2643 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2645 enum mips_symbol_type symbol_type;
2647 return (TARGET_EXPLICIT_RELOCS
2648 && mips_symbolic_constant_p (x, context, &symbol_type)
2649 && symbol_type == SYMBOL_GP_RELATIVE);
2653 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2654 containing MEM, or null if none. */
2657 mips_small_data_pattern_1 (rtx *loc, void *data)
2659 enum mips_symbol_context context;
2661 if (GET_CODE (*loc) == LO_SUM)
2666 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2671 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2672 return mips_rewrite_small_data_p (*loc, context);
2675 /* Return true if OP refers to small data symbols directly, not through
2679 mips_small_data_pattern_p (rtx op)
2681 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
2684 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2685 DATA is the containing MEM, or null if none. */
2688 mips_rewrite_small_data_1 (rtx *loc, void *data)
2690 enum mips_symbol_context context;
2694 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2698 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2699 if (mips_rewrite_small_data_p (*loc, context))
2700 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2702 if (GET_CODE (*loc) == LO_SUM)
2708 /* If possible, rewrite OP so that it refers to small data using
2709 explicit relocations. */
2712 mips_rewrite_small_data (rtx op)
2714 op = copy_insn (op);
2715 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
2719 /* We need a lot of little routines to check constant values on the
2720 mips16. These are used to figure out how long the instruction will
2721 be. It would be much better to do this using constraints, but
2722 there aren't nearly enough letters available. */
2725 m16_check_op (rtx op, int low, int high, int mask)
2727 return (GET_CODE (op) == CONST_INT
2728 && INTVAL (op) >= low
2729 && INTVAL (op) <= high
2730 && (INTVAL (op) & mask) == 0);
2734 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2736 return m16_check_op (op, 0x1, 0x8, 0);
2740 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2742 return m16_check_op (op, - 0x8, 0x7, 0);
2746 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2748 return m16_check_op (op, - 0x7, 0x8, 0);
2752 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2754 return m16_check_op (op, - 0x10, 0xf, 0);
2758 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2760 return m16_check_op (op, - 0xf, 0x10, 0);
2764 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2766 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2770 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2772 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2776 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2778 return m16_check_op (op, - 0x80, 0x7f, 0);
2782 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2784 return m16_check_op (op, - 0x7f, 0x80, 0);
2788 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2790 return m16_check_op (op, 0x0, 0xff, 0);
2794 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2796 return m16_check_op (op, - 0xff, 0x0, 0);
2800 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2802 return m16_check_op (op, - 0x1, 0xfe, 0);
2806 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2808 return m16_check_op (op, 0x0, 0xff << 2, 3);
2812 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2814 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2818 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2820 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2824 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2826 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2829 /* The cost of loading values from the constant pool. It should be
2830 larger than the cost of any constant we want to synthesize inline. */
2832 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2834 /* Return the cost of X when used as an operand to the MIPS16 instruction
2835 that implements CODE. Return -1 if there is no such instruction, or if
2836 X is not a valid immediate operand for it. */
2839 mips16_constant_cost (int code, HOST_WIDE_INT x)
2846 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2847 other shifts are extended. The shift patterns truncate the shift
2848 count to the right size, so there are no out-of-range values. */
2849 if (IN_RANGE (x, 1, 8))
2851 return COSTS_N_INSNS (1);
2854 if (IN_RANGE (x, -128, 127))
2856 if (SMALL_OPERAND (x))
2857 return COSTS_N_INSNS (1);
2861 /* Like LE, but reject the always-true case. */
2865 /* We add 1 to the immediate and use SLT. */
2868 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2871 if (IN_RANGE (x, 0, 255))
2873 if (SMALL_OPERAND_UNSIGNED (x))
2874 return COSTS_N_INSNS (1);
2879 /* Equality comparisons with 0 are cheap. */
2889 /* Return true if there is a non-MIPS16 instruction that implements CODE
2890 and if that instruction accepts X as an immediate operand. */
2893 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2900 /* All shift counts are truncated to a valid constant. */
2905 /* Likewise rotates, if the target supports rotates at all. */
2911 /* These instructions take 16-bit unsigned immediates. */
2912 return SMALL_OPERAND_UNSIGNED (x);
2917 /* These instructions take 16-bit signed immediates. */
2918 return SMALL_OPERAND (x);
2924 /* The "immediate" forms of these instructions are really
2925 implemented as comparisons with register 0. */
2930 /* Likewise, meaning that the only valid immediate operand is 1. */
2934 /* We add 1 to the immediate and use SLT. */
2935 return SMALL_OPERAND (x + 1);
2938 /* Likewise SLTU, but reject the always-true case. */
2939 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2943 /* The bit position and size are immediate operands. */
2944 return ISA_HAS_EXT_INS;
2947 /* By default assume that $0 can be used for 0. */
2952 /* Return the cost of binary operation X, given that the instruction
2953 sequence for a word-sized or smaller operation has cost SINGLE_COST
2954 and that the sequence of a double-word operation has cost DOUBLE_COST. */
2957 mips_binary_cost (rtx x, int single_cost, int double_cost)
2961 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
2966 + rtx_cost (XEXP (x, 0), 0)
2967 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
2970 /* Return the cost of floating-point multiplications of mode MODE. */
2973 mips_fp_mult_cost (enum machine_mode mode)
2975 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
2978 /* Return the cost of floating-point divisions of mode MODE. */
2981 mips_fp_div_cost (enum machine_mode mode)
2983 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
2986 /* Return the cost of sign-extending OP to mode MODE, not including the
2987 cost of OP itself. */
2990 mips_sign_extend_cost (enum machine_mode mode, rtx op)
2993 /* Extended loads are as cheap as unextended ones. */
2996 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2997 /* A sign extension from SImode to DImode in 64-bit mode is free. */
3000 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
3001 /* We can use SEB or SEH. */
3002 return COSTS_N_INSNS (1);
3004 /* We need to use a shift left and a shift right. */
3005 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3008 /* Return the cost of zero-extending OP to mode MODE, not including the
3009 cost of OP itself. */
3012 mips_zero_extend_cost (enum machine_mode mode, rtx op)
3015 /* Extended loads are as cheap as unextended ones. */
3018 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
3019 /* We need a shift left by 32 bits and a shift right by 32 bits. */
3020 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
3022 if (GENERATE_MIPS16E)
3023 /* We can use ZEB or ZEH. */
3024 return COSTS_N_INSNS (1);
3027 /* We need to load 0xff or 0xffff into a register and use AND. */
3028 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3030 /* We can use ANDI. */
3031 return COSTS_N_INSNS (1);
3034 /* Implement TARGET_RTX_COSTS. */
3037 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3039 enum machine_mode mode = GET_MODE (x);
3040 bool float_mode_p = FLOAT_MODE_P (mode);
3044 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3045 appear in the instruction stream, and the cost of a comparison is
3046 really the cost of the branch or scc condition. At the time of
3047 writing, gcc only uses an explicit outer COMPARE code when optabs
3048 is testing whether a constant is expensive enough to force into a
3049 register. We want optabs to pass such constants through the MIPS
3050 expanders instead, so make all constants very cheap here. */
3051 if (outer_code == COMPARE)
3053 gcc_assert (CONSTANT_P (x));
3061 /* Treat *clear_upper32-style ANDs as having zero cost in the
3062 second operand. The cost is entirely in the first operand.
3064 ??? This is needed because we would otherwise try to CSE
3065 the constant operand. Although that's the right thing for
3066 instructions that continue to be a register operation throughout
3067 compilation, it is disastrous for instructions that could
3068 later be converted into a memory operation. */
3070 && outer_code == AND
3071 && UINTVAL (x) == 0xffffffff)
3079 cost = mips16_constant_cost (outer_code, INTVAL (x));
3088 /* When not optimizing for size, we care more about the cost
3089 of hot code, and hot code is often in a loop. If a constant
3090 operand needs to be forced into a register, we will often be
3091 able to hoist the constant load out of the loop, so the load
3092 should not contribute to the cost. */
3094 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3106 if (force_to_mem_operand (x, VOIDmode))
3108 *total = COSTS_N_INSNS (1);
3111 cost = mips_const_insns (x);
3114 /* If the constant is likely to be stored in a GPR, SETs of
3115 single-insn constants are as cheap as register sets; we
3116 never want to CSE them.
3118 Don't reduce the cost of storing a floating-point zero in
3119 FPRs. If we have a zero in an FPR for other reasons, we
3120 can get better cfg-cleanup and delayed-branch results by
3121 using it consistently, rather than using $0 sometimes and
3122 an FPR at other times. Also, moves between floating-point
3123 registers are sometimes cheaper than (D)MTC1 $0. */
3125 && outer_code == SET
3126 && !(float_mode_p && TARGET_HARD_FLOAT))
3128 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3129 want to CSE the constant itself. It is usually better to
3130 have N copies of the last operation in the sequence and one
3131 shared copy of the other operations. (Note that this is
3132 not true for MIPS16 code, where the final operation in the
3133 sequence is often an extended instruction.)
3135 Also, if we have a CONST_INT, we don't know whether it is
3136 for a word or doubleword operation, so we cannot rely on
3137 the result of mips_build_integer. */
3138 else if (!TARGET_MIPS16
3139 && (outer_code == SET || mode == VOIDmode))
3141 *total = COSTS_N_INSNS (cost);
3144 /* The value will need to be fetched from the constant pool. */
3145 *total = CONSTANT_POOL_COST;
3149 /* If the address is legitimate, return the number of
3150 instructions it needs. */
3152 cost = mips_address_insns (addr, mode, true);
3155 *total = COSTS_N_INSNS (cost + 1);
3158 /* Check for a scaled indexed address. */
3159 if (mips_lwxs_address_p (addr))
3161 *total = COSTS_N_INSNS (2);
3164 /* Otherwise use the default handling. */
3168 *total = COSTS_N_INSNS (6);
3172 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3176 /* Check for a *clear_upper32 pattern and treat it like a zero
3177 extension. See the pattern's comment for details. */
3180 && CONST_INT_P (XEXP (x, 1))
3181 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3183 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3184 + rtx_cost (XEXP (x, 0), 0));
3191 /* Double-word operations use two single-word operations. */
3192 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3200 if (CONSTANT_P (XEXP (x, 1)))
3201 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3203 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3208 *total = mips_cost->fp_add;
3210 *total = COSTS_N_INSNS (4);
3214 /* Low-part immediates need an extended MIPS16 instruction. */
3215 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3216 + rtx_cost (XEXP (x, 0), 0));
3231 /* Branch comparisons have VOIDmode, so use the first operand's
3233 mode = GET_MODE (XEXP (x, 0));
3234 if (FLOAT_MODE_P (mode))
3236 *total = mips_cost->fp_add;
3239 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3244 && ISA_HAS_NMADD_NMSUB
3245 && TARGET_FUSED_MADD
3246 && !HONOR_NANS (mode)
3247 && !HONOR_SIGNED_ZEROS (mode))
3249 /* See if we can use NMADD or NMSUB. See mips.md for the
3250 associated patterns. */
3251 rtx op0 = XEXP (x, 0);
3252 rtx op1 = XEXP (x, 1);
3253 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3255 *total = (mips_fp_mult_cost (mode)
3256 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3257 + rtx_cost (XEXP (op0, 1), 0)
3258 + rtx_cost (op1, 0));
3261 if (GET_CODE (op1) == MULT)
3263 *total = (mips_fp_mult_cost (mode)
3265 + rtx_cost (XEXP (op1, 0), 0)
3266 + rtx_cost (XEXP (op1, 1), 0));
3276 && TARGET_FUSED_MADD
3277 && GET_CODE (XEXP (x, 0)) == MULT)
3280 *total = mips_cost->fp_add;
3284 /* Double-word operations require three single-word operations and
3285 an SLTU. The MIPS16 version then needs to move the result of
3286 the SLTU from $24 to a MIPS16 register. */
3287 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3288 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3293 && ISA_HAS_NMADD_NMSUB
3294 && TARGET_FUSED_MADD
3295 && !HONOR_NANS (mode)
3296 && HONOR_SIGNED_ZEROS (mode))
3298 /* See if we can use NMADD or NMSUB. See mips.md for the
3299 associated patterns. */
3300 rtx op = XEXP (x, 0);
3301 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3302 && GET_CODE (XEXP (op, 0)) == MULT)
3304 *total = (mips_fp_mult_cost (mode)
3305 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3306 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3307 + rtx_cost (XEXP (op, 1), 0));
3313 *total = mips_cost->fp_add;
3315 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3320 *total = mips_fp_mult_cost (mode);
3321 else if (mode == DImode && !TARGET_64BIT)
3322 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3323 where the mulsidi3 always includes an MFHI and an MFLO. */
3324 *total = (optimize_size
3325 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3326 : mips_cost->int_mult_si * 3 + 6);
3327 else if (optimize_size)
3328 *total = (ISA_HAS_MUL3 ? 1 : 2);
3329 else if (mode == DImode)
3330 *total = mips_cost->int_mult_di;
3332 *total = mips_cost->int_mult_si;
3336 /* Check for a reciprocal. */
3337 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3340 && flag_unsafe_math_optimizations
3341 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3343 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3344 division as being free. */
3345 *total = rtx_cost (XEXP (x, 1), 0);
3350 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3360 *total = mips_fp_div_cost (mode);
3369 /* It is our responsibility to make division by a power of 2
3370 as cheap as 2 register additions if we want the division
3371 expanders to be used for such operations; see the setting
3372 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3373 should always produce shorter code than using
3374 expand_sdiv2_pow2. */
3376 && CONST_INT_P (XEXP (x, 1))
3377 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3379 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3382 *total = COSTS_N_INSNS (mips_idiv_insns ());
3384 else if (mode == DImode)
3385 *total = mips_cost->int_div_di;
3387 *total = mips_cost->int_div_si;
3391 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3395 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3399 case UNSIGNED_FLOAT:
3402 case FLOAT_TRUNCATE:
3403 *total = mips_cost->fp_add;
3411 /* Provide the costs of an addressing mode that contains ADDR.
3412 If ADDR is not a valid address, its cost is irrelevant. */
3415 mips_address_cost (rtx addr)
3417 return mips_address_insns (addr, SImode, false);
3420 /* Return one word of double-word value OP, taking into account the fixed
3421 endianness of certain registers. HIGH_P is true to select the high part,
3422 false to select the low part. */
3425 mips_subword (rtx op, int high_p)
3427 unsigned int byte, offset;
3428 enum machine_mode mode;
3430 mode = GET_MODE (op);
3431 if (mode == VOIDmode)
3434 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3435 byte = UNITS_PER_WORD;
3439 if (FP_REG_RTX_P (op))
3441 /* Paired FPRs are always ordered little-endian. */
3442 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3443 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3447 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3449 return simplify_gen_subreg (word_mode, op, mode, byte);
3453 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3456 mips_split_64bit_move_p (rtx dest, rtx src)
3461 /* FP->FP moves can be done in a single instruction. */
3462 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3465 /* Check for floating-point loads and stores. They can be done using
3466 ldc1 and sdc1 on MIPS II and above. */
3469 if (FP_REG_RTX_P (dest) && MEM_P (src))
3471 if (FP_REG_RTX_P (src) && MEM_P (dest))
3478 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3479 this function handles 64-bit moves for which mips_split_64bit_move_p
3480 holds. For 64-bit targets, this function handles 128-bit moves. */
3483 mips_split_doubleword_move (rtx dest, rtx src)
3485 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3487 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3488 emit_insn (gen_move_doubleword_fprdi (dest, src));
3489 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3490 emit_insn (gen_move_doubleword_fprdf (dest, src));
3491 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3492 emit_insn (gen_move_doubleword_fprtf (dest, src));
3498 /* The operation can be split into two normal moves. Decide in
3499 which order to do them. */
3502 low_dest = mips_subword (dest, 0);
3503 if (REG_P (low_dest)
3504 && reg_overlap_mentioned_p (low_dest, src))
3506 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3507 mips_emit_move (low_dest, mips_subword (src, 0));
3511 mips_emit_move (low_dest, mips_subword (src, 0));
3512 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3517 /* Return the appropriate instructions to move SRC into DEST. Assume
3518 that SRC is operand 1 and DEST is operand 0. */
3521 mips_output_move (rtx dest, rtx src)
3523 enum rtx_code dest_code, src_code;
3524 enum mips_symbol_type symbol_type;
3527 dest_code = GET_CODE (dest);
3528 src_code = GET_CODE (src);
3529 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3531 if (dbl_p && mips_split_64bit_move_p (dest, src))
3534 if ((src_code == REG && GP_REG_P (REGNO (src)))
3535 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3537 if (dest_code == REG)
3539 if (GP_REG_P (REGNO (dest)))
3540 return "move\t%0,%z1";
3542 if (MD_REG_P (REGNO (dest)))
3545 if (DSP_ACC_REG_P (REGNO (dest)))
3547 static char retval[] = "mt__\t%z1,%q0";
3548 retval[2] = reg_names[REGNO (dest)][4];
3549 retval[3] = reg_names[REGNO (dest)][5];
3553 if (FP_REG_P (REGNO (dest)))
3554 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3556 if (ALL_COP_REG_P (REGNO (dest)))
3558 static char retval[] = "dmtc_\t%z1,%0";
3560 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3561 return (dbl_p ? retval : retval + 1);
3564 if (dest_code == MEM)
3565 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3567 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3569 if (src_code == REG)
3571 if (DSP_ACC_REG_P (REGNO (src)))
3573 static char retval[] = "mf__\t%0,%q1";
3574 retval[2] = reg_names[REGNO (src)][4];
3575 retval[3] = reg_names[REGNO (src)][5];
3579 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3580 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3582 if (FP_REG_P (REGNO (src)))
3583 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3585 if (ALL_COP_REG_P (REGNO (src)))
3587 static char retval[] = "dmfc_\t%0,%1";
3589 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3590 return (dbl_p ? retval : retval + 1);
3594 if (src_code == MEM)
3595 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3597 if (src_code == CONST_INT)
3599 /* Don't use the X format, because that will give out of
3600 range numbers for 64-bit hosts and 32-bit targets. */
3602 return "li\t%0,%1\t\t\t# %X1";
3604 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3607 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3611 if (src_code == HIGH)
3612 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3614 if (CONST_GP_P (src))
3615 return "move\t%0,%1";
3617 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3618 && mips_lo_relocs[symbol_type] != 0)
3620 /* A signed 16-bit constant formed by applying a relocation
3621 operator to a symbolic address. */
3622 gcc_assert (!mips_split_p[symbol_type]);
3623 return "li\t%0,%R1";
3626 if (symbolic_operand (src, VOIDmode))
3628 gcc_assert (TARGET_MIPS16
3629 ? TARGET_MIPS16_TEXT_LOADS
3630 : !TARGET_EXPLICIT_RELOCS);
3631 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3634 if (src_code == REG && FP_REG_P (REGNO (src)))
3636 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3638 if (GET_MODE (dest) == V2SFmode)
3639 return "mov.ps\t%0,%1";
3641 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3644 if (dest_code == MEM)
3645 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3647 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3649 if (src_code == MEM)
3650 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3652 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3654 static char retval[] = "l_c_\t%0,%1";
3656 retval[1] = (dbl_p ? 'd' : 'w');
3657 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3660 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3662 static char retval[] = "s_c_\t%1,%0";
3664 retval[1] = (dbl_p ? 'd' : 'w');
3665 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3671 /* Return true if CMP1 is a suitable second operand for relational
3672 operator CODE. See also the *sCC patterns in mips.md. */
3675 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3681 return reg_or_0_operand (cmp1, VOIDmode);
3685 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3689 return arith_operand (cmp1, VOIDmode);
3692 return sle_operand (cmp1, VOIDmode);
3695 return sleu_operand (cmp1, VOIDmode);
3702 /* Canonicalize LE or LEU comparisons into LT comparisons when
3703 possible to avoid extra instructions or inverting the
3707 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3708 enum machine_mode mode)
3710 HOST_WIDE_INT original, plus_one;
3712 if (GET_CODE (*cmp1) != CONST_INT)
3715 original = INTVAL (*cmp1);
3716 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3721 if (original < plus_one)
3724 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3733 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3746 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3747 result in TARGET. CMP0 and TARGET are register_operands that have
3748 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3749 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3752 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3753 rtx target, rtx cmp0, rtx cmp1)
3755 /* First see if there is a MIPS instruction that can do this operation
3756 with CMP1 in its current form. If not, try to canonicalize the
3757 comparison to LT. If that fails, try doing the same for the
3758 inverse operation. If that also fails, force CMP1 into a register
3760 if (mips_relational_operand_ok_p (code, cmp1))
3761 mips_emit_binary (code, target, cmp0, cmp1);
3762 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3763 mips_emit_binary (code, target, cmp0, cmp1);
3766 enum rtx_code inv_code = reverse_condition (code);
3767 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3769 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3770 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3772 else if (invert_ptr == 0)
3774 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3775 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3776 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3780 *invert_ptr = !*invert_ptr;
3781 mips_emit_binary (inv_code, target, cmp0, cmp1);
3786 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3787 The register will have the same mode as CMP0. */
3790 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3792 if (cmp1 == const0_rtx)
3795 if (uns_arith_operand (cmp1, VOIDmode))
3796 return expand_binop (GET_MODE (cmp0), xor_optab,
3797 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3799 return expand_binop (GET_MODE (cmp0), sub_optab,
3800 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3803 /* Convert *CODE into a code that can be used in a floating-point
3804 scc instruction (c.<cond>.<fmt>). Return true if the values of
3805 the condition code registers will be inverted, with 0 indicating
3806 that the condition holds. */
3809 mips_reverse_fp_cond_p (enum rtx_code *code)
3816 *code = reverse_condition_maybe_unordered (*code);
3824 /* Convert a comparison into something that can be used in a branch or
3825 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3826 being compared and *CODE is the code used to compare them.
3828 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3829 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3830 otherwise any standard branch condition can be used. The standard branch
3833 - EQ/NE between two registers.
3834 - any comparison between a register and zero. */
3837 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3839 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3841 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3843 *op0 = cmp_operands[0];
3844 *op1 = cmp_operands[1];
3846 else if (*code == EQ || *code == NE)
3850 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3855 *op0 = cmp_operands[0];
3856 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3861 /* The comparison needs a separate scc instruction. Store the
3862 result of the scc in *OP0 and compare it against zero. */
3863 bool invert = false;
3864 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3866 mips_emit_int_relational (*code, &invert, *op0,
3867 cmp_operands[0], cmp_operands[1]);
3868 *code = (invert ? EQ : NE);
3871 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3873 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3874 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3880 enum rtx_code cmp_code;
3882 /* Floating-point tests use a separate c.cond.fmt comparison to
3883 set a condition code register. The branch or conditional move
3884 will then compare that register against zero.
3886 Set CMP_CODE to the code of the comparison instruction and
3887 *CODE to the code that the branch or move should use. */
3889 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3891 ? gen_reg_rtx (CCmode)
3892 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3894 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3898 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3899 Store the result in TARGET and return true if successful.
3901 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3904 mips_emit_scc (enum rtx_code code, rtx target)
3906 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3909 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3910 if (code == EQ || code == NE)
3912 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3913 mips_emit_binary (code, target, zie, const0_rtx);
3916 mips_emit_int_relational (code, 0, target,
3917 cmp_operands[0], cmp_operands[1]);
3921 /* Emit the common code for doing conditional branches.
3922 operand[0] is the label to jump to.
3923 The comparison operands are saved away by cmp{si,di,sf,df}. */
3926 gen_conditional_branch (rtx *operands, enum rtx_code code)
3928 rtx op0, op1, condition;
3930 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3931 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3932 emit_jump_insn (gen_condjump (condition, operands[0]));
3937 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3938 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3941 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3942 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3947 reversed_p = mips_reverse_fp_cond_p (&cond);
3948 cmp_result = gen_reg_rtx (CCV2mode);
3949 emit_insn (gen_scc_ps (cmp_result,
3950 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3952 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3955 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3959 /* Emit the common code for conditional moves. OPERANDS is the array
3960 of operands passed to the conditional move define_expand. */
3963 gen_conditional_move (rtx *operands)
3968 code = GET_CODE (operands[1]);
3969 mips_emit_compare (&code, &op0, &op1, true);
3970 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3971 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3972 gen_rtx_fmt_ee (code,
3975 operands[2], operands[3])));
3978 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3979 the conditional_trap expander. */
3982 mips_gen_conditional_trap (rtx *operands)
3985 enum rtx_code cmp_code = GET_CODE (operands[0]);
3986 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3988 /* MIPS conditional trap machine instructions don't have GT or LE
3989 flavors, so we must invert the comparison and convert to LT and
3990 GE, respectively. */
3993 case GT: cmp_code = LT; break;
3994 case LE: cmp_code = GE; break;
3995 case GTU: cmp_code = LTU; break;
3996 case LEU: cmp_code = GEU; break;
3999 if (cmp_code == GET_CODE (operands[0]))
4001 op0 = cmp_operands[0];
4002 op1 = cmp_operands[1];
4006 op0 = cmp_operands[1];
4007 op1 = cmp_operands[0];
4009 op0 = force_reg (mode, op0);
4010 if (!arith_operand (op1, mode))
4011 op1 = force_reg (mode, op1);
4013 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
4014 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
4018 /* Argument support functions. */
4020 /* Initialize CUMULATIVE_ARGS for a function. */
4023 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
4024 rtx libname ATTRIBUTE_UNUSED)
4026 static CUMULATIVE_ARGS zero_cum;
4027 tree param, next_param;
4030 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4032 /* Determine if this function has variable arguments. This is
4033 indicated by the last argument being 'void_type_mode' if there
4034 are no variable arguments. The standard MIPS calling sequence
4035 passes all arguments in the general purpose registers in this case. */
4037 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4038 param != 0; param = next_param)
4040 next_param = TREE_CHAIN (param);
4041 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4042 cum->gp_reg_found = 1;
4047 /* Fill INFO with information about a single argument. CUM is the
4048 cumulative state for earlier arguments. MODE is the mode of this
4049 argument and TYPE is its type (if known). NAMED is true if this
4050 is a named (fixed) argument rather than a variable one. */
4053 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4054 tree type, int named, struct mips_arg_info *info)
4056 bool doubleword_aligned_p;
4057 unsigned int num_bytes, num_words, max_regs;
4059 /* Work out the size of the argument. */
4060 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4061 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4063 /* Decide whether it should go in a floating-point register, assuming
4064 one is free. Later code checks for availability.
4066 The checks against UNITS_PER_FPVALUE handle the soft-float and
4067 single-float cases. */
4071 /* The EABI conventions have traditionally been defined in terms
4072 of TYPE_MODE, regardless of the actual type. */
4073 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4074 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4075 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4080 /* Only leading floating-point scalars are passed in
4081 floating-point registers. We also handle vector floats the same
4082 say, which is OK because they are not covered by the standard ABI. */
4083 info->fpr_p = (!cum->gp_reg_found
4084 && cum->arg_number < 2
4085 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4086 || VECTOR_FLOAT_TYPE_P (type))
4087 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4088 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4089 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4094 /* Scalar and complex floating-point types are passed in
4095 floating-point registers. */
4096 info->fpr_p = (named
4097 && (type == 0 || FLOAT_TYPE_P (type))
4098 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4099 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4100 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4101 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4103 /* ??? According to the ABI documentation, the real and imaginary
4104 parts of complex floats should be passed in individual registers.
4105 The real and imaginary parts of stack arguments are supposed
4106 to be contiguous and there should be an extra word of padding
4109 This has two problems. First, it makes it impossible to use a
4110 single "void *" va_list type, since register and stack arguments
4111 are passed differently. (At the time of writing, MIPSpro cannot
4112 handle complex float varargs correctly.) Second, it's unclear
4113 what should happen when there is only one register free.
4115 For now, we assume that named complex floats should go into FPRs
4116 if there are two FPRs free, otherwise they should be passed in the
4117 same way as a struct containing two floats. */
4119 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4120 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4122 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4123 info->fpr_p = false;
4133 /* See whether the argument has doubleword alignment. */
4134 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4136 /* Set REG_OFFSET to the register count we're interested in.
4137 The EABI allocates the floating-point registers separately,
4138 but the other ABIs allocate them like integer registers. */
4139 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4143 /* Advance to an even register if the argument is doubleword-aligned. */
4144 if (doubleword_aligned_p)
4145 info->reg_offset += info->reg_offset & 1;
4147 /* Work out the offset of a stack argument. */
4148 info->stack_offset = cum->stack_words;
4149 if (doubleword_aligned_p)
4150 info->stack_offset += info->stack_offset & 1;
4152 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4154 /* Partition the argument between registers and stack. */
4155 info->reg_words = MIN (num_words, max_regs);
4156 info->stack_words = num_words - info->reg_words;
4159 /* INFO describes an argument that is passed in a single-register value.
4160 Return the register it uses, assuming that FPRs are available if
4164 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4166 if (!info->fpr_p || !hard_float_p)
4167 return GP_ARG_FIRST + info->reg_offset;
4168 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4169 /* In o32, the second argument is always passed in $f14
4170 for TARGET_DOUBLE_FLOAT, regardless of whether the
4171 first argument was a word or doubleword. */
4172 return FP_ARG_FIRST + 2;
4174 return FP_ARG_FIRST + info->reg_offset;
4178 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4180 return !TARGET_OLDABI;
4183 /* Implement FUNCTION_ARG. */
4186 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4187 tree type, int named)
4189 struct mips_arg_info info;
4191 /* We will be called with a mode of VOIDmode after the last argument
4192 has been seen. Whatever we return will be passed to the call
4193 insn. If we need a mips16 fp_code, return a REG with the code
4194 stored as the mode. */
4195 if (mode == VOIDmode)
4197 if (TARGET_MIPS16 && cum->fp_code != 0)
4198 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4204 mips_arg_info (cum, mode, type, named, &info);
4206 /* Return straight away if the whole argument is passed on the stack. */
4207 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4211 && TREE_CODE (type) == RECORD_TYPE
4213 && TYPE_SIZE_UNIT (type)
4214 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4217 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4218 structure contains a double in its entirety, then that 64-bit
4219 chunk is passed in a floating point register. */
4222 /* First check to see if there is any such field. */
4223 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4224 if (TREE_CODE (field) == FIELD_DECL
4225 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4226 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4227 && host_integerp (bit_position (field), 0)
4228 && int_bit_position (field) % BITS_PER_WORD == 0)
4233 /* Now handle the special case by returning a PARALLEL
4234 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4235 chunks are passed in registers. */
4237 HOST_WIDE_INT bitpos;
4240 /* assign_parms checks the mode of ENTRY_PARM, so we must
4241 use the actual mode here. */
4242 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4245 field = TYPE_FIELDS (type);
4246 for (i = 0; i < info.reg_words; i++)
4250 for (; field; field = TREE_CHAIN (field))
4251 if (TREE_CODE (field) == FIELD_DECL
4252 && int_bit_position (field) >= bitpos)
4256 && int_bit_position (field) == bitpos
4257 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4258 && !TARGET_SOFT_FLOAT
4259 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4260 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4262 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4265 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4266 GEN_INT (bitpos / BITS_PER_UNIT));
4268 bitpos += BITS_PER_WORD;
4274 /* Handle the n32/n64 conventions for passing complex floating-point
4275 arguments in FPR pairs. The real part goes in the lower register
4276 and the imaginary part goes in the upper register. */
4279 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4282 enum machine_mode inner;
4285 inner = GET_MODE_INNER (mode);
4286 reg = FP_ARG_FIRST + info.reg_offset;
4287 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4289 /* Real part in registers, imaginary part on stack. */
4290 gcc_assert (info.stack_words == info.reg_words);
4291 return gen_rtx_REG (inner, reg);
4295 gcc_assert (info.stack_words == 0);
4296 real = gen_rtx_EXPR_LIST (VOIDmode,
4297 gen_rtx_REG (inner, reg),
4299 imag = gen_rtx_EXPR_LIST (VOIDmode,
4301 reg + info.reg_words / 2),
4302 GEN_INT (GET_MODE_SIZE (inner)));
4303 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4307 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4310 /* Implement FUNCTION_ARG_ADVANCE. */
4313 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4314 tree type, int named)
4316 struct mips_arg_info info;
4318 mips_arg_info (cum, mode, type, named, &info);
4321 cum->gp_reg_found = true;
4323 /* See the comment above the cumulative args structure in mips.h
4324 for an explanation of what this code does. It assumes the O32
4325 ABI, which passes at most 2 arguments in float registers. */
4326 if (cum->arg_number < 2 && info.fpr_p)
4327 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4329 if (mips_abi != ABI_EABI || !info.fpr_p)
4330 cum->num_gprs = info.reg_offset + info.reg_words;
4331 else if (info.reg_words > 0)
4332 cum->num_fprs += MAX_FPRS_PER_FMT;
4334 if (info.stack_words > 0)
4335 cum->stack_words = info.stack_offset + info.stack_words;
4340 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4343 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4344 enum machine_mode mode, tree type, bool named)
4346 struct mips_arg_info info;
4348 mips_arg_info (cum, mode, type, named, &info);
4349 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4353 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4354 PARM_BOUNDARY bits of alignment, but will be given anything up
4355 to STACK_BOUNDARY bits if the type requires it. */
4358 function_arg_boundary (enum machine_mode mode, tree type)
4360 unsigned int alignment;
4362 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4363 if (alignment < PARM_BOUNDARY)
4364 alignment = PARM_BOUNDARY;
4365 if (alignment > STACK_BOUNDARY)
4366 alignment = STACK_BOUNDARY;
4370 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4371 upward rather than downward. In other words, return true if the
4372 first byte of the stack slot has useful data, false if the last
4376 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4378 /* On little-endian targets, the first byte of every stack argument
4379 is passed in the first byte of the stack slot. */
4380 if (!BYTES_BIG_ENDIAN)
4383 /* Otherwise, integral types are padded downward: the last byte of a
4384 stack argument is passed in the last byte of the stack slot. */
4386 ? (INTEGRAL_TYPE_P (type)
4387 || POINTER_TYPE_P (type)
4388 || FIXED_POINT_TYPE_P (type))
4389 : (GET_MODE_CLASS (mode) == MODE_INT
4390 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4393 /* Big-endian o64 pads floating-point arguments downward. */
4394 if (mips_abi == ABI_O64)
4395 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4398 /* Other types are padded upward for o32, o64, n32 and n64. */
4399 if (mips_abi != ABI_EABI)
4402 /* Arguments smaller than a stack slot are padded downward. */
4403 if (mode != BLKmode)
4404 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4406 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4410 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4411 if the least significant byte of the register has useful data. Return
4412 the opposite if the most significant byte does. */
4415 mips_pad_reg_upward (enum machine_mode mode, tree type)
4417 /* No shifting is required for floating-point arguments. */
4418 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4419 return !BYTES_BIG_ENDIAN;
4421 /* Otherwise, apply the same padding to register arguments as we do
4422 to stack arguments. */
4423 return mips_pad_arg_upward (mode, type);
4427 /* Return nonzero when an argument must be passed by reference. */
4430 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4431 enum machine_mode mode, const_tree type,
4432 bool named ATTRIBUTE_UNUSED)
4434 if (mips_abi == ABI_EABI)
4438 /* ??? How should SCmode be handled? */
4439 if (mode == DImode || mode == DFmode
4440 || mode == DQmode || mode == UDQmode
4441 || mode == DAmode || mode == UDAmode)
4444 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4445 return size == -1 || size > UNITS_PER_WORD;
4449 /* If we have a variable-sized parameter, we have no choice. */
4450 return targetm.calls.must_pass_in_stack (mode, type);
4455 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4456 enum machine_mode mode ATTRIBUTE_UNUSED,
4457 const_tree type ATTRIBUTE_UNUSED, bool named)
4459 return mips_abi == ABI_EABI && named;
4462 /* See whether VALTYPE is a record whose fields should be returned in
4463 floating-point registers. If so, return the number of fields and
4464 list them in FIELDS (which should have two elements). Return 0
4467 For n32 & n64, a structure with one or two fields is returned in
4468 floating-point registers as long as every field has a floating-point
4472 mips_fpr_return_fields (const_tree valtype, tree *fields)
4480 if (TREE_CODE (valtype) != RECORD_TYPE)
4484 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4486 if (TREE_CODE (field) != FIELD_DECL)
4489 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
4495 fields[i++] = field;
4501 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4502 a value in the most significant part of $2/$3 if:
4504 - the target is big-endian;
4506 - the value has a structure or union type (we generalize this to
4507 cover aggregates from other languages too); and
4509 - the structure is not returned in floating-point registers. */
4512 mips_return_in_msb (const_tree valtype)
4516 return (TARGET_NEWABI
4517 && TARGET_BIG_ENDIAN
4518 && AGGREGATE_TYPE_P (valtype)
4519 && mips_fpr_return_fields (valtype, fields) == 0);
4523 /* Return true if the function return value MODE will get returned in a
4524 floating-point register. */
4527 mips_return_mode_in_fpr_p (enum machine_mode mode)
4529 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4530 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4531 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4532 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4535 /* Return a composite value in a pair of floating-point registers.
4536 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4537 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4540 For n32 & n64, $f0 always holds the first value and $f2 the second.
4541 Otherwise the values are packed together as closely as possible. */
4544 mips_return_fpr_pair (enum machine_mode mode,
4545 enum machine_mode mode1, HOST_WIDE_INT offset1,
4546 enum machine_mode mode2, HOST_WIDE_INT offset2)
4550 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4551 return gen_rtx_PARALLEL
4554 gen_rtx_EXPR_LIST (VOIDmode,
4555 gen_rtx_REG (mode1, FP_RETURN),
4557 gen_rtx_EXPR_LIST (VOIDmode,
4558 gen_rtx_REG (mode2, FP_RETURN + inc),
4559 GEN_INT (offset2))));
4564 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4565 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4566 VALTYPE is null and MODE is the mode of the return value. */
4569 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
4570 enum machine_mode mode)
4577 mode = TYPE_MODE (valtype);
4578 unsignedp = TYPE_UNSIGNED (valtype);
4580 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
4581 true, we must promote the mode just as PROMOTE_MODE does. */
4582 mode = promote_mode (valtype, mode, &unsignedp, 1);
4584 /* Handle structures whose fields are returned in $f0/$f2. */
4585 switch (mips_fpr_return_fields (valtype, fields))
4588 return gen_rtx_REG (mode, FP_RETURN);
4591 return mips_return_fpr_pair (mode,
4592 TYPE_MODE (TREE_TYPE (fields[0])),
4593 int_byte_position (fields[0]),
4594 TYPE_MODE (TREE_TYPE (fields[1])),
4595 int_byte_position (fields[1]));
4598 /* If a value is passed in the most significant part of a register, see
4599 whether we have to round the mode up to a whole number of words. */
4600 if (mips_return_in_msb (valtype))
4602 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4603 if (size % UNITS_PER_WORD != 0)
4605 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4606 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4610 /* For EABI, the class of return register depends entirely on MODE.
4611 For example, "struct { some_type x; }" and "union { some_type x; }"
4612 are returned in the same way as a bare "some_type" would be.
4613 Other ABIs only use FPRs for scalar, complex or vector types. */
4614 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4615 return gen_rtx_REG (mode, GP_RETURN);
4620 /* Handle long doubles for n32 & n64. */
4622 return mips_return_fpr_pair (mode,
4624 DImode, GET_MODE_SIZE (mode) / 2);
4626 if (mips_return_mode_in_fpr_p (mode))
4628 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4629 return mips_return_fpr_pair (mode,
4630 GET_MODE_INNER (mode), 0,
4631 GET_MODE_INNER (mode),
4632 GET_MODE_SIZE (mode) / 2);
4634 return gen_rtx_REG (mode, FP_RETURN);
4638 return gen_rtx_REG (mode, GP_RETURN);
4641 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
4642 all BLKmode objects are returned in memory. Under the new (N32 and
4643 64-bit MIPS ABIs) small structures are returned in a register.
4644 Objects with varying size must still be returned in memory, of
4648 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4651 return (TYPE_MODE (type) == BLKmode);
4653 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
4654 || (int_size_in_bytes (type) == -1));
4658 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4659 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4662 CUMULATIVE_ARGS local_cum;
4663 int gp_saved, fp_saved;
4665 /* The caller has advanced CUM up to, but not beyond, the last named
4666 argument. Advance a local copy of CUM past the last "real" named
4667 argument, to find out how many registers are left over. */
4670 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4672 /* Found out how many registers we need to save. */
4673 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4674 fp_saved = (EABI_FLOAT_VARARGS_P
4675 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4684 ptr = plus_constant (virtual_incoming_args_rtx,
4685 REG_PARM_STACK_SPACE (cfun->decl)
4686 - gp_saved * UNITS_PER_WORD);
4687 mem = gen_rtx_MEM (BLKmode, ptr);
4688 set_mem_alias_set (mem, get_varargs_alias_set ());
4690 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4695 /* We can't use move_block_from_reg, because it will use
4697 enum machine_mode mode;
4700 /* Set OFF to the offset from virtual_incoming_args_rtx of
4701 the first float register. The FP save area lies below
4702 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4703 off = -gp_saved * UNITS_PER_WORD;
4704 off &= ~(UNITS_PER_FPVALUE - 1);
4705 off -= fp_saved * UNITS_PER_FPREG;
4707 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4709 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4710 i += MAX_FPRS_PER_FMT)
4714 ptr = plus_constant (virtual_incoming_args_rtx, off);
4715 mem = gen_rtx_MEM (mode, ptr);
4716 set_mem_alias_set (mem, get_varargs_alias_set ());
4717 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4718 off += UNITS_PER_HWFPVALUE;
4722 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4723 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4724 + fp_saved * UNITS_PER_FPREG);
4727 /* Create the va_list data type.
4728 We keep 3 pointers, and two offsets.
4729 Two pointers are to the overflow area, which starts at the CFA.
4730 One of these is constant, for addressing into the GPR save area below it.
4731 The other is advanced up the stack through the overflow region.
4732 The third pointer is to the GPR save area. Since the FPR save area
4733 is just below it, we can address FPR slots off this pointer.
4734 We also keep two one-byte offsets, which are to be subtracted from the
4735 constant pointers to yield addresses in the GPR and FPR save areas.
4736 These are downcounted as float or non-float arguments are used,
4737 and when they get to zero, the argument must be obtained from the
4739 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4740 pointer is enough. It's started at the GPR save area, and is
4742 Note that the GPR save area is not constant size, due to optimization
4743 in the prologue. Hence, we can't use a design with two pointers
4744 and two offsets, although we could have designed this with two pointers
4745 and three offsets. */
4748 mips_build_builtin_va_list (void)
4750 if (EABI_FLOAT_VARARGS_P)
4752 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4755 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4757 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4759 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4761 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4763 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4764 unsigned_char_type_node);
4765 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4766 unsigned_char_type_node);
4767 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4768 warn on every user file. */
4769 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4770 array = build_array_type (unsigned_char_type_node,
4771 build_index_type (index));
4772 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4774 DECL_FIELD_CONTEXT (f_ovfl) = record;
4775 DECL_FIELD_CONTEXT (f_gtop) = record;
4776 DECL_FIELD_CONTEXT (f_ftop) = record;
4777 DECL_FIELD_CONTEXT (f_goff) = record;
4778 DECL_FIELD_CONTEXT (f_foff) = record;
4779 DECL_FIELD_CONTEXT (f_res) = record;
4781 TYPE_FIELDS (record) = f_ovfl;
4782 TREE_CHAIN (f_ovfl) = f_gtop;
4783 TREE_CHAIN (f_gtop) = f_ftop;
4784 TREE_CHAIN (f_ftop) = f_goff;
4785 TREE_CHAIN (f_goff) = f_foff;
4786 TREE_CHAIN (f_foff) = f_res;
4788 layout_type (record);
4791 else if (TARGET_IRIX && TARGET_IRIX6)
4792 /* On IRIX 6, this type is 'char *'. */
4793 return build_pointer_type (char_type_node);
4795 /* Otherwise, we use 'void *'. */
4796 return ptr_type_node;
4799 /* Implement va_start. */
4802 mips_va_start (tree valist, rtx nextarg)
4804 if (EABI_FLOAT_VARARGS_P)
4806 const CUMULATIVE_ARGS *cum;
4807 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4808 tree ovfl, gtop, ftop, goff, foff;
4810 int gpr_save_area_size;
4811 int fpr_save_area_size;
4814 cum = ¤t_function_args_info;
4816 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4818 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4820 f_ovfl = TYPE_FIELDS (va_list_type_node);
4821 f_gtop = TREE_CHAIN (f_ovfl);
4822 f_ftop = TREE_CHAIN (f_gtop);
4823 f_goff = TREE_CHAIN (f_ftop);
4824 f_foff = TREE_CHAIN (f_goff);
4826 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4828 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4830 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4832 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4834 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4837 /* Emit code to initialize OVFL, which points to the next varargs
4838 stack argument. CUM->STACK_WORDS gives the number of stack
4839 words used by named arguments. */
4840 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4841 if (cum->stack_words > 0)
4842 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4843 size_int (cum->stack_words * UNITS_PER_WORD));
4844 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4845 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4847 /* Emit code to initialize GTOP, the top of the GPR save area. */
4848 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4849 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4850 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4852 /* Emit code to initialize FTOP, the top of the FPR save area.
4853 This address is gpr_save_area_bytes below GTOP, rounded
4854 down to the next fp-aligned boundary. */
4855 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4856 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4857 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4859 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4860 size_int (-fpr_offset));
4861 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4862 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4864 /* Emit code to initialize GOFF, the offset from GTOP of the
4865 next GPR argument. */
4866 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4867 build_int_cst (NULL_TREE, gpr_save_area_size));
4868 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4870 /* Likewise emit code to initialize FOFF, the offset from FTOP
4871 of the next FPR argument. */
4872 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4873 build_int_cst (NULL_TREE, fpr_save_area_size));
4874 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4878 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4879 std_expand_builtin_va_start (valist, nextarg);
4883 /* Implement va_arg. */
4886 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4888 HOST_WIDE_INT size, rsize;
4892 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4895 type = build_pointer_type (type);
4897 size = int_size_in_bytes (type);
4898 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4900 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4901 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4904 /* Not a simple merged stack. */
4906 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4907 tree ovfl, top, off, align;
4908 HOST_WIDE_INT osize;
4911 f_ovfl = TYPE_FIELDS (va_list_type_node);
4912 f_gtop = TREE_CHAIN (f_ovfl);
4913 f_ftop = TREE_CHAIN (f_gtop);
4914 f_goff = TREE_CHAIN (f_ftop);
4915 f_foff = TREE_CHAIN (f_goff);
4917 /* We maintain separate pointers and offsets for floating-point
4918 and integer arguments, but we need similar code in both cases.
4921 TOP be the top of the register save area;
4922 OFF be the offset from TOP of the next register;
4923 ADDR_RTX be the address of the argument;
4924 RSIZE be the number of bytes used to store the argument
4925 when it's in the register save area;
4926 OSIZE be the number of bytes used to store it when it's
4927 in the stack overflow area; and
4928 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4930 The code we want is:
4932 1: off &= -rsize; // round down
4935 4: addr_rtx = top - off;
4940 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4941 10: addr_rtx = ovfl + PADDING;
4945 [1] and [9] can sometimes be optimized away. */
4947 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4950 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4951 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4953 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4955 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4958 /* When floating-point registers are saved to the stack,
4959 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4960 of the float's precision. */
4961 rsize = UNITS_PER_HWFPVALUE;
4963 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4964 (= PARM_BOUNDARY bits). This can be different from RSIZE
4967 (1) On 32-bit targets when TYPE is a structure such as:
4969 struct s { float f; };
4971 Such structures are passed in paired FPRs, so RSIZE
4972 will be 8 bytes. However, the structure only takes
4973 up 4 bytes of memory, so OSIZE will only be 4.
4975 (2) In combinations such as -mgp64 -msingle-float
4976 -fshort-double. Doubles passed in registers
4977 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4978 but those passed on the stack take up
4979 UNITS_PER_WORD bytes. */
4980 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4984 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4986 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4988 if (rsize > UNITS_PER_WORD)
4990 /* [1] Emit code for: off &= -rsize. */
4991 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4992 build_int_cst (NULL_TREE, -rsize));
4993 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4994 gimplify_and_add (t, pre_p);
4999 /* [2] Emit code to branch if off == 0. */
5000 t = build2 (NE_EXPR, boolean_type_node, off,
5001 build_int_cst (TREE_TYPE (off), 0));
5002 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
5004 /* [5] Emit code for: off -= rsize. We do this as a form of
5005 post-increment not available to C. Also widen for the
5006 coming pointer arithmetic. */
5007 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
5008 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
5009 t = fold_convert (sizetype, t);
5010 t = fold_build1 (NEGATE_EXPR, sizetype, t);
5012 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
5013 the argument has RSIZE - SIZE bytes of leading padding. */
5014 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
5015 if (BYTES_BIG_ENDIAN && rsize > size)
5017 u = size_int (rsize - size);
5018 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5020 COND_EXPR_THEN (addr) = t;
5022 if (osize > UNITS_PER_WORD)
5024 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
5025 u = size_int (osize - 1);
5026 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5027 t = fold_convert (sizetype, t);
5028 u = size_int (-osize);
5029 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5030 t = fold_convert (TREE_TYPE (ovfl), t);
5031 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5036 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5037 post-increment ovfl by osize. On big-endian machines,
5038 the argument has OSIZE - SIZE bytes of leading padding. */
5039 u = fold_convert (TREE_TYPE (ovfl),
5040 build_int_cst (NULL_TREE, osize));
5041 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5042 if (BYTES_BIG_ENDIAN && osize > size)
5044 u = size_int (osize - size);
5045 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5048 /* String [9] and [10,11] together. */
5050 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5051 COND_EXPR_ELSE (addr) = t;
5053 addr = fold_convert (build_pointer_type (type), addr);
5054 addr = build_va_arg_indirect_ref (addr);
5058 addr = build_va_arg_indirect_ref (addr);
5063 /* We keep a list of functions for which we have already built stubs
5064 in build_mips16_call_stub. */
5068 struct mips16_stub *next;
5073 static struct mips16_stub *mips16_stubs;
5075 /* Return a two-character string representing a function floating-point
5076 return mode, used to name MIPS16 function stubs. */
5079 mips16_call_stub_mode_suffix (enum machine_mode mode)
5083 else if (mode == DFmode)
5085 else if (mode == SCmode)
5087 else if (mode == DCmode)
5089 else if (mode == V2SFmode)
5095 /* Write out code to move floating point arguments in or out of
5096 general registers. Output the instructions to FILE. FP_CODE is
5097 the code describing which arguments are present (see the comment at
5098 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
5099 we are copying from the floating point registers. */
5102 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
5107 CUMULATIVE_ARGS cum;
5109 /* This code only works for the original 32-bit ABI and the O64 ABI. */
5110 gcc_assert (TARGET_OLDABI);
5117 init_cumulative_args (&cum, NULL, NULL);
5119 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5121 enum machine_mode mode;
5122 struct mips_arg_info info;
5126 else if ((f & 3) == 2)
5131 mips_arg_info (&cum, mode, NULL, true, &info);
5132 gparg = mips_arg_regno (&info, false);
5133 fparg = mips_arg_regno (&info, true);
5136 fprintf (file, "\t%s\t%s,%s\n", s,
5137 reg_names[gparg], reg_names[fparg]);
5138 else if (TARGET_64BIT)
5139 fprintf (file, "\td%s\t%s,%s\n", s,
5140 reg_names[gparg], reg_names[fparg]);
5141 else if (ISA_HAS_MXHC1)
5142 /* -mips32r2 -mfp64 */
5143 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
5145 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
5147 from_fp_p ? "mfhc1" : "mthc1",
5148 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
5150 else if (TARGET_BIG_ENDIAN)
5151 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
5152 reg_names[gparg], reg_names[fparg + 1], s,
5153 reg_names[gparg + 1], reg_names[fparg]);
5155 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
5156 reg_names[gparg], reg_names[fparg], s,
5157 reg_names[gparg + 1], reg_names[fparg + 1]);
5159 function_arg_advance (&cum, mode, NULL, true);
5163 /* Build a mips16 function stub. This is used for functions which
5164 take arguments in the floating point registers. It is 32-bit code
5165 that moves the floating point args into the general registers, and
5166 then jumps to the 16-bit code. */
5169 build_mips16_function_stub (FILE *file)
5172 char *secname, *stubname;
5173 tree stubid, stubdecl;
5177 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5178 fnname = targetm.strip_name_encoding (fnname);
5179 secname = (char *) alloca (strlen (fnname) + 20);
5180 sprintf (secname, ".mips16.fn.%s", fnname);
5181 stubname = (char *) alloca (strlen (fnname) + 20);
5182 sprintf (stubname, "__fn_stub_%s", fnname);
5183 stubid = get_identifier (stubname);
5184 stubdecl = build_decl (FUNCTION_DECL, stubid,
5185 build_function_type (void_type_node, NULL_TREE));
5186 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5187 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5189 fprintf (file, "\t# Stub function for %s (", current_function_name ());
5191 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
5193 fprintf (file, "%s%s",
5194 need_comma ? ", " : "",
5195 (f & 3) == 1 ? "float" : "double");
5198 fprintf (file, ")\n");
5200 fprintf (file, "\t.set\tnomips16\n");
5201 switch_to_section (function_section (stubdecl));
5202 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
5204 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
5205 within a .ent, and we cannot emit another .ent. */
5206 if (!FUNCTION_NAME_ALREADY_DECLARED)
5208 fputs ("\t.ent\t", file);
5209 assemble_name (file, stubname);
5213 assemble_name (file, stubname);
5214 fputs (":\n", file);
5216 /* We don't want the assembler to insert any nops here. */
5217 fprintf (file, "\t.set\tnoreorder\n");
5219 mips16_fp_args (file, current_function_args_info.fp_code, 1);
5221 fprintf (asm_out_file, "\t.set\tnoat\n");
5222 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
5223 assemble_name (file, fnname);
5224 fprintf (file, "\n");
5225 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5226 fprintf (asm_out_file, "\t.set\tat\n");
5228 /* Unfortunately, we can't fill the jump delay slot. We can't fill
5229 with one of the mfc1 instructions, because the result is not
5230 available for one instruction, so if the very first instruction
5231 in the function refers to the register, it will see the wrong
5233 fprintf (file, "\tnop\n");
5235 fprintf (file, "\t.set\treorder\n");
5237 if (!FUNCTION_NAME_ALREADY_DECLARED)
5239 fputs ("\t.end\t", file);
5240 assemble_name (file, stubname);
5244 switch_to_section (function_section (current_function_decl));
5247 /* Emit code to return a double value from a mips16 stub. GPREG is the
5248 first GP reg to use, FPREG is the first FP reg to use. */
5251 mips16_fpret_double (int gpreg, int fpreg)
5254 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
5255 reg_names[gpreg], reg_names[fpreg]);
5256 else if (TARGET_FLOAT64)
5258 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5259 reg_names[gpreg + WORDS_BIG_ENDIAN],
5261 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
5262 reg_names[gpreg + !WORDS_BIG_ENDIAN],
5267 if (TARGET_BIG_ENDIAN)
5269 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5270 reg_names[gpreg + 0],
5271 reg_names[fpreg + 1]);
5272 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5273 reg_names[gpreg + 1],
5274 reg_names[fpreg + 0]);
5278 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5279 reg_names[gpreg + 0],
5280 reg_names[fpreg + 0]);
5281 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5282 reg_names[gpreg + 1],
5283 reg_names[fpreg + 1]);
5288 /* Build a call stub for a mips16 call. A stub is needed if we are
5289 passing any floating point values which should go into the floating
5290 point registers. If we are, and the call turns out to be to a
5291 32-bit function, the stub will be used to move the values into the
5292 floating point registers before calling the 32-bit function. The
5293 linker will magically adjust the function call to either the 16-bit
5294 function or the 32-bit stub, depending upon where the function call
5295 is actually defined.
5297 Similarly, we need a stub if the return value might come back in a
5298 floating point register.
5300 RETVAL is the location of the return value, or null if this is
5301 a call rather than a call_value. FN is the address of the
5302 function and ARG_SIZE is the size of the arguments. FP_CODE
5303 is the code built by function_arg. This function returns a nonzero
5304 value if it builds the call instruction itself. */
5307 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
5311 char *secname, *stubname;
5312 struct mips16_stub *l;
5313 tree stubid, stubdecl;
5318 /* We don't need to do anything if we aren't in mips16 mode, or if
5319 we were invoked with the -msoft-float option. */
5320 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5323 /* Figure out whether the value might come back in a floating point
5326 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
5328 /* We don't need to do anything if there were no floating point
5329 arguments and the value will not be returned in a floating point
5331 if (fp_code == 0 && ! fpret)
5334 /* We don't need to do anything if this is a call to a special
5335 mips16 support function. */
5336 if (GET_CODE (fn) == SYMBOL_REF
5337 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
5340 /* This code will only work for o32 and o64 abis. The other ABI's
5341 require more sophisticated support. */
5342 gcc_assert (TARGET_OLDABI);
5344 /* If we're calling via a function pointer, then we must always call
5345 via a stub. There are magic stubs provided in libgcc.a for each
5346 of the required cases. Each of them expects the function address
5347 to arrive in register $2. */
5349 if (GET_CODE (fn) != SYMBOL_REF)
5355 /* ??? If this code is modified to support other ABI's, we need
5356 to handle PARALLEL return values here. */
5359 sprintf (buf, "__mips16_call_stub_%s_%d",
5360 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5363 sprintf (buf, "__mips16_call_stub_%d",
5366 id = get_identifier (buf);
5367 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5369 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
5371 if (retval == NULL_RTX)
5372 insn = gen_call_internal (stub_fn, arg_size);
5374 insn = gen_call_value_internal (retval, stub_fn, arg_size);
5375 insn = emit_call_insn (insn);
5377 /* Put the register usage information on the CALL. */
5378 CALL_INSN_FUNCTION_USAGE (insn) =
5379 gen_rtx_EXPR_LIST (VOIDmode,
5380 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
5381 CALL_INSN_FUNCTION_USAGE (insn));
5383 /* If we are handling a floating point return value, we need to
5384 save $18 in the function prologue. Putting a note on the
5385 call will mean that df_regs_ever_live_p ($18) will be true if the
5386 call is not eliminated, and we can check that in the prologue
5389 CALL_INSN_FUNCTION_USAGE (insn) =
5390 gen_rtx_EXPR_LIST (VOIDmode,
5391 gen_rtx_USE (VOIDmode,
5392 gen_rtx_REG (word_mode, 18)),
5393 CALL_INSN_FUNCTION_USAGE (insn));
5395 /* Return 1 to tell the caller that we've generated the call
5400 /* We know the function we are going to call. If we have already
5401 built a stub, we don't need to do anything further. */
5403 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5404 for (l = mips16_stubs; l != NULL; l = l->next)
5405 if (strcmp (l->name, fnname) == 0)
5410 /* Build a special purpose stub. When the linker sees a
5411 function call in mips16 code, it will check where the target
5412 is defined. If the target is a 32-bit call, the linker will
5413 search for the section defined here. It can tell which
5414 symbol this section is associated with by looking at the
5415 relocation information (the name is unreliable, since this
5416 might be a static function). If such a section is found, the
5417 linker will redirect the call to the start of the magic
5420 If the function does not return a floating point value, the
5421 special stub section is named
5424 If the function does return a floating point value, the stub
5426 .mips16.call.fp.FNNAME
5429 secname = (char *) alloca (strlen (fnname) + 40);
5430 sprintf (secname, ".mips16.call.%s%s",
5433 stubname = (char *) alloca (strlen (fnname) + 20);
5434 sprintf (stubname, "__call_stub_%s%s",
5437 stubid = get_identifier (stubname);
5438 stubdecl = build_decl (FUNCTION_DECL, stubid,
5439 build_function_type (void_type_node, NULL_TREE));
5440 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5441 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5443 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5445 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5449 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5451 fprintf (asm_out_file, "%s%s",
5452 need_comma ? ", " : "",
5453 (f & 3) == 1 ? "float" : "double");
5456 fprintf (asm_out_file, ")\n");
5458 fprintf (asm_out_file, "\t.set\tnomips16\n");
5459 assemble_start_function (stubdecl, stubname);
5461 if (!FUNCTION_NAME_ALREADY_DECLARED)
5463 fputs ("\t.ent\t", asm_out_file);
5464 assemble_name (asm_out_file, stubname);
5465 fputs ("\n", asm_out_file);
5467 assemble_name (asm_out_file, stubname);
5468 fputs (":\n", asm_out_file);
5471 /* We build the stub code by hand. That's the only way we can
5472 do it, since we can't generate 32-bit code during a 16-bit
5475 /* We don't want the assembler to insert any nops here. */
5476 fprintf (asm_out_file, "\t.set\tnoreorder\n");
5478 mips16_fp_args (asm_out_file, fp_code, 0);
5482 fprintf (asm_out_file, "\t.set\tnoat\n");
5483 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
5485 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5486 fprintf (asm_out_file, "\t.set\tat\n");
5487 /* Unfortunately, we can't fill the jump delay slot. We
5488 can't fill with one of the mtc1 instructions, because the
5489 result is not available for one instruction, so if the
5490 very first instruction in the function refers to the
5491 register, it will see the wrong value. */
5492 fprintf (asm_out_file, "\tnop\n");
5496 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5497 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5498 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
5499 /* As above, we can't fill the delay slot. */
5500 fprintf (asm_out_file, "\tnop\n");
5501 switch (GET_MODE (retval))
5504 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5505 reg_names[GP_REG_FIRST + 3],
5506 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
5509 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5510 reg_names[GP_REG_FIRST + 2],
5511 reg_names[FP_REG_FIRST + 0]);
5512 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5514 /* On 64-bit targets, complex floats are returned in
5515 a single GPR, such that "sd" on a suitably-aligned
5516 target would store the value correctly. */
5517 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5518 reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN],
5519 reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN]);
5520 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5521 reg_names[GP_REG_FIRST + 2],
5522 reg_names[GP_REG_FIRST + 2],
5523 reg_names[GP_REG_FIRST + 3]);
5528 mips16_fpret_double (GP_REG_FIRST + 2 + (8 / UNITS_PER_WORD),
5529 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5533 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
5539 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
5540 /* As above, we can't fill the delay slot. */
5541 fprintf (asm_out_file, "\tnop\n");
5544 fprintf (asm_out_file, "\t.set\treorder\n");
5546 #ifdef ASM_DECLARE_FUNCTION_SIZE
5547 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5550 if (!FUNCTION_NAME_ALREADY_DECLARED)
5552 fputs ("\t.end\t", asm_out_file);
5553 assemble_name (asm_out_file, stubname);
5554 fputs ("\n", asm_out_file);
5557 /* Record this stub. */
5558 l = (struct mips16_stub *) xmalloc (sizeof *l);
5559 l->name = xstrdup (fnname);
5561 l->next = mips16_stubs;
5565 /* If we expect a floating point return value, but we've built a
5566 stub which does not expect one, then we're in trouble. We can't
5567 use the existing stub, because it won't handle the floating point
5568 value. We can't build a new stub, because the linker won't know
5569 which stub to use for the various calls in this object file.
5570 Fortunately, this case is illegal, since it means that a function
5571 was declared in two different ways in a single compilation. */
5572 if (fpret && ! l->fpret)
5573 error ("cannot handle inconsistent calls to %qs", fnname);
5575 if (retval == NULL_RTX)
5576 insn = gen_call_internal_direct (fn, arg_size);
5578 insn = gen_call_value_internal_direct (retval, fn, arg_size);
5579 insn = emit_call_insn (insn);
5581 /* If we are calling a stub which handles a floating point return
5582 value, we need to arrange to save $18 in the prologue. We do
5583 this by marking the function call as using the register. The
5584 prologue will later see that it is used, and emit code to save
5587 CALL_INSN_FUNCTION_USAGE (insn) =
5588 gen_rtx_EXPR_LIST (VOIDmode,
5589 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
5590 CALL_INSN_FUNCTION_USAGE (insn));
5592 /* Return 1 to tell the caller that we've generated the call
5597 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5600 mips_ok_for_lazy_binding_p (rtx x)
5602 return (TARGET_USE_GOT
5603 && GET_CODE (x) == SYMBOL_REF
5604 && !mips_symbol_binds_local_p (x));
5607 /* Load function address ADDR into register DEST. SIBCALL_P is true
5608 if the address is needed for a sibling call. Return true if we
5609 used an explicit lazy-binding sequence. */
5612 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
5614 /* If we're generating PIC, and this call is to a global function,
5615 try to allow its address to be resolved lazily. This isn't
5616 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
5617 to the stub would be our caller's gp, not ours. */
5618 if (TARGET_EXPLICIT_RELOCS
5619 && !(sibcall_p && TARGET_CALL_SAVED_GP)
5620 && mips_ok_for_lazy_binding_p (addr))
5622 rtx high, lo_sum_symbol;
5624 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
5625 addr, SYMBOL_GOTOFF_CALL);
5626 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
5627 if (Pmode == SImode)
5628 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
5630 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
5635 mips_emit_move (dest, addr);
5641 /* Expand a call or call_value instruction. RESULT is where the
5642 result will go (null for calls), ADDR is the address of the
5643 function, ARGS_SIZE is the size of the arguments and AUX is
5644 the value passed to us by mips_function_arg. SIBCALL_P is true
5645 if we are expanding a sibling call, false if we're expanding
5649 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
5651 rtx orig_addr, pattern, insn;
5656 if (!call_insn_operand (addr, VOIDmode))
5658 addr = gen_reg_rtx (Pmode);
5659 lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
5663 && TARGET_HARD_FLOAT_ABI
5664 && build_mips16_call_stub (result, addr, args_size,
5665 aux == 0 ? 0 : (int) GET_MODE (aux)))
5669 pattern = (sibcall_p
5670 ? gen_sibcall_internal (addr, args_size)
5671 : gen_call_internal (addr, args_size));
5672 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
5676 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
5677 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
5680 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
5681 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
5684 pattern = (sibcall_p
5685 ? gen_sibcall_value_internal (result, addr, args_size)
5686 : gen_call_value_internal (result, addr, args_size));
5688 insn = emit_call_insn (pattern);
5690 /* Lazy-binding stubs require $gp to be valid on entry. We also pretend
5691 that they use FAKE_CALL_REGNO; see the load_call<mode> patterns for
5695 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5696 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
5697 gen_rtx_REG (Pmode, FAKE_CALL_REGNO));
5702 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5705 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5707 if (!TARGET_SIBCALLS)
5710 /* We can't do a sibcall if the called function is a MIPS16 function
5711 because there is no direct "jx" instruction equivalent to "jalx" to
5712 switch the ISA mode. */
5713 if (mips_use_mips16_mode_p (decl))
5716 /* ...and when -minterlink-mips16 is in effect, assume that external
5717 functions could be MIPS16 ones unless an attribute explicitly
5718 tells us otherwise. We only care about cases where the sibling
5719 and normal calls would both be direct. */
5720 if (TARGET_INTERLINK_MIPS16
5722 && DECL_EXTERNAL (decl)
5723 && !mips_nomips16_decl_p (decl)
5724 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5731 /* Emit code to move general operand SRC into condition-code
5732 register DEST. SCRATCH is a scratch TFmode float register.
5739 where FP1 and FP2 are single-precision float registers
5740 taken from SCRATCH. */
5743 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
5747 /* Change the source to SFmode. */
5749 src = adjust_address (src, SFmode, 0);
5750 else if (REG_P (src) || GET_CODE (src) == SUBREG)
5751 src = gen_rtx_REG (SFmode, true_regnum (src));
5753 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
5754 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
5756 mips_emit_move (copy_rtx (fp1), src);
5757 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
5758 emit_insn (gen_slt_sf (dest, fp2, fp1));
5761 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
5762 Assume that the areas do not overlap. */
5765 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
5767 HOST_WIDE_INT offset, delta;
5768 unsigned HOST_WIDE_INT bits;
5770 enum machine_mode mode;
5773 /* Work out how many bits to move at a time. If both operands have
5774 half-word alignment, it is usually better to move in half words.
5775 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
5776 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
5777 Otherwise move word-sized chunks. */
5778 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
5779 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
5780 bits = BITS_PER_WORD / 2;
5782 bits = BITS_PER_WORD;
5784 mode = mode_for_size (bits, MODE_INT, 0);
5785 delta = bits / BITS_PER_UNIT;
5787 /* Allocate a buffer for the temporary registers. */
5788 regs = alloca (sizeof (rtx) * length / delta);
5790 /* Load as many BITS-sized chunks as possible. Use a normal load if
5791 the source has enough alignment, otherwise use left/right pairs. */
5792 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5794 regs[i] = gen_reg_rtx (mode);
5795 if (MEM_ALIGN (src) >= bits)
5796 mips_emit_move (regs[i], adjust_address (src, mode, offset));
5799 rtx part = adjust_address (src, BLKmode, offset);
5800 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
5805 /* Copy the chunks to the destination. */
5806 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5807 if (MEM_ALIGN (dest) >= bits)
5808 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
5811 rtx part = adjust_address (dest, BLKmode, offset);
5812 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
5816 /* Mop up any left-over bytes. */
5817 if (offset < length)
5819 src = adjust_address (src, BLKmode, offset);
5820 dest = adjust_address (dest, BLKmode, offset);
5821 move_by_pieces (dest, src, length - offset,
5822 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
5826 #define MAX_MOVE_REGS 4
5827 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
5830 /* Helper function for doing a loop-based block operation on memory
5831 reference MEM. Each iteration of the loop will operate on LENGTH
5834 Create a new base register for use within the loop and point it to
5835 the start of MEM. Create a new memory reference that uses this
5836 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
5839 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
5840 rtx *loop_reg, rtx *loop_mem)
5842 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
5844 /* Although the new mem does not refer to a known location,
5845 it does keep up to LENGTH bytes of alignment. */
5846 *loop_mem = change_address (mem, BLKmode, *loop_reg);
5847 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
5851 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
5852 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
5853 memory regions do not overlap. */
5856 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
5858 rtx label, src_reg, dest_reg, final_src;
5859 HOST_WIDE_INT leftover;
5861 leftover = length % MAX_MOVE_BYTES;
5864 /* Create registers and memory references for use within the loop. */
5865 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
5866 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
5868 /* Calculate the value that SRC_REG should have after the last iteration
5870 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
5873 /* Emit the start of the loop. */
5874 label = gen_label_rtx ();
5877 /* Emit the loop body. */
5878 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
5880 /* Move on to the next block. */
5881 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
5882 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
5884 /* Emit the loop condition. */
5885 if (Pmode == DImode)
5886 emit_insn (gen_cmpdi (src_reg, final_src));
5888 emit_insn (gen_cmpsi (src_reg, final_src));
5889 emit_jump_insn (gen_bne (label));
5891 /* Mop up any left-over bytes. */
5893 mips_block_move_straight (dest, src, leftover);
5896 /* Expand a movmemsi instruction. */
5899 mips_expand_block_move (rtx dest, rtx src, rtx length)
5901 if (GET_CODE (length) == CONST_INT)
5903 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
5905 mips_block_move_straight (dest, src, INTVAL (length));
5910 mips_block_move_loop (dest, src, INTVAL (length));
5918 /* Expand a loop of synci insns for the address range [BEGIN, END). */
5921 mips_expand_synci_loop (rtx begin, rtx end)
5923 rtx inc, label, cmp, cmp_result;
5925 /* Load INC with the cache line size (rdhwr INC,$1). */
5926 inc = gen_reg_rtx (SImode);
5927 emit_insn (gen_rdhwr (inc, const1_rtx));
5929 /* Loop back to here. */
5930 label = gen_label_rtx ();
5933 emit_insn (gen_synci (begin));
5935 cmp = gen_reg_rtx (Pmode);
5936 mips_emit_binary (GTU, cmp, begin, end);
5938 mips_emit_binary (PLUS, begin, begin, inc);
5940 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
5941 emit_jump_insn (gen_condjump (cmp_result, label));
5944 /* Return true if it is possible to use left/right accesses for a
5945 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5946 returning true, update *OP, *LEFT and *RIGHT as follows:
5948 *OP is a BLKmode reference to the whole field.
5950 *LEFT is a QImode reference to the first byte if big endian or
5951 the last byte if little endian. This address can be used in the
5952 left-side instructions (lwl, swl, ldl, sdl).
5954 *RIGHT is a QImode reference to the opposite end of the field and
5955 can be used in the patterning right-side instruction. */
5958 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5959 rtx *left, rtx *right)
5963 /* Check that the operand really is a MEM. Not all the extv and
5964 extzv predicates are checked. */
5968 /* Check that the size is valid. */
5969 if (width != 32 && (!TARGET_64BIT || width != 64))
5972 /* We can only access byte-aligned values. Since we are always passed
5973 a reference to the first byte of the field, it is not necessary to
5974 do anything with BITPOS after this check. */
5975 if (bitpos % BITS_PER_UNIT != 0)
5978 /* Reject aligned bitfields: we want to use a normal load or store
5979 instead of a left/right pair. */
5980 if (MEM_ALIGN (*op) >= width)
5983 /* Adjust *OP to refer to the whole field. This also has the effect
5984 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5985 *op = adjust_address (*op, BLKmode, 0);
5986 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5988 /* Get references to both ends of the field. We deliberately don't
5989 use the original QImode *OP for FIRST since the new BLKmode one
5990 might have a simpler address. */
5991 first = adjust_address (*op, QImode, 0);
5992 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5994 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5995 be the upper word and RIGHT the lower word. */
5996 if (TARGET_BIG_ENDIAN)
5997 *left = first, *right = last;
5999 *left = last, *right = first;
6005 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
6006 Return true on success. We only handle cases where zero_extract is
6007 equivalent to sign_extract. */
6010 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
6012 rtx left, right, temp;
6014 /* If TARGET_64BIT, the destination of a 32-bit load will be a
6015 paradoxical word_mode subreg. This is the only case in which
6016 we allow the destination to be larger than the source. */
6017 if (GET_CODE (dest) == SUBREG
6018 && GET_MODE (dest) == DImode
6019 && SUBREG_BYTE (dest) == 0
6020 && GET_MODE (SUBREG_REG (dest)) == SImode)
6021 dest = SUBREG_REG (dest);
6023 /* After the above adjustment, the destination must be the same
6024 width as the source. */
6025 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6028 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6031 temp = gen_reg_rtx (GET_MODE (dest));
6032 if (GET_MODE (dest) == DImode)
6034 emit_insn (gen_mov_ldl (temp, src, left));
6035 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6039 emit_insn (gen_mov_lwl (temp, src, left));
6040 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6046 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
6050 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
6053 enum machine_mode mode;
6055 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6058 mode = mode_for_size (width, MODE_INT, 0);
6059 src = gen_lowpart (mode, src);
6063 emit_insn (gen_mov_sdl (dest, src, left));
6064 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6068 emit_insn (gen_mov_swl (dest, src, left));
6069 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6074 /* Return true if X is a MEM with the same size as MODE. */
6077 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6084 size = MEM_SIZE (x);
6085 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6088 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
6089 source of an "ext" instruction or the destination of an "ins"
6090 instruction. OP must be a register operand and the following
6091 conditions must hold:
6093 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
6094 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
6095 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
6097 Also reject lengths equal to a word as they are better handled
6098 by the move patterns. */
6101 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
6103 HOST_WIDE_INT len, pos;
6105 if (!ISA_HAS_EXT_INS
6106 || !register_operand (op, VOIDmode)
6107 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6110 len = INTVAL (size);
6111 pos = INTVAL (position);
6113 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
6114 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
6120 /* Initialize mips_split_addresses from the associated command-line
6123 mips_split_addresses is a half-way house between explicit
6124 relocations and the traditional assembler macros. It can
6125 split absolute 32-bit symbolic constants into a high/lo_sum
6126 pair but uses macros for other sorts of access.
6128 Like explicit relocation support for REL targets, it relies
6129 on GNU extensions in the assembler and the linker.
6131 Although this code should work for -O0, it has traditionally
6132 been treated as an optimization. */
6135 mips_init_split_addresses (void)
6137 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
6138 && optimize && !flag_pic
6139 && !ABI_HAS_64BIT_SYMBOLS)
6140 mips_split_addresses = 1;
6142 mips_split_addresses = 0;
6145 /* (Re-)Initialize information about relocs. */
6148 mips_init_relocs (void)
6150 memset (mips_split_p, '\0', sizeof (mips_split_p));
6151 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6152 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6154 if (ABI_HAS_64BIT_SYMBOLS)
6156 if (TARGET_EXPLICIT_RELOCS)
6158 mips_split_p[SYMBOL_64_HIGH] = true;
6159 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6160 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6162 mips_split_p[SYMBOL_64_MID] = true;
6163 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6164 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6166 mips_split_p[SYMBOL_64_LOW] = true;
6167 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6168 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6170 mips_split_p[SYMBOL_ABSOLUTE] = true;
6171 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6176 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
6178 mips_split_p[SYMBOL_ABSOLUTE] = true;
6179 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6180 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6182 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6188 /* The high part is provided by a pseudo copy of $gp. */
6189 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6190 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6193 if (TARGET_EXPLICIT_RELOCS)
6195 /* Small data constants are kept whole until after reload,
6196 then lowered by mips_rewrite_small_data. */
6197 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6199 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6202 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6203 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6207 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6208 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6213 /* The HIGH and LO_SUM are matched by special .md patterns. */
6214 mips_split_p[SYMBOL_GOT_DISP] = true;
6216 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6217 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6218 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6220 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6221 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6222 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6227 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6229 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6230 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6236 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6237 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6238 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6241 /* Thread-local relocation operators. */
6242 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6243 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6244 mips_split_p[SYMBOL_DTPREL] = 1;
6245 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6246 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6247 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6248 mips_split_p[SYMBOL_TPREL] = 1;
6249 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6250 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6252 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6255 /* If OP is an UNSPEC address, return the address to which it refers,
6256 otherwise return OP itself. */
6259 mips_strip_unspec_address (rtx op)
6263 split_const (op, &base, &offset);
6264 if (UNSPEC_ADDRESS_P (base))
6265 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6269 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6270 in context CONTEXT. RELOCS is the array of relocations to use. */
6273 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6274 const char **relocs)
6276 enum mips_symbol_type symbol_type;
6279 symbol_type = mips_classify_symbolic_expression (op, context);
6280 if (relocs[symbol_type] == 0)
6281 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6283 fputs (relocs[symbol_type], file);
6284 output_addr_const (file, mips_strip_unspec_address (op));
6285 for (p = relocs[symbol_type]; *p != 0; p++)
6290 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6292 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
6293 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
6294 'h' OP is HIGH, prints %hi(X),
6295 'd' output integer constant in decimal,
6296 'z' if the operand is 0, use $0 instead of normal operand.
6297 'D' print second part of double-word register or memory operand.
6298 'L' print low-order register of double-word register operand.
6299 'M' print high-order register of double-word register operand.
6300 'C' print part of opcode for a branch condition.
6301 'F' print part of opcode for a floating-point branch condition.
6302 'N' print part of opcode for a branch condition, inverted.
6303 'W' print part of opcode for a floating-point branch condition, inverted.
6304 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6305 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6306 't' like 'T', but with the EQ/NE cases reversed
6307 'Y' for a CONST_INT X, print mips_fp_conditions[X]
6308 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
6309 'R' print the reloc associated with LO_SUM
6310 'q' print DSP accumulator registers
6312 The punctuation characters are:
6314 '(' Turn on .set noreorder
6315 ')' Turn on .set reorder
6316 '[' Turn on .set noat
6318 '<' Turn on .set nomacro
6319 '>' Turn on .set macro
6320 '{' Turn on .set volatile (not GAS)
6321 '}' Turn on .set novolatile (not GAS)
6322 '&' Turn on .set noreorder if filling delay slots
6323 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
6324 '!' Turn on .set nomacro if filling delay slots
6325 '#' Print nop if in a .set noreorder section.
6326 '/' Like '#', but does nothing within a delayed branch sequence
6327 '?' Print 'l' if we are to use a branch likely instead of normal branch.
6328 '@' Print the name of the assembler temporary register (at or $1).
6329 '.' Print the name of the register with a hard-wired zero (zero or $0).
6330 '^' Print the name of the pic call-through register (t9 or $25).
6331 '$' Print the name of the stack pointer register (sp or $29).
6332 '+' Print the name of the gp register (usually gp or $28).
6333 '~' Output a branch alignment to LABEL_ALIGN(NULL).
6334 '|' Print .set push; .set mips2 if !ISA_HAS_LL_SC.
6335 '-' Print .set pop under the same conditions for '|'. */
6338 print_operand (FILE *file, rtx op, int letter)
6340 register enum rtx_code code;
6342 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6347 if (mips_branch_likely)
6352 fputs (reg_names [GP_REG_FIRST + 1], file);
6356 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
6360 fputs (reg_names [GP_REG_FIRST + 0], file);
6364 fputs (reg_names[STACK_POINTER_REGNUM], file);
6368 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6372 if (final_sequence != 0 && set_noreorder++ == 0)
6373 fputs (".set\tnoreorder\n\t", file);
6377 if (final_sequence != 0)
6379 if (set_noreorder++ == 0)
6380 fputs (".set\tnoreorder\n\t", file);
6382 if (set_nomacro++ == 0)
6383 fputs (".set\tnomacro\n\t", file);
6388 if (final_sequence != 0 && set_nomacro++ == 0)
6389 fputs ("\n\t.set\tnomacro", file);
6393 if (set_noreorder != 0)
6394 fputs ("\n\tnop", file);
6398 /* Print an extra newline so that the delayed insn is separated
6399 from the following ones. This looks neater and is consistent
6400 with non-nop delayed sequences. */
6401 if (set_noreorder != 0 && final_sequence == 0)
6402 fputs ("\n\tnop\n", file);
6406 if (set_noreorder++ == 0)
6407 fputs (".set\tnoreorder\n\t", file);
6411 if (set_noreorder == 0)
6412 error ("internal error: %%) found without a %%( in assembler pattern");
6414 else if (--set_noreorder == 0)
6415 fputs ("\n\t.set\treorder", file);
6420 if (set_noat++ == 0)
6421 fputs (".set\tnoat\n\t", file);
6426 error ("internal error: %%] found without a %%[ in assembler pattern");
6427 else if (--set_noat == 0)
6428 fputs ("\n\t.set\tat", file);
6433 if (set_nomacro++ == 0)
6434 fputs (".set\tnomacro\n\t", file);
6438 if (set_nomacro == 0)
6439 error ("internal error: %%> found without a %%< in assembler pattern");
6440 else if (--set_nomacro == 0)
6441 fputs ("\n\t.set\tmacro", file);
6446 if (set_volatile++ == 0)
6447 fputs ("#.set\tvolatile\n\t", file);
6451 if (set_volatile == 0)
6452 error ("internal error: %%} found without a %%{ in assembler pattern");
6453 else if (--set_volatile == 0)
6454 fputs ("\n\t#.set\tnovolatile", file);
6460 if (align_labels_log > 0)
6461 ASM_OUTPUT_ALIGN (file, align_labels_log);
6467 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6472 fputs ("\n\t.set\tpop", file);
6476 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
6485 error ("PRINT_OPERAND null pointer");
6489 code = GET_CODE (op);
6494 case EQ: fputs ("eq", file); break;
6495 case NE: fputs ("ne", file); break;
6496 case GT: fputs ("gt", file); break;
6497 case GE: fputs ("ge", file); break;
6498 case LT: fputs ("lt", file); break;
6499 case LE: fputs ("le", file); break;
6500 case GTU: fputs ("gtu", file); break;
6501 case GEU: fputs ("geu", file); break;
6502 case LTU: fputs ("ltu", file); break;
6503 case LEU: fputs ("leu", file); break;
6505 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
6508 else if (letter == 'N')
6511 case EQ: fputs ("ne", file); break;
6512 case NE: fputs ("eq", file); break;
6513 case GT: fputs ("le", file); break;
6514 case GE: fputs ("lt", file); break;
6515 case LT: fputs ("ge", file); break;
6516 case LE: fputs ("gt", file); break;
6517 case GTU: fputs ("leu", file); break;
6518 case GEU: fputs ("ltu", file); break;
6519 case LTU: fputs ("geu", file); break;
6520 case LEU: fputs ("gtu", file); break;
6522 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
6525 else if (letter == 'F')
6528 case EQ: fputs ("c1f", file); break;
6529 case NE: fputs ("c1t", file); break;
6531 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
6534 else if (letter == 'W')
6537 case EQ: fputs ("c1t", file); break;
6538 case NE: fputs ("c1f", file); break;
6540 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
6543 else if (letter == 'h')
6545 if (GET_CODE (op) == HIGH)
6548 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6551 else if (letter == 'R')
6552 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6554 else if (letter == 'Y')
6556 if (GET_CODE (op) == CONST_INT
6557 && ((unsigned HOST_WIDE_INT) INTVAL (op)
6558 < ARRAY_SIZE (mips_fp_conditions)))
6559 fputs (mips_fp_conditions[INTVAL (op)], file);
6561 output_operand_lossage ("invalid %%Y value");
6564 else if (letter == 'Z')
6568 print_operand (file, op, 0);
6573 else if (letter == 'q')
6578 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6580 regnum = REGNO (op);
6581 if (MD_REG_P (regnum))
6582 fprintf (file, "$ac0");
6583 else if (DSP_ACC_REG_P (regnum))
6584 fprintf (file, "$ac%c", reg_names[regnum][3]);
6586 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
6589 else if (code == REG || code == SUBREG)
6591 register int regnum;
6594 regnum = REGNO (op);
6596 regnum = true_regnum (op);
6598 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
6599 || (letter == 'L' && WORDS_BIG_ENDIAN)
6603 fprintf (file, "%s", reg_names[regnum]);
6606 else if (code == MEM)
6609 output_address (plus_constant (XEXP (op, 0), 4));
6611 output_address (XEXP (op, 0));
6614 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6615 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6617 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6618 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6620 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6621 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6623 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6624 fputs (reg_names[GP_REG_FIRST], file);
6626 else if (letter == 'd' || letter == 'x' || letter == 'X')
6627 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
6629 else if (letter == 'T' || letter == 't')
6631 int truth = (code == NE) == (letter == 'T');
6632 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6635 else if (CONST_GP_P (op))
6636 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6639 output_addr_const (file, mips_strip_unspec_address (op));
6642 /* Output address operand X to FILE. */
6645 print_operand_address (FILE *file, rtx x)
6647 struct mips_address_info addr;
6649 if (mips_classify_address (&addr, x, word_mode, true))
6653 print_operand (file, addr.offset, 0);
6654 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6657 case ADDRESS_LO_SUM:
6658 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6660 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6663 case ADDRESS_CONST_INT:
6664 output_addr_const (file, x);
6665 fprintf (file, "(%s)", reg_names[0]);
6668 case ADDRESS_SYMBOLIC:
6669 output_addr_const (file, mips_strip_unspec_address (x));
6675 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
6676 FIRST is true if this is the first time handling this decl. */
6679 mips_encode_section_info (tree decl, rtx rtl, int first)
6681 default_encode_section_info (decl, rtl, first);
6683 if (TREE_CODE (decl) == FUNCTION_DECL)
6685 rtx symbol = XEXP (rtl, 0);
6686 tree type = TREE_TYPE (decl);
6688 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
6689 || mips_far_type_p (type))
6690 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
6694 /* Implement TARGET_SELECT_RTX_SECTION. */
6697 mips_select_rtx_section (enum machine_mode mode, rtx x,
6698 unsigned HOST_WIDE_INT align)
6700 /* ??? Consider using mergeable small data sections. */
6701 if (mips_rtx_constant_in_small_data_p (mode))
6702 return get_named_section (NULL, ".sdata", 0);
6704 return default_elf_select_rtx_section (mode, x, align);
6707 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6709 The complication here is that, with the combination TARGET_ABICALLS
6710 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6711 therefore not be included in the read-only part of a DSO. Handle such
6712 cases by selecting a normal data section instead of a read-only one.
6713 The logic apes that in default_function_rodata_section. */
6716 mips_function_rodata_section (tree decl)
6718 if (!TARGET_ABICALLS || TARGET_GPWORD)
6719 return default_function_rodata_section (decl);
6721 if (decl && DECL_SECTION_NAME (decl))
6723 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6724 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6726 char *rname = ASTRDUP (name);
6728 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6730 else if (flag_function_sections && flag_data_sections
6731 && strncmp (name, ".text.", 6) == 0)
6733 char *rname = ASTRDUP (name);
6734 memcpy (rname + 1, "data", 4);
6735 return get_section (rname, SECTION_WRITE, decl);
6738 return data_section;
6741 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
6742 locally-defined objects go in a small data section. It also controls
6743 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
6744 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
6747 mips_in_small_data_p (const_tree decl)
6751 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6754 /* We don't yet generate small-data references for -mabicalls or
6755 VxWorks RTP code. See the related -G handling in override_options. */
6756 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
6759 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6763 /* Reject anything that isn't in a known small-data section. */
6764 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6765 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6768 /* If a symbol is defined externally, the assembler will use the
6769 usual -G rules when deciding how to implement macros. */
6770 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
6773 else if (TARGET_EMBEDDED_DATA)
6775 /* Don't put constants into the small data section: we want them
6776 to be in ROM rather than RAM. */
6777 if (TREE_CODE (decl) != VAR_DECL)
6780 if (TREE_READONLY (decl)
6781 && !TREE_SIDE_EFFECTS (decl)
6782 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6786 /* Enforce -mlocal-sdata. */
6787 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
6790 /* Enforce -mextern-sdata. */
6791 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
6793 if (DECL_EXTERNAL (decl))
6795 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
6799 size = int_size_in_bytes (TREE_TYPE (decl));
6800 return (size > 0 && size <= mips_section_threshold);
6803 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
6804 anchors for small data: the GP register acts as an anchor in that
6805 case. We also don't want to use them for PC-relative accesses,
6806 where the PC acts as an anchor. */
6809 mips_use_anchors_for_symbol_p (const_rtx symbol)
6811 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
6813 case SYMBOL_PC_RELATIVE:
6814 case SYMBOL_GP_RELATIVE:
6818 return default_use_anchors_for_symbol_p (symbol);
6822 /* The MIPS debug format wants all automatic variables and arguments
6823 to be in terms of the virtual frame pointer (stack pointer before
6824 any adjustment in the function), while the MIPS 3.0 linker wants
6825 the frame pointer to be the stack pointer after the initial
6826 adjustment. So, we do the adjustment here. The arg pointer (which
6827 is eliminated) points to the virtual frame pointer, while the frame
6828 pointer (which may be eliminated) points to the stack pointer after
6829 the initial adjustments. */
6832 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6834 rtx offset2 = const0_rtx;
6835 rtx reg = eliminate_constant_term (addr, &offset2);
6838 offset = INTVAL (offset2);
6840 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6841 || reg == hard_frame_pointer_rtx)
6843 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
6844 ? compute_frame_size (get_frame_size ())
6845 : cfun->machine->frame.total_size;
6847 /* MIPS16 frame is smaller */
6848 if (frame_pointer_needed && TARGET_MIPS16)
6849 frame_size -= cfun->machine->frame.args_size;
6851 offset = offset - frame_size;
6854 /* sdbout_parms does not want this to crash for unrecognized cases. */
6856 else if (reg != arg_pointer_rtx)
6857 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6864 /* When using assembler macros, keep track of all of small-data externs
6865 so that mips_file_end can emit the appropriate declarations for them.
6867 In most cases it would be safe (though pointless) to emit .externs
6868 for other symbols too. One exception is when an object is within
6869 the -G limit but declared by the user to be in a section other
6870 than .sbss or .sdata. */
6873 mips_output_external (FILE *file, tree decl, const char *name)
6875 default_elf_asm_output_external (file, decl, name);
6877 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6878 set in order to avoid putting out names that are never really
6880 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6882 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6884 fputs ("\t.extern\t", file);
6885 assemble_name (file, name);
6886 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6887 int_size_in_bytes (TREE_TYPE (decl)));
6889 else if (TARGET_IRIX
6890 && mips_abi == ABI_32
6891 && TREE_CODE (decl) == FUNCTION_DECL)
6893 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6894 `.global name .text' directive for every used but
6895 undefined function. If we don't, the linker may perform
6896 an optimization (skipping over the insns that set $gp)
6897 when it is unsafe. */
6898 fputs ("\t.globl ", file);
6899 assemble_name (file, name);
6900 fputs (" .text\n", file);
6905 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6906 put out a MIPS ECOFF file and a stab. */
6909 mips_output_filename (FILE *stream, const char *name)
6912 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6914 if (write_symbols == DWARF2_DEBUG)
6916 else if (mips_output_filename_first_time)
6918 mips_output_filename_first_time = 0;
6919 num_source_filenames += 1;
6920 current_function_file = name;
6921 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6922 output_quoted_string (stream, name);
6923 putc ('\n', stream);
6926 /* If we are emitting stabs, let dbxout.c handle this (except for
6927 the mips_output_filename_first_time case). */
6928 else if (write_symbols == DBX_DEBUG)
6931 else if (name != current_function_file
6932 && strcmp (name, current_function_file) != 0)
6934 num_source_filenames += 1;
6935 current_function_file = name;
6936 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6937 output_quoted_string (stream, name);
6938 putc ('\n', stream);
6942 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
6945 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
6950 fputs ("\t.dtprelword\t", file);
6954 fputs ("\t.dtpreldword\t", file);
6960 output_addr_const (file, x);
6961 fputs ("+0x8000", file);
6964 /* Implement TARGET_DWARF_REGISTER_SPAN. */
6967 mips_dwarf_register_span (rtx reg)
6970 enum machine_mode mode;
6972 /* By default, GCC maps increasing register numbers to increasing
6973 memory locations, but paired FPRs are always little-endian,
6974 regardless of the prevailing endianness. */
6975 mode = GET_MODE (reg);
6976 if (FP_REG_P (REGNO (reg))
6977 && TARGET_BIG_ENDIAN
6978 && MAX_FPRS_PER_FMT > 1
6979 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
6981 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
6982 high = mips_subword (reg, true);
6983 low = mips_subword (reg, false);
6984 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
6990 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6991 that should be written before the opening quote, such as "\t.ascii\t"
6992 for real string data or "\t# " for a comment. */
6995 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
7000 register const unsigned char *string =
7001 (const unsigned char *)string_param;
7003 fprintf (stream, "%s\"", prefix);
7004 for (i = 0; i < len; i++)
7006 register int c = string[i];
7010 if (c == '\\' || c == '\"')
7012 putc ('\\', stream);
7020 fprintf (stream, "\\%03o", c);
7024 if (cur_pos > 72 && i+1 < len)
7027 fprintf (stream, "\"\n%s\"", prefix);
7030 fprintf (stream, "\"\n");
7033 #ifdef BSS_SECTION_ASM_OP
7034 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
7035 in the use of sbss. */
7038 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
7039 unsigned HOST_WIDE_INT size, int align)
7041 extern tree last_assemble_variable_decl;
7043 if (mips_in_small_data_p (decl))
7044 switch_to_section (get_named_section (NULL, ".sbss", 0));
7046 switch_to_section (bss_section);
7047 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7048 last_assemble_variable_decl = decl;
7049 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
7050 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
7054 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7055 macros, mark the symbol as written so that mips_file_end won't emit an
7056 .extern for it. STREAM is the output file, NAME is the name of the
7057 symbol, INIT_STRING is the string that should be written before the
7058 symbol and FINAL_STRING is the string that should be written after it.
7059 FINAL_STRING is a printf() format that consumes the remaining arguments. */
7062 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7063 const char *final_string, ...)
7067 fputs (init_string, stream);
7068 assemble_name (stream, name);
7069 va_start (ap, final_string);
7070 vfprintf (stream, final_string, ap);
7073 if (!TARGET_EXPLICIT_RELOCS)
7075 tree name_tree = get_identifier (name);
7076 TREE_ASM_WRITTEN (name_tree) = 1;
7080 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7081 NAME is the name of the object and ALIGN is the required alignment
7082 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7083 alignment argument. */
7086 mips_declare_common_object (FILE *stream, const char *name,
7087 const char *init_string,
7088 unsigned HOST_WIDE_INT size,
7089 unsigned int align, bool takes_alignment_p)
7091 if (!takes_alignment_p)
7093 size += (align / BITS_PER_UNIT) - 1;
7094 size -= size % (align / BITS_PER_UNIT);
7095 mips_declare_object (stream, name, init_string,
7096 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7099 mips_declare_object (stream, name, init_string,
7100 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7101 size, align / BITS_PER_UNIT);
7104 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7105 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7108 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7109 unsigned HOST_WIDE_INT size,
7112 /* If the target wants uninitialized const declarations in
7113 .rdata then don't put them in .comm. */
7114 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
7115 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
7116 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7118 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7119 targetm.asm_out.globalize_label (stream, name);
7121 switch_to_section (readonly_data_section);
7122 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7123 mips_declare_object (stream, name, "",
7124 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7128 mips_declare_common_object (stream, name, "\n\t.comm\t",
7132 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7133 extern int size_directive_output;
7135 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7136 definitions except that it uses mips_declare_object() to emit the label. */
7139 mips_declare_object_name (FILE *stream, const char *name,
7140 tree decl ATTRIBUTE_UNUSED)
7142 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7143 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7146 size_directive_output = 0;
7147 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7151 size_directive_output = 1;
7152 size = int_size_in_bytes (TREE_TYPE (decl));
7153 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7156 mips_declare_object (stream, name, "", ":\n");
7159 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7162 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7166 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7167 if (!flag_inhibit_size_directive
7168 && DECL_SIZE (decl) != 0
7169 && !at_end && top_level
7170 && DECL_INITIAL (decl) == error_mark_node
7171 && !size_directive_output)
7175 size_directive_output = 1;
7176 size = int_size_in_bytes (TREE_TYPE (decl));
7177 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7182 /* Implement TARGET_ASM_FILE_START. */
7185 mips_file_start (void)
7187 default_file_start ();
7191 /* Generate a special section to describe the ABI switches used to
7192 produce the resultant binary. This used to be done by the assembler
7193 setting bits in the ELF header's flags field, but we have run out of
7194 bits. GDB needs this information in order to be able to correctly
7195 debug these binaries. See the function mips_gdbarch_init() in
7196 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
7197 causes unnecessary IRIX 6 ld warnings. */
7198 const char * abi_string = NULL;
7202 case ABI_32: abi_string = "abi32"; break;
7203 case ABI_N32: abi_string = "abiN32"; break;
7204 case ABI_64: abi_string = "abi64"; break;
7205 case ABI_O64: abi_string = "abiO64"; break;
7206 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
7210 /* Note - we use fprintf directly rather than calling switch_to_section
7211 because in this way we can avoid creating an allocated section. We
7212 do not want this section to take up any space in the running
7214 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7217 /* There is no ELF header flag to distinguish long32 forms of the
7218 EABI from long64 forms. Emit a special section to help tools
7219 such as GDB. Do the same for o64, which is sometimes used with
7221 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7222 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7223 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7225 #ifdef HAVE_AS_GNU_ATTRIBUTE
7226 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7227 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
7231 /* Generate the pseudo ops that System V.4 wants. */
7232 if (TARGET_ABICALLS)
7233 fprintf (asm_out_file, "\t.abicalls\n");
7235 if (flag_verbose_asm)
7236 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7238 mips_section_threshold, mips_arch_info->name, mips_isa);
7242 /* Make the last instruction frame related and note that it performs
7243 the operation described by FRAME_PATTERN. */
7246 mips_set_frame_expr (rtx frame_pattern)
7250 insn = get_last_insn ();
7251 RTX_FRAME_RELATED_P (insn) = 1;
7252 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7258 /* Return a frame-related rtx that stores REG at MEM.
7259 REG must be a single register. */
7262 mips_frame_set (rtx mem, rtx reg)
7266 /* If we're saving the return address register and the dwarf return
7267 address column differs from the hard register number, adjust the
7268 note reg to refer to the former. */
7269 if (REGNO (reg) == GP_REG_FIRST + 31
7270 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7271 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7273 set = gen_rtx_SET (VOIDmode, mem, reg);
7274 RTX_FRAME_RELATED_P (set) = 1;
7279 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7280 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7281 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7282 static const unsigned char mips16e_s2_s8_regs[] = {
7283 30, 23, 22, 21, 20, 19, 18
7285 static const unsigned char mips16e_a0_a3_regs[] = {
7289 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7290 ordered from the uppermost in memory to the lowest in memory. */
7291 static const unsigned char mips16e_save_restore_regs[] = {
7292 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7295 /* Return the index of the lowest X in the range [0, SIZE) for which
7296 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7299 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7304 for (i = 0; i < size; i++)
7305 if (BITSET_P (mask, regs[i]))
7311 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
7312 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
7313 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
7314 the same is true for all indexes (X, SIZE). */
7317 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7318 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
7322 i = mips16e_find_first_register (*mask_ptr, regs, size);
7323 for (i++; i < size; i++)
7324 if (!BITSET_P (*mask_ptr, regs[i]))
7326 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
7327 *mask_ptr |= 1 << regs[i];
7331 /* Return a simplified form of X using the register values in REG_VALUES.
7332 REG_VALUES[R] is the last value assigned to hard register R, or null
7333 if R has not been modified.
7335 This function is rather limited, but is good enough for our purposes. */
7338 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7342 x = avoid_constant_pool_reference (x);
7346 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7347 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7348 x0, GET_MODE (XEXP (x, 0)));
7351 if (ARITHMETIC_P (x))
7353 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7354 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7355 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7359 && reg_values[REGNO (x)]
7360 && !rtx_unstable_p (reg_values[REGNO (x)]))
7361 return reg_values[REGNO (x)];
7366 /* Return true if (set DEST SRC) stores an argument register into its
7367 caller-allocated save slot, storing the number of that argument
7368 register in *REGNO_PTR if so. REG_VALUES is as for
7369 mips16e_collect_propagate_value. */
7372 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7373 unsigned int *regno_ptr)
7375 unsigned int argno, regno;
7376 HOST_WIDE_INT offset, required_offset;
7379 /* Check that this is a word-mode store. */
7380 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7383 /* Check that the register being saved is an unmodified argument
7385 regno = REGNO (src);
7386 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7388 argno = regno - GP_ARG_FIRST;
7390 /* Check whether the address is an appropriate stack pointer or
7391 frame pointer access. The frame pointer is offset from the
7392 stack pointer by the size of the outgoing arguments. */
7393 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7394 mips_split_plus (addr, &base, &offset);
7395 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7396 if (base == hard_frame_pointer_rtx)
7397 required_offset -= cfun->machine->frame.args_size;
7398 else if (base != stack_pointer_rtx)
7400 if (offset != required_offset)
7407 /* A subroutine of mips_expand_prologue, called only when generating
7408 MIPS16e SAVE instructions. Search the start of the function for any
7409 instructions that save argument registers into their caller-allocated
7410 save slots. Delete such instructions and return a value N such that
7411 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7412 instructions redundant. */
7415 mips16e_collect_argument_saves (void)
7417 rtx reg_values[FIRST_PSEUDO_REGISTER];
7418 rtx insn, next, set, dest, src;
7419 unsigned int nargs, regno;
7421 push_topmost_sequence ();
7423 memset (reg_values, 0, sizeof (reg_values));
7424 for (insn = get_insns (); insn; insn = next)
7426 next = NEXT_INSN (insn);
7433 set = PATTERN (insn);
7434 if (GET_CODE (set) != SET)
7437 dest = SET_DEST (set);
7438 src = SET_SRC (set);
7439 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
7441 if (!BITSET_P (cfun->machine->frame.mask, regno))
7444 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7447 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7448 reg_values[REGNO (dest)]
7449 = mips16e_collect_propagate_value (src, reg_values);
7453 pop_topmost_sequence ();
7458 /* Return a move between register REGNO and memory location SP + OFFSET.
7459 Make the move a load if RESTORE_P, otherwise make it a frame-related
7463 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7468 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7469 reg = gen_rtx_REG (SImode, regno);
7471 ? gen_rtx_SET (VOIDmode, reg, mem)
7472 : mips_frame_set (mem, reg));
7475 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7476 The instruction must:
7478 - Allocate or deallocate SIZE bytes in total; SIZE is known
7481 - Save or restore as many registers in *MASK_PTR as possible.
7482 The instruction saves the first registers at the top of the
7483 allocated area, with the other registers below it.
7485 - Save NARGS argument registers above the allocated area.
7487 (NARGS is always zero if RESTORE_P.)
7489 The SAVE and RESTORE instructions cannot save and restore all general
7490 registers, so there may be some registers left over for the caller to
7491 handle. Destructively modify *MASK_PTR so that it contains the registers
7492 that still need to be saved or restored. The caller can save these
7493 registers in the memory immediately below *OFFSET_PTR, which is a
7494 byte offset from the bottom of the allocated stack area. */
7497 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7498 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7502 HOST_WIDE_INT offset, top_offset;
7503 unsigned int i, regno;
7506 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7508 /* Calculate the number of elements in the PARALLEL. We need one element
7509 for the stack adjustment, one for each argument register save, and one
7510 for each additional register move. */
7512 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7513 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7516 /* Create the final PARALLEL. */
7517 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7520 /* Add the stack pointer adjustment. */
7521 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7522 plus_constant (stack_pointer_rtx,
7523 restore_p ? size : -size));
7524 RTX_FRAME_RELATED_P (set) = 1;
7525 XVECEXP (pattern, 0, n++) = set;
7527 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7528 top_offset = restore_p ? size : 0;
7530 /* Save the arguments. */
7531 for (i = 0; i < nargs; i++)
7533 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7534 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7535 XVECEXP (pattern, 0, n++) = set;
7538 /* Then fill in the other register moves. */
7539 offset = top_offset;
7540 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7542 regno = mips16e_save_restore_regs[i];
7543 if (BITSET_P (*mask_ptr, regno))
7545 offset -= UNITS_PER_WORD;
7546 set = mips16e_save_restore_reg (restore_p, offset, regno);
7547 XVECEXP (pattern, 0, n++) = set;
7548 *mask_ptr &= ~(1 << regno);
7552 /* Tell the caller what offset it should use for the remaining registers. */
7553 *offset_ptr = size + (offset - top_offset) + size;
7555 gcc_assert (n == XVECLEN (pattern, 0));
7560 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7561 pointer. Return true if PATTERN matches the kind of instruction
7562 generated by mips16e_build_save_restore. If INFO is nonnull,
7563 initialize it when returning true. */
7566 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7567 struct mips16e_save_restore_info *info)
7569 unsigned int i, nargs, mask;
7570 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7571 rtx set, reg, mem, base;
7574 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7577 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7578 top_offset = adjust > 0 ? adjust : 0;
7580 /* Interpret all other members of the PARALLEL. */
7581 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7585 for (n = 1; n < XVECLEN (pattern, 0); n++)
7587 /* Check that we have a SET. */
7588 set = XVECEXP (pattern, 0, n);
7589 if (GET_CODE (set) != SET)
7592 /* Check that the SET is a load (if restoring) or a store
7594 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7598 /* Check that the address is the sum of the stack pointer and a
7599 possibly-zero constant offset. */
7600 mips_split_plus (XEXP (mem, 0), &base, &offset);
7601 if (base != stack_pointer_rtx)
7604 /* Check that SET's other operand is a register. */
7605 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7609 /* Check for argument saves. */
7610 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7611 && REGNO (reg) == GP_ARG_FIRST + nargs)
7613 else if (offset == save_offset)
7615 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7616 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7619 mask |= 1 << REGNO (reg);
7620 save_offset -= GET_MODE_SIZE (gpr_mode);
7626 /* Check that the restrictions on register ranges are met. */
7628 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7629 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7630 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7631 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7635 /* Make sure that the topmost argument register is not saved twice.
7636 The checks above ensure that the same is then true for the other
7637 argument registers. */
7638 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7641 /* Pass back information, if requested. */
7644 info->nargs = nargs;
7646 info->size = (adjust > 0 ? adjust : -adjust);
7652 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7653 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7654 the null terminator. */
7657 mips16e_add_register_range (char *s, unsigned int min_reg,
7658 unsigned int max_reg)
7660 if (min_reg != max_reg)
7661 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7663 s += sprintf (s, ",%s", reg_names[min_reg]);
7667 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7668 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7671 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7673 static char buffer[300];
7675 struct mips16e_save_restore_info info;
7676 unsigned int i, end;
7679 /* Parse the pattern. */
7680 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7683 /* Add the mnemonic. */
7684 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7687 /* Save the arguments. */
7689 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7690 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7691 else if (info.nargs == 1)
7692 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7694 /* Emit the amount of stack space to allocate or deallocate. */
7695 s += sprintf (s, "%d", (int) info.size);
7697 /* Save or restore $16. */
7698 if (BITSET_P (info.mask, 16))
7699 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7701 /* Save or restore $17. */
7702 if (BITSET_P (info.mask, 17))
7703 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7705 /* Save or restore registers in the range $s2...$s8, which
7706 mips16e_s2_s8_regs lists in decreasing order. Note that this
7707 is a software register range; the hardware registers are not
7708 numbered consecutively. */
7709 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7710 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7712 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7713 mips16e_s2_s8_regs[i]);
7715 /* Save or restore registers in the range $a0...$a3. */
7716 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7717 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7719 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7720 mips16e_a0_a3_regs[end - 1]);
7722 /* Save or restore $31. */
7723 if (BITSET_P (info.mask, 31))
7724 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7729 /* Return true if the current function has an insn that implicitly
7733 mips_function_has_gp_insn (void)
7735 /* Don't bother rechecking if we found one last time. */
7736 if (!cfun->machine->has_gp_insn_p)
7740 push_topmost_sequence ();
7741 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7743 && GET_CODE (PATTERN (insn)) != USE
7744 && GET_CODE (PATTERN (insn)) != CLOBBER
7745 && (get_attr_got (insn) != GOT_UNSET
7746 || small_data_pattern (PATTERN (insn), VOIDmode)))
7748 pop_topmost_sequence ();
7750 cfun->machine->has_gp_insn_p = (insn != 0);
7752 return cfun->machine->has_gp_insn_p;
7756 /* Return the register that should be used as the global pointer
7757 within this function. Return 0 if the function doesn't need
7758 a global pointer. */
7761 mips_global_pointer (void)
7765 /* $gp is always available unless we're using a GOT. */
7766 if (!TARGET_USE_GOT)
7767 return GLOBAL_POINTER_REGNUM;
7769 /* We must always provide $gp when it is used implicitly. */
7770 if (!TARGET_EXPLICIT_RELOCS)
7771 return GLOBAL_POINTER_REGNUM;
7773 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7775 if (current_function_profile)
7776 return GLOBAL_POINTER_REGNUM;
7778 /* If the function has a nonlocal goto, $gp must hold the correct
7779 global pointer for the target function. */
7780 if (current_function_has_nonlocal_goto)
7781 return GLOBAL_POINTER_REGNUM;
7783 /* If the gp is never referenced, there's no need to initialize it.
7784 Note that reload can sometimes introduce constant pool references
7785 into a function that otherwise didn't need them. For example,
7786 suppose we have an instruction like:
7788 (set (reg:DF R1) (float:DF (reg:SI R2)))
7790 If R2 turns out to be constant such as 1, the instruction may have a
7791 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7792 using this constant if R2 doesn't get allocated to a register.
7794 In cases like these, reload will have added the constant to the pool
7795 but no instruction will yet refer to it. */
7796 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7797 && !current_function_uses_const_pool
7798 && !mips_function_has_gp_insn ())
7801 /* We need a global pointer, but perhaps we can use a call-clobbered
7802 register instead of $gp. */
7803 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7804 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7805 if (!df_regs_ever_live_p (regno)
7806 && call_really_used_regs[regno]
7807 && !fixed_regs[regno]
7808 && regno != PIC_FUNCTION_ADDR_REGNUM)
7811 return GLOBAL_POINTER_REGNUM;
7814 /* Return true if the current function returns its value in a floating-point
7815 register in MIPS16 mode. */
7818 mips16_cfun_returns_in_fpr_p (void)
7820 tree return_type = DECL_RESULT (current_function_decl);
7821 return (TARGET_MIPS16
7822 && TARGET_HARD_FLOAT_ABI
7823 && !aggregate_value_p (return_type, current_function_decl)
7824 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7828 /* Return true if the current function must save REGNO. */
7831 mips_save_reg_p (unsigned int regno)
7833 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7834 if we have not chosen a call-clobbered substitute. */
7835 if (regno == GLOBAL_POINTER_REGNUM)
7836 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7838 /* Check call-saved registers. */
7839 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7840 && !call_really_used_regs[regno])
7843 /* Save both registers in an FPR pair if either one is used. This is
7844 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7845 register to be used without the even register. */
7846 if (FP_REG_P (regno)
7847 && MAX_FPRS_PER_FMT == 2
7848 && df_regs_ever_live_p (regno + 1)
7849 && !call_really_used_regs[regno + 1])
7852 /* We need to save the old frame pointer before setting up a new one. */
7853 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7856 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7857 if (current_function_profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7860 /* We need to save the incoming return address if it is ever clobbered
7861 within the function, if __builtin_eh_return is being used to set a
7862 different return address, or if a stub is being used to return a
7864 if (regno == GP_REG_FIRST + 31
7865 && (df_regs_ever_live_p (regno)
7866 || current_function_calls_eh_return
7867 || mips16_cfun_returns_in_fpr_p ()))
7873 /* Return the bytes needed to compute the frame pointer from the current
7874 stack pointer. SIZE is the size (in bytes) of the local variables.
7876 MIPS stack frames look like:
7878 Before call After call
7879 high +-----------------------+ +-----------------------+
7881 | caller's temps. | | caller's temps. |
7883 +-----------------------+ +-----------------------+
7885 | arguments on stack. | | arguments on stack. |
7887 +-----------------------+ +-----------------------+
7888 | 4 words to save | | 4 words to save |
7889 | arguments passed | | arguments passed |
7890 | in registers, even | | in registers, even |
7891 | if not passed. | | if not passed. |
7892 SP->+-----------------------+ VFP->+-----------------------+
7893 (VFP = SP+fp_sp_offset) | |\
7894 | fp register save | | fp_reg_size
7896 SP+gp_sp_offset->+-----------------------+
7898 | | gp register save | | gp_reg_size
7899 gp_reg_rounded | | |/
7900 | +-----------------------+
7901 \| alignment padding |
7902 +-----------------------+
7904 | local variables | | var_size
7906 +-----------------------+
7908 | alloca allocations |
7910 +-----------------------+
7912 cprestore_size | | GP save for V.4 abi |
7914 +-----------------------+
7916 | arguments on stack | |
7918 +-----------------------+ |
7919 | 4 words to save | | args_size
7920 | arguments passed | |
7921 | in registers, even | |
7922 | if not passed. | |
7923 low | (TARGET_OLDABI only) |/
7924 memory SP->+-----------------------+
7929 compute_frame_size (HOST_WIDE_INT size)
7932 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
7933 HOST_WIDE_INT var_size; /* # bytes that variables take up */
7934 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
7935 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
7936 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
7937 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
7938 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
7939 unsigned int mask; /* mask of saved gp registers */
7940 unsigned int fmask; /* mask of saved fp registers */
7942 cfun->machine->global_pointer = mips_global_pointer ();
7948 var_size = MIPS_STACK_ALIGN (size);
7949 args_size = current_function_outgoing_args_size;
7950 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
7952 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
7953 functions. If the function has local variables, we're committed
7954 to allocating it anyway. Otherwise reclaim it here. */
7955 if (var_size == 0 && current_function_is_leaf)
7956 cprestore_size = args_size = 0;
7958 /* The MIPS 3.0 linker does not like functions that dynamically
7959 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7960 looks like we are trying to create a second frame pointer to the
7961 function, so allocate some stack space to make it happy. */
7963 if (args_size == 0 && current_function_calls_alloca)
7964 args_size = 4 * UNITS_PER_WORD;
7966 total_size = var_size + args_size + cprestore_size;
7968 /* Calculate space needed for gp registers. */
7969 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7970 if (mips_save_reg_p (regno))
7972 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7973 mask |= 1 << (regno - GP_REG_FIRST);
7976 /* We need to restore these for the handler. */
7977 if (current_function_calls_eh_return)
7982 regno = EH_RETURN_DATA_REGNO (i);
7983 if (regno == INVALID_REGNUM)
7985 gp_reg_size += GET_MODE_SIZE (gpr_mode);
7986 mask |= 1 << (regno - GP_REG_FIRST);
7990 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7991 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7992 save all later registers too. */
7993 if (GENERATE_MIPS16E_SAVE_RESTORE)
7995 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7996 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
7997 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7998 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
8001 /* This loop must iterate over the same space as its companion in
8002 mips_for_each_saved_reg. */
8003 if (TARGET_HARD_FLOAT)
8004 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
8005 regno >= FP_REG_FIRST;
8006 regno -= MAX_FPRS_PER_FMT)
8007 if (mips_save_reg_p (regno))
8009 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
8010 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
8013 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
8014 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
8016 /* Add in the space required for saving incoming register arguments. */
8017 total_size += current_function_pretend_args_size;
8018 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
8020 /* Save other computed information. */
8021 cfun->machine->frame.total_size = total_size;
8022 cfun->machine->frame.var_size = var_size;
8023 cfun->machine->frame.args_size = args_size;
8024 cfun->machine->frame.cprestore_size = cprestore_size;
8025 cfun->machine->frame.gp_reg_size = gp_reg_size;
8026 cfun->machine->frame.fp_reg_size = fp_reg_size;
8027 cfun->machine->frame.mask = mask;
8028 cfun->machine->frame.fmask = fmask;
8029 cfun->machine->frame.initialized = reload_completed;
8030 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
8031 cfun->machine->frame.num_fp = (fp_reg_size
8032 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
8036 HOST_WIDE_INT offset;
8038 if (GENERATE_MIPS16E_SAVE_RESTORE)
8039 /* MIPS16e SAVE and RESTORE instructions require the GP save area
8040 to be aligned at the high end with any padding at the low end.
8041 It is only safe to use this calculation for o32, where we never
8042 have pretend arguments, and where any varargs will be saved in
8043 the caller-allocated area rather than at the top of the frame. */
8044 offset = (total_size - GET_MODE_SIZE (gpr_mode));
8046 offset = (args_size + cprestore_size + var_size
8047 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
8048 cfun->machine->frame.gp_sp_offset = offset;
8049 cfun->machine->frame.gp_save_offset = offset - total_size;
8053 cfun->machine->frame.gp_sp_offset = 0;
8054 cfun->machine->frame.gp_save_offset = 0;
8059 HOST_WIDE_INT offset;
8061 offset = (args_size + cprestore_size + var_size
8062 + gp_reg_rounded + fp_reg_size
8063 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
8064 cfun->machine->frame.fp_sp_offset = offset;
8065 cfun->machine->frame.fp_save_offset = offset - total_size;
8069 cfun->machine->frame.fp_sp_offset = 0;
8070 cfun->machine->frame.fp_save_offset = 0;
8073 /* Ok, we're done. */
8077 /* Return the style of GP load sequence that is being used for the
8078 current function. */
8080 enum mips_loadgp_style
8081 mips_current_loadgp_style (void)
8083 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
8089 if (TARGET_ABSOLUTE_ABICALLS)
8090 return LOADGP_ABSOLUTE;
8092 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8095 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
8096 pointer or argument pointer. TO is either the stack pointer or
8097 hard frame pointer. */
8100 mips_initial_elimination_offset (int from, int to)
8102 HOST_WIDE_INT offset;
8104 compute_frame_size (get_frame_size ());
8106 /* Set OFFSET to the offset from the stack pointer. */
8109 case FRAME_POINTER_REGNUM:
8113 case ARG_POINTER_REGNUM:
8114 offset = (cfun->machine->frame.total_size
8115 - current_function_pretend_args_size);
8122 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
8123 offset -= cfun->machine->frame.args_size;
8128 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
8129 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
8132 mips_extra_live_on_entry (bitmap regs)
8134 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
8135 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8138 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
8139 back to a previous frame. */
8142 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8147 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8150 /* Emit code to change the current function's return address to
8151 ADDRESS. SCRATCH is available as a scratch register, if needed.
8152 ADDRESS and SCRATCH are both word-mode GPRs. */
8155 mips_set_return_address (rtx address, rtx scratch)
8159 compute_frame_size (get_frame_size ());
8160 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
8161 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8162 cfun->machine->frame.gp_sp_offset);
8164 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
8167 /* Restore $gp from its save slot. Valid only when using o32 or
8171 mips_restore_gp (void)
8175 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8177 address = mips_add_offset (pic_offset_table_rtx,
8178 frame_pointer_needed
8179 ? hard_frame_pointer_rtx
8180 : stack_pointer_rtx,
8181 current_function_outgoing_args_size);
8182 slot = gen_rtx_MEM (Pmode, address);
8184 mips_emit_move (pic_offset_table_rtx, slot);
8185 if (!TARGET_EXPLICIT_RELOCS)
8186 emit_insn (gen_blockage ());
8189 /* A function to save or store a register. The first argument is the
8190 register and the second is the stack slot. */
8191 typedef void (*mips_save_restore_fn) (rtx, rtx);
8193 /* Use FN to save or restore register REGNO. MODE is the register's
8194 mode and OFFSET is the offset of its save slot from the current
8198 mips_save_restore_reg (enum machine_mode mode, int regno,
8199 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8203 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8205 fn (gen_rtx_REG (mode, regno), mem);
8209 /* Call FN for each register that is saved by the current function.
8210 SP_OFFSET is the offset of the current stack pointer from the start
8214 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8216 enum machine_mode fpr_mode;
8217 HOST_WIDE_INT offset;
8220 /* Save registers starting from high to low. The debuggers prefer at least
8221 the return register be stored at func+4, and also it allows us not to
8222 need a nop in the epilogue if at least one register is reloaded in
8223 addition to return address. */
8224 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8225 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8226 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8228 mips_save_restore_reg (gpr_mode, regno, offset, fn);
8229 offset -= GET_MODE_SIZE (gpr_mode);
8232 /* This loop must iterate over the same space as its companion in
8233 compute_frame_size. */
8234 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8235 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8236 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
8237 regno >= FP_REG_FIRST;
8238 regno -= MAX_FPRS_PER_FMT)
8239 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8241 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8242 offset -= GET_MODE_SIZE (fpr_mode);
8246 /* If we're generating n32 or n64 abicalls, and the current function
8247 does not use $28 as its global pointer, emit a cplocal directive.
8248 Use pic_offset_table_rtx as the argument to the directive. */
8251 mips_output_cplocal (void)
8253 if (!TARGET_EXPLICIT_RELOCS
8254 && cfun->machine->global_pointer > 0
8255 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8256 output_asm_insn (".cplocal %+", 0);
8259 /* Set up the stack and frame (if desired) for the function. */
8262 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8265 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
8267 #ifdef SDB_DEBUGGING_INFO
8268 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8269 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8272 /* In mips16 mode, we may need to generate a 32 bit to handle
8273 floating point arguments. The linker will arrange for any 32-bit
8274 functions to call this stub, which will then jump to the 16-bit
8277 && TARGET_HARD_FLOAT_ABI
8278 && current_function_args_info.fp_code != 0)
8279 build_mips16_function_stub (file);
8281 /* Select the mips16 mode for this function. */
8283 fprintf (file, "\t.set\tmips16\n");
8285 fprintf (file, "\t.set\tnomips16\n");
8287 if (!FUNCTION_NAME_ALREADY_DECLARED)
8289 /* Get the function name the same way that toplev.c does before calling
8290 assemble_start_function. This is needed so that the name used here
8291 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8292 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8294 if (!flag_inhibit_size_directive)
8296 fputs ("\t.ent\t", file);
8297 assemble_name (file, fnname);
8301 assemble_name (file, fnname);
8302 fputs (":\n", file);
8305 /* Stop mips_file_end from treating this function as external. */
8306 if (TARGET_IRIX && mips_abi == ABI_32)
8307 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8309 if (!flag_inhibit_size_directive)
8311 /* .frame FRAMEREG, FRAMESIZE, RETREG */
8313 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8314 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
8315 ", args= " HOST_WIDE_INT_PRINT_DEC
8316 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8317 (reg_names[(frame_pointer_needed)
8318 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
8319 ((frame_pointer_needed && TARGET_MIPS16)
8320 ? tsize - cfun->machine->frame.args_size
8322 reg_names[GP_REG_FIRST + 31],
8323 cfun->machine->frame.var_size,
8324 cfun->machine->frame.num_gp,
8325 cfun->machine->frame.num_fp,
8326 cfun->machine->frame.args_size,
8327 cfun->machine->frame.cprestore_size);
8329 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
8330 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8331 cfun->machine->frame.mask,
8332 cfun->machine->frame.gp_save_offset);
8333 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8334 cfun->machine->frame.fmask,
8335 cfun->machine->frame.fp_save_offset);
8338 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
8339 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
8342 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8344 /* Handle the initialization of $gp for SVR4 PIC. */
8345 if (!cfun->machine->all_noreorder_p)
8346 output_asm_insn ("%(.cpload\t%^%)", 0);
8348 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8350 else if (cfun->machine->all_noreorder_p)
8351 output_asm_insn ("%(%<", 0);
8353 /* Tell the assembler which register we're using as the global
8354 pointer. This is needed for thunks, since they can use either
8355 explicit relocs or assembler macros. */
8356 mips_output_cplocal ();
8359 /* Do any necessary cleanup after a function to restore stack, frame,
8362 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8365 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8366 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8368 /* Reinstate the normal $gp. */
8369 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8370 mips_output_cplocal ();
8372 if (cfun->machine->all_noreorder_p)
8374 /* Avoid using %>%) since it adds excess whitespace. */
8375 output_asm_insn (".set\tmacro", 0);
8376 output_asm_insn (".set\treorder", 0);
8377 set_noreorder = set_nomacro = 0;
8380 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8384 /* Get the function name the same way that toplev.c does before calling
8385 assemble_start_function. This is needed so that the name used here
8386 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8387 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8388 fputs ("\t.end\t", file);
8389 assemble_name (file, fnname);
8394 /* Save register REG to MEM. Make the instruction frame-related. */
8397 mips_save_reg (rtx reg, rtx mem)
8399 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8403 if (mips_split_64bit_move_p (mem, reg))
8404 mips_split_doubleword_move (mem, reg);
8406 mips_emit_move (mem, reg);
8408 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
8409 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
8410 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8415 && REGNO (reg) != GP_REG_FIRST + 31
8416 && !M16_REG_P (REGNO (reg)))
8418 /* Save a non-mips16 register by moving it through a temporary.
8419 We don't need to do this for $31 since there's a special
8420 instruction for it. */
8421 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8422 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8425 mips_emit_move (mem, reg);
8427 mips_set_frame_expr (mips_frame_set (mem, reg));
8431 /* The __gnu_local_gp symbol. */
8433 static GTY(()) rtx mips_gnu_local_gp;
8435 /* If we're generating n32 or n64 abicalls, emit instructions
8436 to set up the global pointer. */
8439 mips_emit_loadgp (void)
8441 rtx addr, offset, incoming_address, base, index;
8443 switch (mips_current_loadgp_style ())
8445 case LOADGP_ABSOLUTE:
8446 if (mips_gnu_local_gp == NULL)
8448 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8449 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8451 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
8455 addr = XEXP (DECL_RTL (current_function_decl), 0);
8456 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8457 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8458 emit_insn (gen_loadgp_newabi (offset, incoming_address));
8459 if (!TARGET_EXPLICIT_RELOCS)
8460 emit_insn (gen_loadgp_blockage ());
8464 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
8465 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
8466 emit_insn (gen_loadgp_rtp (base, index));
8467 if (!TARGET_EXPLICIT_RELOCS)
8468 emit_insn (gen_loadgp_blockage ());
8476 /* Expand the prologue into a bunch of separate insns. */
8479 mips_expand_prologue (void)
8485 if (cfun->machine->global_pointer > 0)
8486 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8488 size = compute_frame_size (get_frame_size ());
8490 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8491 bytes beforehand; this is enough to cover the register save area
8492 without going out of range. */
8493 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8495 HOST_WIDE_INT step1;
8497 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8499 if (GENERATE_MIPS16E_SAVE_RESTORE)
8501 HOST_WIDE_INT offset;
8502 unsigned int mask, regno;
8504 /* Try to merge argument stores into the save instruction. */
8505 nargs = mips16e_collect_argument_saves ();
8507 /* Build the save instruction. */
8508 mask = cfun->machine->frame.mask;
8509 insn = mips16e_build_save_restore (false, &mask, &offset,
8511 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8514 /* Check if we need to save other registers. */
8515 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8516 if (BITSET_P (mask, regno - GP_REG_FIRST))
8518 offset -= GET_MODE_SIZE (gpr_mode);
8519 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8524 insn = gen_add3_insn (stack_pointer_rtx,
8527 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8529 mips_for_each_saved_reg (size, mips_save_reg);
8533 /* Allocate the rest of the frame. */
8536 if (SMALL_OPERAND (-size))
8537 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8539 GEN_INT (-size)))) = 1;
8542 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8545 /* There are no instructions to add or subtract registers
8546 from the stack pointer, so use the frame pointer as a
8547 temporary. We should always be using a frame pointer
8548 in this case anyway. */
8549 gcc_assert (frame_pointer_needed);
8550 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8551 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8552 hard_frame_pointer_rtx,
8553 MIPS_PROLOGUE_TEMP (Pmode)));
8554 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8557 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8559 MIPS_PROLOGUE_TEMP (Pmode)));
8561 /* Describe the combined effect of the previous instructions. */
8563 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8564 plus_constant (stack_pointer_rtx, -size)));
8568 /* Set up the frame pointer, if we're using one. In mips16 code,
8569 we point the frame pointer ahead of the outgoing argument area.
8570 This should allow more variables & incoming arguments to be
8571 accessed with unextended instructions. */
8572 if (frame_pointer_needed)
8574 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
8576 rtx offset = GEN_INT (cfun->machine->frame.args_size);
8577 if (SMALL_OPERAND (cfun->machine->frame.args_size))
8579 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8584 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), offset);
8585 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8586 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8587 hard_frame_pointer_rtx,
8588 MIPS_PROLOGUE_TEMP (Pmode)));
8590 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8591 plus_constant (stack_pointer_rtx,
8592 cfun->machine->frame.args_size)));
8596 RTX_FRAME_RELATED_P (mips_emit_move (hard_frame_pointer_rtx,
8597 stack_pointer_rtx)) = 1;
8600 mips_emit_loadgp ();
8602 /* If generating o32/o64 abicalls, save $gp on the stack. */
8603 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8604 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8606 /* If we are profiling, make sure no instructions are scheduled before
8607 the call to mcount. */
8609 if (current_function_profile)
8610 emit_insn (gen_blockage ());
8613 /* Emit instructions to restore register REG from slot MEM. */
8616 mips_restore_reg (rtx reg, rtx mem)
8618 /* There's no mips16 instruction to load $31 directly. Load into
8619 $7 instead and adjust the return insn appropriately. */
8620 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8621 reg = gen_rtx_REG (GET_MODE (reg), 7);
8623 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8625 /* Can't restore directly; move through a temporary. */
8626 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8627 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8630 mips_emit_move (reg, mem);
8634 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8635 if this epilogue precedes a sibling call, false if it is for a normal
8636 "epilogue" pattern. */
8639 mips_expand_epilogue (int sibcall_p)
8641 HOST_WIDE_INT step1, step2;
8644 if (!sibcall_p && mips_can_use_return_insn ())
8646 emit_jump_insn (gen_return ());
8650 /* In mips16 mode, if the return value should go into a floating-point
8651 register, we need to call a helper routine to copy it over. */
8652 if (mips16_cfun_returns_in_fpr_p ())
8661 enum machine_mode return_mode;
8663 return_type = DECL_RESULT (current_function_decl);
8664 return_mode = DECL_MODE (return_type);
8666 name = ACONCAT (("__mips16_ret_",
8667 mips16_call_stub_mode_suffix (return_mode),
8669 id = get_identifier (name);
8670 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8671 retval = gen_rtx_REG (return_mode, GP_RETURN);
8672 call = gen_call_value_internal (retval, func, const0_rtx);
8673 insn = emit_call_insn (call);
8674 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8677 /* Split the frame into two. STEP1 is the amount of stack we should
8678 deallocate before restoring the registers. STEP2 is the amount we
8679 should deallocate afterwards.
8681 Start off by assuming that no registers need to be restored. */
8682 step1 = cfun->machine->frame.total_size;
8685 /* Work out which register holds the frame address. Account for the
8686 frame pointer offset used by mips16 code. */
8687 if (!frame_pointer_needed)
8688 base = stack_pointer_rtx;
8691 base = hard_frame_pointer_rtx;
8693 step1 -= cfun->machine->frame.args_size;
8696 /* If we need to restore registers, deallocate as much stack as
8697 possible in the second step without going out of range. */
8698 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8700 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8704 /* Set TARGET to BASE + STEP1. */
8710 /* Get an rtx for STEP1 that we can add to BASE. */
8711 adjust = GEN_INT (step1);
8712 if (!SMALL_OPERAND (step1))
8714 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8715 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8718 /* Normal mode code can copy the result straight into $sp. */
8720 target = stack_pointer_rtx;
8722 emit_insn (gen_add3_insn (target, base, adjust));
8725 /* Copy TARGET into the stack pointer. */
8726 if (target != stack_pointer_rtx)
8727 mips_emit_move (stack_pointer_rtx, target);
8729 /* If we're using addressing macros, $gp is implicitly used by all
8730 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8732 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8733 emit_insn (gen_blockage ());
8735 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8737 unsigned int regno, mask;
8738 HOST_WIDE_INT offset;
8741 /* Generate the restore instruction. */
8742 mask = cfun->machine->frame.mask;
8743 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8745 /* Restore any other registers manually. */
8746 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8747 if (BITSET_P (mask, regno - GP_REG_FIRST))
8749 offset -= GET_MODE_SIZE (gpr_mode);
8750 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8753 /* Restore the remaining registers and deallocate the final bit
8755 emit_insn (restore);
8759 /* Restore the registers. */
8760 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8763 /* Deallocate the final bit of the frame. */
8765 emit_insn (gen_add3_insn (stack_pointer_rtx,
8770 /* Add in the __builtin_eh_return stack adjustment. We need to
8771 use a temporary in mips16 code. */
8772 if (current_function_calls_eh_return)
8776 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8777 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8778 MIPS_EPILOGUE_TEMP (Pmode),
8779 EH_RETURN_STACKADJ_RTX));
8780 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8783 emit_insn (gen_add3_insn (stack_pointer_rtx,
8785 EH_RETURN_STACKADJ_RTX));
8790 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8791 path will restore the return address into $7 rather than $31. */
8793 && !GENERATE_MIPS16E_SAVE_RESTORE
8794 && (cfun->machine->frame.mask & RA_MASK) != 0)
8795 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8796 GP_REG_FIRST + 7)));
8798 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8799 GP_REG_FIRST + 31)));
8803 /* Return nonzero if this function is known to have a null epilogue.
8804 This allows the optimizer to omit jumps to jumps if no stack
8808 mips_can_use_return_insn (void)
8810 if (! reload_completed)
8813 if (df_regs_ever_live_p (31) || current_function_profile)
8816 /* In mips16 mode, a function that returns a floating point value
8817 needs to arrange to copy the return value into the floating point
8819 if (mips16_cfun_returns_in_fpr_p ())
8822 if (cfun->machine->frame.initialized)
8823 return cfun->machine->frame.total_size == 0;
8825 return compute_frame_size (get_frame_size ()) == 0;
8828 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
8829 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
8830 they only hold condition code modes, and CCmode is always considered to
8831 be 4 bytes wide. All other registers are word sized. */
8834 mips_hard_regno_nregs (int regno, enum machine_mode mode)
8836 if (ST_REG_P (regno))
8837 return ((GET_MODE_SIZE (mode) + 3) / 4);
8838 else if (! FP_REG_P (regno))
8839 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
8841 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
8844 /* Implement CLASS_MAX_NREGS.
8846 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8848 - ST_REGS are always hold CCmode values, and CCmode values are
8849 considered to be 4 bytes wide.
8851 All other register classes are covered by UNITS_PER_WORD. Note that
8852 this is true even for unions of integer and float registers when the
8853 latter are smaller than the former. The only supported combination
8854 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8855 words but 32-bit float registers. A word-based calculation is correct
8856 in that case since -msingle-float disallows multi-FPR values. */
8859 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8860 enum machine_mode mode)
8862 if (class == ST_REGS)
8863 return (GET_MODE_SIZE (mode) + 3) / 4;
8864 else if (class == FP_REGS)
8865 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8867 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8870 /* Return true if registers of class CLASS cannot change from mode FROM
8874 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
8875 enum machine_mode to ATTRIBUTE_UNUSED,
8876 enum reg_class class)
8878 /* There are several problems with changing the modes of values
8879 in floating-point registers:
8881 - When a multi-word value is stored in paired floating-point
8882 registers, the first register always holds the low word.
8883 We therefore can't allow FPRs to change between single-word
8884 and multi-word modes on big-endian targets.
8886 - GCC assumes that each word of a multiword register can be accessed
8887 individually using SUBREGs. This is not true for floating-point
8888 registers if they are bigger than a word.
8890 - Loading a 32-bit value into a 64-bit floating-point register
8891 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8892 We can't allow FPRs to change from SImode to to a wider mode on
8895 - If the FPU has already interpreted a value in one format, we must
8896 not ask it to treat the value as having a different format.
8898 We therefore only allow changes between 4-byte and smaller integer
8899 values, all of which have the "W" format as far as the FPU is
8901 return (reg_classes_intersect_p (FP_REGS, class)
8902 && (GET_MODE_CLASS (from) != MODE_INT
8903 || GET_MODE_CLASS (to) != MODE_INT
8904 || GET_MODE_SIZE (from) > 4
8905 || GET_MODE_SIZE (to) > 4));
8908 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
8911 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
8916 return TARGET_HARD_FLOAT;
8919 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
8922 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
8929 /* Implement PREFERRED_RELOAD_CLASS. */
8932 mips_preferred_reload_class (rtx x, enum reg_class class)
8934 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8937 if (reg_class_subset_p (FP_REGS, class)
8938 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
8941 if (reg_class_subset_p (GR_REGS, class))
8944 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8950 /* Return a number assessing the cost of moving a register in class
8951 FROM to class TO. The classes are expressed using the enumeration
8952 values such as `GENERAL_REGS'. A value of 2 is the default; other
8953 values are interpreted relative to that.
8955 It is not required that the cost always equal 2 when FROM is the
8956 same as TO; on some machines it is expensive to move between
8957 registers if they are not general registers.
8959 If reload sees an insn consisting of a single `set' between two
8960 hard registers, and if `REGISTER_MOVE_COST' applied to their
8961 classes returns a value of 2, reload does not check to ensure that
8962 the constraints of the insn are met. Setting a cost of other than
8963 2 will allow reload to verify that the constraints are met. You
8964 should do this if the `movM' pattern's constraints do not allow
8967 ??? We make the cost of moving from HI/LO into general
8968 registers the same as for one of moving general registers to
8969 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8970 pseudo to HI/LO. This might hurt optimizations though, it
8971 isn't clear if it is wise. And it might not work in all cases. We
8972 could solve the DImode LO reg problem by using a multiply, just
8973 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8974 problem by using divide instructions. divu puts the remainder in
8975 the HI reg, so doing a divide by -1 will move the value in the HI
8976 reg for all values except -1. We could handle that case by using a
8977 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8978 a compare/branch to test the input value to see which instruction
8979 we need to use. This gets pretty messy, but it is feasible. */
8982 mips_register_move_cost (enum machine_mode mode,
8983 enum reg_class to, enum reg_class from)
8987 if (reg_class_subset_p (from, GENERAL_REGS)
8988 && reg_class_subset_p (to, GENERAL_REGS))
8990 if (reg_class_subset_p (from, M16_REGS)
8991 || reg_class_subset_p (to, M16_REGS))
8997 else if (reg_class_subset_p (from, GENERAL_REGS))
8999 if (reg_class_subset_p (to, GENERAL_REGS))
9001 if (reg_class_subset_p (to, FP_REGS))
9003 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
9005 if (reg_class_subset_p (to, ACC_REGS))
9008 else if (reg_class_subset_p (to, GENERAL_REGS))
9010 if (reg_class_subset_p (from, FP_REGS))
9012 if (reg_class_subset_p (from, ST_REGS))
9013 /* LUI followed by MOVF. */
9015 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
9017 if (reg_class_subset_p (from, ACC_REGS))
9020 else if (reg_class_subset_p (from, FP_REGS))
9022 if (reg_class_subset_p (to, FP_REGS)
9023 && mips_mode_ok_for_mov_fmt_p (mode))
9025 if (reg_class_subset_p (to, ST_REGS))
9026 /* An expensive sequence. */
9033 /* This function returns the register class required for a secondary
9034 register when copying between one of the registers in CLASS, and X,
9035 using MODE. If IN_P is nonzero, the copy is going from X to the
9036 register, otherwise the register is the source. A return value of
9037 NO_REGS means that no secondary register is required. */
9040 mips_secondary_reload_class (enum reg_class class,
9041 enum machine_mode mode, rtx x, int in_p)
9045 /* If X is a constant that cannot be loaded into $25, it must be loaded
9046 into some other GPR. No other register class allows a direct move. */
9047 if (mips_dangerous_for_la25_p (x))
9048 return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
9050 regno = true_regnum (x);
9053 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
9054 if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
9057 /* We can't really copy to HI or LO at all in MIPS16 mode. */
9058 if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
9064 /* Copying from accumulator registers to anywhere other than a general
9065 register requires a temporary general register. */
9066 if (reg_class_subset_p (class, ACC_REGS))
9067 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9068 if (ACC_REG_P (regno))
9069 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9071 /* We can only copy a value to a condition code register from a
9072 floating point register, and even then we require a scratch
9073 floating point register. We can only copy a value out of a
9074 condition code register into a general register. */
9075 if (reg_class_subset_p (class, ST_REGS))
9079 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9081 if (ST_REG_P (regno))
9085 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9088 if (reg_class_subset_p (class, FP_REGS))
9091 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9092 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9093 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9096 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9097 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9100 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9101 /* We can force the constant to memory and use lwc1
9102 and ldc1. As above, we will use pairs of lwc1s if
9103 ldc1 is not supported. */
9106 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9107 /* In this case we can use mov.fmt. */
9110 /* Otherwise, we need to reload through an integer register. */
9113 if (FP_REG_P (regno))
9114 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9119 /* SImode values are represented as sign-extended to DImode. */
9122 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9124 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9131 mips_valid_pointer_mode (enum machine_mode mode)
9133 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9136 /* Target hook for vector_mode_supported_p. */
9139 mips_vector_mode_supported_p (enum machine_mode mode)
9144 return TARGET_PAIRED_SINGLE_FLOAT;
9161 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9164 mips_scalar_mode_supported_p (enum machine_mode mode)
9166 if (ALL_FIXED_POINT_MODE_P (mode)
9167 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9170 return default_scalar_mode_supported_p (mode);
9172 /* This function does three things:
9174 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9175 - Register the mips16 hardware floating point stubs.
9176 - Register the gofast functions if selected using --enable-gofast. */
9178 #include "config/gofast.h"
9181 mips_init_libfuncs (void)
9183 if (TARGET_FIX_VR4120)
9185 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9186 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9189 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9191 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9192 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9193 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9194 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9196 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9197 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9198 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9199 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9200 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9201 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9202 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9204 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9205 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9206 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9208 if (TARGET_DOUBLE_FLOAT)
9210 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9211 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9212 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9213 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9215 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9216 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9217 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9218 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9219 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9220 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9221 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9223 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9224 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9226 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9227 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9228 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
9232 gofast_maybe_init_libfuncs ();
9235 /* Return the length of INSN. LENGTH is the initial length computed by
9236 attributes in the machine-description file. */
9239 mips_adjust_insn_length (rtx insn, int length)
9241 /* A unconditional jump has an unfilled delay slot if it is not part
9242 of a sequence. A conditional jump normally has a delay slot, but
9243 does not on MIPS16. */
9244 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9247 /* See how many nops might be needed to avoid hardware hazards. */
9248 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9249 switch (get_attr_hazard (insn))
9263 /* All MIPS16 instructions are a measly two bytes. */
9271 /* Return an asm sequence to start a noat block and load the address
9272 of a label into $1. */
9275 mips_output_load_label (void)
9277 if (TARGET_EXPLICIT_RELOCS)
9281 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9284 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9287 if (ISA_HAS_LOAD_DELAY)
9288 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9289 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9293 if (Pmode == DImode)
9294 return "%[dla\t%@,%0";
9296 return "%[la\t%@,%0";
9300 /* Return the assembly code for INSN, which has the operands given by
9301 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9302 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9303 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9304 version of BRANCH_IF_TRUE. */
9307 mips_output_conditional_branch (rtx insn, rtx *operands,
9308 const char *branch_if_true,
9309 const char *branch_if_false)
9311 unsigned int length;
9312 rtx taken, not_taken;
9314 length = get_attr_length (insn);
9317 /* Just a simple conditional branch. */
9318 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9319 return branch_if_true;
9322 /* Generate a reversed branch around a direct jump. This fallback does
9323 not use branch-likely instructions. */
9324 mips_branch_likely = false;
9325 not_taken = gen_label_rtx ();
9326 taken = operands[1];
9328 /* Generate the reversed branch to NOT_TAKEN. */
9329 operands[1] = not_taken;
9330 output_asm_insn (branch_if_false, operands);
9332 /* If INSN has a delay slot, we must provide delay slots for both the
9333 branch to NOT_TAKEN and the conditional jump. We must also ensure
9334 that INSN's delay slot is executed in the appropriate cases. */
9337 /* This first delay slot will always be executed, so use INSN's
9338 delay slot if is not annulled. */
9339 if (!INSN_ANNULLED_BRANCH_P (insn))
9341 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9342 asm_out_file, optimize, 1, NULL);
9343 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9346 output_asm_insn ("nop", 0);
9347 fprintf (asm_out_file, "\n");
9350 /* Output the unconditional branch to TAKEN. */
9352 output_asm_insn ("j\t%0%/", &taken);
9355 output_asm_insn (mips_output_load_label (), &taken);
9356 output_asm_insn ("jr\t%@%]%/", 0);
9359 /* Now deal with its delay slot; see above. */
9362 /* This delay slot will only be executed if the branch is taken.
9363 Use INSN's delay slot if is annulled. */
9364 if (INSN_ANNULLED_BRANCH_P (insn))
9366 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9367 asm_out_file, optimize, 1, NULL);
9368 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9371 output_asm_insn ("nop", 0);
9372 fprintf (asm_out_file, "\n");
9375 /* Output NOT_TAKEN. */
9376 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9377 CODE_LABEL_NUMBER (not_taken));
9381 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9382 if some ordered condition is true. The condition is given by
9383 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9384 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9385 its second is always zero. */
9388 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9390 const char *branch[2];
9392 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9393 Make BRANCH[0] branch on the inverse condition. */
9394 switch (GET_CODE (operands[0]))
9396 /* These cases are equivalent to comparisons against zero. */
9398 inverted_p = !inverted_p;
9401 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9402 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9405 /* These cases are always true or always false. */
9407 inverted_p = !inverted_p;
9410 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9411 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9415 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9416 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9419 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9422 /* Used to output div or ddiv instruction DIVISION, which has the operands
9423 given by OPERANDS. Add in a divide-by-zero check if needed.
9425 When working around R4000 and R4400 errata, we need to make sure that
9426 the division is not immediately followed by a shift[1][2]. We also
9427 need to stop the division from being put into a branch delay slot[3].
9428 The easiest way to avoid both problems is to add a nop after the
9429 division. When a divide-by-zero check is needed, this nop can be
9430 used to fill the branch delay slot.
9432 [1] If a double-word or a variable shift executes immediately
9433 after starting an integer division, the shift may give an
9434 incorrect result. See quotations of errata #16 and #28 from
9435 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9436 in mips.md for details.
9438 [2] A similar bug to [1] exists for all revisions of the
9439 R4000 and the R4400 when run in an MC configuration.
9440 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9442 "19. In this following sequence:
9444 ddiv (or ddivu or div or divu)
9445 dsll32 (or dsrl32, dsra32)
9447 if an MPT stall occurs, while the divide is slipping the cpu
9448 pipeline, then the following double shift would end up with an
9451 Workaround: The compiler needs to avoid generating any
9452 sequence with divide followed by extended double shift."
9454 This erratum is also present in "MIPS R4400MC Errata, Processor
9455 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9456 & 3.0" as errata #10 and #4, respectively.
9458 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9459 (also valid for MIPS R4000MC processors):
9461 "52. R4000SC: This bug does not apply for the R4000PC.
9463 There are two flavors of this bug:
9465 1) If the instruction just after divide takes an RF exception
9466 (tlb-refill, tlb-invalid) and gets an instruction cache
9467 miss (both primary and secondary) and the line which is
9468 currently in secondary cache at this index had the first
9469 data word, where the bits 5..2 are set, then R4000 would
9470 get a wrong result for the div.
9475 ------------------- # end-of page. -tlb-refill
9480 ------------------- # end-of page. -tlb-invalid
9483 2) If the divide is in the taken branch delay slot, where the
9484 target takes RF exception and gets an I-cache miss for the
9485 exception vector or where I-cache miss occurs for the
9486 target address, under the above mentioned scenarios, the
9487 div would get wrong results.
9490 j r2 # to next page mapped or unmapped
9491 div r8,r9 # this bug would be there as long
9492 # as there is an ICache miss and
9493 nop # the "data pattern" is present
9496 beq r0, r0, NextPage # to Next page
9500 This bug is present for div, divu, ddiv, and ddivu
9503 Workaround: For item 1), OS could make sure that the next page
9504 after the divide instruction is also mapped. For item 2), the
9505 compiler could make sure that the divide instruction is not in
9506 the branch delay slot."
9508 These processors have PRId values of 0x00004220 and 0x00004300 for
9509 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9512 mips_output_division (const char *division, rtx *operands)
9517 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9519 output_asm_insn (s, operands);
9522 if (TARGET_CHECK_ZERO_DIV)
9526 output_asm_insn (s, operands);
9527 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9529 else if (GENERATE_DIVIDE_TRAPS)
9531 output_asm_insn (s, operands);
9536 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9537 output_asm_insn (s, operands);
9538 s = "break\t7%)\n1:";
9544 /* Return true if INSN is a multiply-add or multiply-subtract
9545 instruction and PREV assigns to the accumulator operand. */
9548 mips_linked_madd_p (rtx prev, rtx insn)
9552 x = single_set (insn);
9558 if (GET_CODE (x) == PLUS
9559 && GET_CODE (XEXP (x, 0)) == MULT
9560 && reg_set_p (XEXP (x, 1), prev))
9563 if (GET_CODE (x) == MINUS
9564 && GET_CODE (XEXP (x, 1)) == MULT
9565 && reg_set_p (XEXP (x, 0), prev))
9571 /* Implements a store data bypass check. We need this because the cprestore
9572 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
9573 default routine to abort. We just return false for that case. */
9574 /* ??? Should try to give a better result here than assuming false. */
9577 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9579 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9582 return ! store_data_bypass_p (out_insn, in_insn);
9585 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9586 dependencies have no cost, except on the 20Kc where output-dependence
9587 is treated like input-dependence. */
9590 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9591 rtx dep ATTRIBUTE_UNUSED, int cost)
9593 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
9596 if (REG_NOTE_KIND (link) != 0)
9601 /* Return the number of instructions that can be issued per cycle. */
9604 mips_issue_rate (void)
9608 case PROCESSOR_74KC:
9609 case PROCESSOR_74KF2_1:
9610 case PROCESSOR_74KF1_1:
9611 case PROCESSOR_74KF3_2:
9612 /* The 74k is not strictly quad-issue cpu, but can be seen as one
9613 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
9614 but in reality only a maximum of 3 insns can be issued as the
9615 floating point load/stores also require a slot in the AGEN pipe. */
9618 case PROCESSOR_20KC:
9619 case PROCESSOR_R4130:
9620 case PROCESSOR_R5400:
9621 case PROCESSOR_R5500:
9622 case PROCESSOR_R7000:
9623 case PROCESSOR_R9000:
9627 case PROCESSOR_SB1A:
9628 /* This is actually 4, but we get better performance if we claim 3.
9629 This is partly because of unwanted speculative code motion with the
9630 larger number, and partly because in most common cases we can't
9631 reach the theoretical max of 4. */
9639 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9640 be as wide as the scheduling freedom in the DFA. */
9643 mips_multipass_dfa_lookahead (void)
9645 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9652 /* Remove the instruction at index LOWER from ready queue READY and
9653 reinsert it in front of the instruction at index HIGHER. LOWER must
9657 mips_promote_ready (rtx *ready, int lower, int higher)
9662 new_head = ready[lower];
9663 for (i = lower; i < higher; i++)
9664 ready[i] = ready[i + 1];
9665 ready[i] = new_head;
9668 /* If the priority of the instruction at POS2 in the ready queue READY
9669 is within LIMIT units of that of the instruction at POS1, swap the
9670 instructions if POS2 is not already less than POS1. */
9673 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
9676 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
9680 ready[pos1] = ready[pos2];
9685 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9686 that may clobber hi or lo. */
9688 static rtx mips_macc_chains_last_hilo;
9690 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9691 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9694 mips_macc_chains_record (rtx insn)
9696 if (get_attr_may_clobber_hilo (insn))
9697 mips_macc_chains_last_hilo = insn;
9700 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9701 has NREADY elements, looking for a multiply-add or multiply-subtract
9702 instruction that is cumulative with mips_macc_chains_last_hilo.
9703 If there is one, promote it ahead of anything else that might
9704 clobber hi or lo. */
9707 mips_macc_chains_reorder (rtx *ready, int nready)
9711 if (mips_macc_chains_last_hilo != 0)
9712 for (i = nready - 1; i >= 0; i--)
9713 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9715 for (j = nready - 1; j > i; j--)
9716 if (recog_memoized (ready[j]) >= 0
9717 && get_attr_may_clobber_hilo (ready[j]))
9719 mips_promote_ready (ready, i, j);
9726 /* The last instruction to be scheduled. */
9728 static rtx vr4130_last_insn;
9730 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9731 points to an rtx that is initially an instruction. Nullify the rtx
9732 if the instruction uses the value of register X. */
9735 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9737 rtx *insn_ptr = data;
9740 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9744 /* Return true if there is true register dependence between vr4130_last_insn
9748 vr4130_true_reg_dependence_p (rtx insn)
9750 note_stores (PATTERN (vr4130_last_insn),
9751 vr4130_true_reg_dependence_p_1, &insn);
9755 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9756 the ready queue and that INSN2 is the instruction after it, return
9757 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9758 in which INSN1 and INSN2 can probably issue in parallel, but for
9759 which (INSN2, INSN1) should be less sensitive to instruction
9760 alignment than (INSN1, INSN2). See 4130.md for more details. */
9763 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9765 sd_iterator_def sd_it;
9768 /* Check for the following case:
9770 1) there is some other instruction X with an anti dependence on INSN1;
9771 2) X has a higher priority than INSN2; and
9772 3) X is an arithmetic instruction (and thus has no unit restrictions).
9774 If INSN1 is the last instruction blocking X, it would better to
9775 choose (INSN1, X) over (INSN2, INSN1). */
9776 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
9777 if (DEP_TYPE (dep) == REG_DEP_ANTI
9778 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
9779 && recog_memoized (DEP_CON (dep)) >= 0
9780 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
9783 if (vr4130_last_insn != 0
9784 && recog_memoized (insn1) >= 0
9785 && recog_memoized (insn2) >= 0)
9787 /* See whether INSN1 and INSN2 use different execution units,
9788 or if they are both ALU-type instructions. If so, they can
9789 probably execute in parallel. */
9790 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9791 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9792 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9794 /* If only one of the instructions has a dependence on
9795 vr4130_last_insn, prefer to schedule the other one first. */
9796 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9797 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9801 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9802 is not an ALU-type instruction and if INSN1 uses the same
9803 execution unit. (Note that if this condition holds, we already
9804 know that INSN2 uses a different execution unit.) */
9805 if (class1 != VR4130_CLASS_ALU
9806 && recog_memoized (vr4130_last_insn) >= 0
9807 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9814 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9815 queue with at least two instructions. Swap the first two if
9816 vr4130_swap_insns_p says that it could be worthwhile. */
9819 vr4130_reorder (rtx *ready, int nready)
9821 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9822 mips_promote_ready (ready, nready - 2, nready - 1);
9825 /* Record whether last 74k AGEN instruction was a load or store. */
9827 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
9829 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
9830 resets to TYPE_UNKNOWN state. */
9833 mips_74k_agen_init (rtx insn)
9835 if (!insn || !NONJUMP_INSN_P (insn))
9836 mips_last_74k_agen_insn = TYPE_UNKNOWN;
9837 else if (USEFUL_INSN_P (insn))
9839 enum attr_type type = get_attr_type (insn);
9840 if (type == TYPE_LOAD || type == TYPE_STORE)
9841 mips_last_74k_agen_insn = type;
9845 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
9846 loads to be grouped together, and multiple stores to be grouped
9847 together. Swap things around in the ready queue to make this happen. */
9850 mips_74k_agen_reorder (rtx *ready, int nready)
9853 int store_pos, load_pos;
9858 for (i = nready - 1; i >= 0; i--)
9860 rtx insn = ready[i];
9861 if (USEFUL_INSN_P (insn))
9862 switch (get_attr_type (insn))
9865 if (store_pos == -1)
9879 if (load_pos == -1 || store_pos == -1)
9882 switch (mips_last_74k_agen_insn)
9885 /* Prefer to schedule loads since they have a higher latency. */
9887 /* Swap loads to the front of the queue. */
9888 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
9891 /* Swap stores to the front of the queue. */
9892 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
9899 /* Implement TARGET_SCHED_INIT. */
9902 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9903 int max_ready ATTRIBUTE_UNUSED)
9905 mips_macc_chains_last_hilo = 0;
9906 vr4130_last_insn = 0;
9907 mips_74k_agen_init (NULL_RTX);
9910 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
9913 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9914 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
9916 if (!reload_completed
9919 mips_macc_chains_reorder (ready, *nreadyp);
9920 if (reload_completed
9922 && !TARGET_VR4130_ALIGN
9924 vr4130_reorder (ready, *nreadyp);
9926 mips_74k_agen_reorder (ready, *nreadyp);
9927 return mips_issue_rate ();
9930 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9933 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9937 mips_74k_agen_init (insn);
9938 switch (GET_CODE (PATTERN (insn)))
9942 /* Don't count USEs and CLOBBERs against the issue rate. */
9947 if (!reload_completed && TUNE_MACC_CHAINS)
9948 mips_macc_chains_record (insn);
9949 vr4130_last_insn = insn;
9955 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9956 return the first operand of the associated "pref" or "prefx" insn. */
9959 mips_prefetch_cookie (rtx write, rtx locality)
9961 /* store_streamed / load_streamed. */
9962 if (INTVAL (locality) <= 0)
9963 return GEN_INT (INTVAL (write) + 4);
9966 if (INTVAL (locality) <= 2)
9969 /* store_retained / load_retained. */
9970 return GEN_INT (INTVAL (write) + 6);
9973 /* MIPS builtin function support. */
9975 struct builtin_description
9977 /* The code of the main .md file instruction. See mips_builtin_type
9978 for more information. */
9979 enum insn_code icode;
9981 /* The floating-point comparison code to use with ICODE, if any. */
9982 enum mips_fp_condition cond;
9984 /* The name of the builtin function. */
9987 /* Specifies how the function should be expanded. */
9988 enum mips_builtin_type builtin_type;
9990 /* The function's prototype. */
9991 enum mips_function_type function_type;
9993 /* The target flags required for this function. */
9997 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9998 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9999 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10000 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10001 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
10003 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
10005 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
10006 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
10007 "__builtin_mips_" #INSN "_" #COND "_s", \
10008 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
10009 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
10010 "__builtin_mips_" #INSN "_" #COND "_d", \
10011 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
10013 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10014 The lower and upper forms require TARGET_FLAGS while the any and all
10015 forms require MASK_MIPS3D. */
10016 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
10017 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10018 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
10019 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10020 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10021 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
10022 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10023 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10024 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
10025 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
10026 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10027 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
10028 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
10030 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10031 require MASK_MIPS3D. */
10032 #define CMP_4S_BUILTINS(INSN, COND) \
10033 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10034 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
10035 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10037 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10038 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
10039 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10042 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10043 instruction requires TARGET_FLAGS. */
10044 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
10045 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10046 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
10047 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10049 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10050 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
10051 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10054 /* Define all the builtins related to c.cond.fmt condition COND. */
10055 #define CMP_BUILTINS(COND) \
10056 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10057 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
10058 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
10059 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10060 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
10061 CMP_4S_BUILTINS (c, COND), \
10062 CMP_4S_BUILTINS (cabs, COND)
10064 static const struct builtin_description mips_bdesc[] =
10066 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10067 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10068 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10069 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10070 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10071 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10072 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10073 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10075 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10076 MASK_PAIRED_SINGLE_FLOAT),
10077 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10078 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10079 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10080 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10082 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10083 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10084 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10085 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10086 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10087 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10089 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10090 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10091 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10092 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10093 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10094 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10096 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10099 /* Builtin functions for the SB-1 processor. */
10101 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10103 static const struct builtin_description sb1_bdesc[] =
10105 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10108 /* Builtin functions for DSP ASE. */
10110 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10111 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10112 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10113 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10114 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10116 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10117 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10118 builtin_description fields. */
10119 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10120 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10121 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10123 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10124 branch instruction. TARGET_FLAGS is a builtin_description field. */
10125 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10126 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10127 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10129 static const struct builtin_description dsp_bdesc[] =
10131 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10132 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10133 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10134 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10135 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10136 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10137 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10138 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10139 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10140 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10141 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10142 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10143 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10144 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10145 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10146 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10147 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10148 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10149 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10150 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10151 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10152 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10153 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10154 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10155 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10156 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10157 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10158 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10159 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10160 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10161 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10162 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10163 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10164 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10165 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10166 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10167 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10168 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10169 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10170 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10171 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10172 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10173 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10174 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10175 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10176 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10177 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10178 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10179 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10180 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10181 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10182 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10183 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10184 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10185 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10186 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10187 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10188 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10189 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10190 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10191 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10192 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10193 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10194 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10195 BPOSGE_BUILTIN (32, MASK_DSP),
10197 /* The following are for the MIPS DSP ASE REV 2. */
10198 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
10199 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10200 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10201 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10202 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10203 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10204 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10205 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10206 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10207 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10208 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10209 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10210 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10211 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10212 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10213 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10214 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10215 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10216 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10217 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10218 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10219 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
10220 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10221 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10222 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10223 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10224 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10225 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10226 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10227 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10228 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10229 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10230 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10231 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
10234 static const struct builtin_description dsp_32only_bdesc[] =
10236 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10237 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10238 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10239 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10240 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10241 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10242 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10243 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10244 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10245 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10246 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10247 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10248 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10249 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10250 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10251 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10252 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10253 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10254 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10255 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10256 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10258 /* The following are for the MIPS DSP ASE REV 2. */
10259 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10260 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10261 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10262 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10263 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10264 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10265 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10266 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
10267 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
10268 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10269 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10270 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10271 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10272 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10273 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
10276 /* This helps provide a mapping from builtin function codes to bdesc
10281 /* The builtin function table that this entry describes. */
10282 const struct builtin_description *bdesc;
10284 /* The number of entries in the builtin function table. */
10287 /* The target processor that supports these builtin functions.
10288 PROCESSOR_MAX means we enable them for all processors. */
10289 enum processor_type proc;
10291 /* If the target has these flags, this builtin function table
10292 will not be supported. */
10293 int unsupported_target_flags;
10296 static const struct bdesc_map bdesc_arrays[] =
10298 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
10299 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
10300 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
10301 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
10305 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
10308 mips_init_builtins (void)
10310 const struct builtin_description *d;
10311 const struct bdesc_map *m;
10312 tree types[(int) MIPS_MAX_FTYPE_MAX];
10313 tree V2SF_type_node;
10314 tree V2HI_type_node;
10315 tree V4QI_type_node;
10316 unsigned int offset;
10318 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
10319 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
10322 if (TARGET_PAIRED_SINGLE_FLOAT)
10324 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
10326 types[MIPS_V2SF_FTYPE_V2SF]
10327 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
10329 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
10330 = build_function_type_list (V2SF_type_node,
10331 V2SF_type_node, V2SF_type_node, NULL_TREE);
10333 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
10334 = build_function_type_list (V2SF_type_node,
10335 V2SF_type_node, V2SF_type_node,
10336 integer_type_node, NULL_TREE);
10338 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
10339 = build_function_type_list (V2SF_type_node,
10340 V2SF_type_node, V2SF_type_node,
10341 V2SF_type_node, V2SF_type_node, NULL_TREE);
10343 types[MIPS_V2SF_FTYPE_SF_SF]
10344 = build_function_type_list (V2SF_type_node,
10345 float_type_node, float_type_node, NULL_TREE);
10347 types[MIPS_INT_FTYPE_V2SF_V2SF]
10348 = build_function_type_list (integer_type_node,
10349 V2SF_type_node, V2SF_type_node, NULL_TREE);
10351 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
10352 = build_function_type_list (integer_type_node,
10353 V2SF_type_node, V2SF_type_node,
10354 V2SF_type_node, V2SF_type_node, NULL_TREE);
10356 types[MIPS_INT_FTYPE_SF_SF]
10357 = build_function_type_list (integer_type_node,
10358 float_type_node, float_type_node, NULL_TREE);
10360 types[MIPS_INT_FTYPE_DF_DF]
10361 = build_function_type_list (integer_type_node,
10362 double_type_node, double_type_node, NULL_TREE);
10364 types[MIPS_SF_FTYPE_V2SF]
10365 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
10367 types[MIPS_SF_FTYPE_SF]
10368 = build_function_type_list (float_type_node,
10369 float_type_node, NULL_TREE);
10371 types[MIPS_SF_FTYPE_SF_SF]
10372 = build_function_type_list (float_type_node,
10373 float_type_node, float_type_node, NULL_TREE);
10375 types[MIPS_DF_FTYPE_DF]
10376 = build_function_type_list (double_type_node,
10377 double_type_node, NULL_TREE);
10379 types[MIPS_DF_FTYPE_DF_DF]
10380 = build_function_type_list (double_type_node,
10381 double_type_node, double_type_node, NULL_TREE);
10386 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
10387 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
10389 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
10390 = build_function_type_list (V2HI_type_node,
10391 V2HI_type_node, V2HI_type_node,
10394 types[MIPS_SI_FTYPE_SI_SI]
10395 = build_function_type_list (intSI_type_node,
10396 intSI_type_node, intSI_type_node,
10399 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
10400 = build_function_type_list (V4QI_type_node,
10401 V4QI_type_node, V4QI_type_node,
10404 types[MIPS_SI_FTYPE_V4QI]
10405 = build_function_type_list (intSI_type_node,
10409 types[MIPS_V2HI_FTYPE_V2HI]
10410 = build_function_type_list (V2HI_type_node,
10414 types[MIPS_SI_FTYPE_SI]
10415 = build_function_type_list (intSI_type_node,
10419 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
10420 = build_function_type_list (V4QI_type_node,
10421 V2HI_type_node, V2HI_type_node,
10424 types[MIPS_V2HI_FTYPE_SI_SI]
10425 = build_function_type_list (V2HI_type_node,
10426 intSI_type_node, intSI_type_node,
10429 types[MIPS_SI_FTYPE_V2HI]
10430 = build_function_type_list (intSI_type_node,
10434 types[MIPS_V2HI_FTYPE_V4QI]
10435 = build_function_type_list (V2HI_type_node,
10439 types[MIPS_V4QI_FTYPE_V4QI_SI]
10440 = build_function_type_list (V4QI_type_node,
10441 V4QI_type_node, intSI_type_node,
10444 types[MIPS_V2HI_FTYPE_V2HI_SI]
10445 = build_function_type_list (V2HI_type_node,
10446 V2HI_type_node, intSI_type_node,
10449 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
10450 = build_function_type_list (V2HI_type_node,
10451 V4QI_type_node, V2HI_type_node,
10454 types[MIPS_SI_FTYPE_V2HI_V2HI]
10455 = build_function_type_list (intSI_type_node,
10456 V2HI_type_node, V2HI_type_node,
10459 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
10460 = build_function_type_list (intDI_type_node,
10461 intDI_type_node, V4QI_type_node, V4QI_type_node,
10464 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
10465 = build_function_type_list (intDI_type_node,
10466 intDI_type_node, V2HI_type_node, V2HI_type_node,
10469 types[MIPS_DI_FTYPE_DI_SI_SI]
10470 = build_function_type_list (intDI_type_node,
10471 intDI_type_node, intSI_type_node, intSI_type_node,
10474 types[MIPS_V4QI_FTYPE_SI]
10475 = build_function_type_list (V4QI_type_node,
10479 types[MIPS_V2HI_FTYPE_SI]
10480 = build_function_type_list (V2HI_type_node,
10484 types[MIPS_VOID_FTYPE_V4QI_V4QI]
10485 = build_function_type_list (void_type_node,
10486 V4QI_type_node, V4QI_type_node,
10489 types[MIPS_SI_FTYPE_V4QI_V4QI]
10490 = build_function_type_list (intSI_type_node,
10491 V4QI_type_node, V4QI_type_node,
10494 types[MIPS_VOID_FTYPE_V2HI_V2HI]
10495 = build_function_type_list (void_type_node,
10496 V2HI_type_node, V2HI_type_node,
10499 types[MIPS_SI_FTYPE_DI_SI]
10500 = build_function_type_list (intSI_type_node,
10501 intDI_type_node, intSI_type_node,
10504 types[MIPS_DI_FTYPE_DI_SI]
10505 = build_function_type_list (intDI_type_node,
10506 intDI_type_node, intSI_type_node,
10509 types[MIPS_VOID_FTYPE_SI_SI]
10510 = build_function_type_list (void_type_node,
10511 intSI_type_node, intSI_type_node,
10514 types[MIPS_SI_FTYPE_PTR_SI]
10515 = build_function_type_list (intSI_type_node,
10516 ptr_type_node, intSI_type_node,
10519 types[MIPS_SI_FTYPE_VOID]
10520 = build_function_type (intSI_type_node, void_list_node);
10524 types[MIPS_V4QI_FTYPE_V4QI]
10525 = build_function_type_list (V4QI_type_node,
10529 types[MIPS_SI_FTYPE_SI_SI_SI]
10530 = build_function_type_list (intSI_type_node,
10531 intSI_type_node, intSI_type_node,
10532 intSI_type_node, NULL_TREE);
10534 types[MIPS_DI_FTYPE_DI_USI_USI]
10535 = build_function_type_list (intDI_type_node,
10537 unsigned_intSI_type_node,
10538 unsigned_intSI_type_node, NULL_TREE);
10540 types[MIPS_DI_FTYPE_SI_SI]
10541 = build_function_type_list (intDI_type_node,
10542 intSI_type_node, intSI_type_node,
10545 types[MIPS_DI_FTYPE_USI_USI]
10546 = build_function_type_list (intDI_type_node,
10547 unsigned_intSI_type_node,
10548 unsigned_intSI_type_node, NULL_TREE);
10550 types[MIPS_V2HI_FTYPE_SI_SI_SI]
10551 = build_function_type_list (V2HI_type_node,
10552 intSI_type_node, intSI_type_node,
10553 intSI_type_node, NULL_TREE);
10558 /* Iterate through all of the bdesc arrays, initializing all of the
10559 builtin functions. */
10562 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10564 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10565 && (m->unsupported_target_flags & target_flags) == 0)
10566 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10567 if ((d->target_flags & target_flags) == d->target_flags)
10568 add_builtin_function (d->name, types[d->function_type],
10569 d - m->bdesc + offset,
10570 BUILT_IN_MD, NULL, NULL);
10575 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
10576 suitable for input operand OP of instruction ICODE. Return the value. */
10579 mips_prepare_builtin_arg (enum insn_code icode,
10580 unsigned int op, tree exp, unsigned int argnum)
10583 enum machine_mode mode;
10585 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
10586 mode = insn_data[icode].operand[op].mode;
10587 if (!insn_data[icode].operand[op].predicate (value, mode))
10589 value = copy_to_mode_reg (mode, value);
10590 /* Check the predicate again. */
10591 if (!insn_data[icode].operand[op].predicate (value, mode))
10593 error ("invalid argument to builtin function");
10601 /* Return an rtx suitable for output operand OP of instruction ICODE.
10602 If TARGET is non-null, try to use it where possible. */
10605 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10607 enum machine_mode mode;
10609 mode = insn_data[icode].operand[op].mode;
10610 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10611 target = gen_reg_rtx (mode);
10616 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10617 .md pattern and CALL is the function expr with arguments. TARGET,
10618 if nonnull, suggests a good place to put the result.
10619 HAS_TARGET indicates the function must return something. */
10622 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
10625 rtx ops[MAX_RECOG_OPERANDS];
10631 /* We save target to ops[0]. */
10632 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10636 /* We need to test if the arglist is not zero. Some instructions have extra
10637 clobber registers. */
10638 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
10639 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10644 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10648 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10652 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10656 gcc_unreachable ();
10661 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10662 function (TYPE says which). EXP is the tree for the function
10663 function, ICODE is the instruction that should be used to compare
10664 the first two arguments, and COND is the condition it should test.
10665 TARGET, if nonnull, suggests a good place to put the result. */
10668 mips_expand_builtin_movtf (enum mips_builtin_type type,
10669 enum insn_code icode, enum mips_fp_condition cond,
10670 rtx target, tree exp)
10672 rtx cmp_result, op0, op1;
10674 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10675 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
10676 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
10677 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10679 icode = CODE_FOR_mips_cond_move_tf_ps;
10680 target = mips_prepare_builtin_target (icode, 0, target);
10681 if (type == MIPS_BUILTIN_MOVT)
10683 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
10684 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
10688 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
10689 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
10691 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10695 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10696 into TARGET otherwise. Return TARGET. */
10699 mips_builtin_branch_and_move (rtx condition, rtx target,
10700 rtx value_if_true, rtx value_if_false)
10702 rtx true_label, done_label;
10704 true_label = gen_label_rtx ();
10705 done_label = gen_label_rtx ();
10707 /* First assume that CONDITION is false. */
10708 mips_emit_move (target, value_if_false);
10710 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10711 emit_jump_insn (gen_condjump (condition, true_label));
10712 emit_jump_insn (gen_jump (done_label));
10715 /* Fix TARGET if CONDITION is true. */
10716 emit_label (true_label);
10717 mips_emit_move (target, value_if_true);
10719 emit_label (done_label);
10723 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10724 of the comparison instruction and COND is the condition it should test.
10725 EXP is the function call and arguments and TARGET, if nonnull,
10726 suggests a good place to put the boolean result. */
10729 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10730 enum insn_code icode, enum mips_fp_condition cond,
10731 rtx target, tree exp)
10733 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10737 if (target == 0 || GET_MODE (target) != SImode)
10738 target = gen_reg_rtx (SImode);
10740 /* Prepare the operands to the comparison. */
10741 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10742 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
10743 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10745 switch (insn_data[icode].n_operands)
10748 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10752 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10753 ops[3], ops[4], GEN_INT (cond)));
10757 gcc_unreachable ();
10760 /* If the comparison sets more than one register, we define the result
10761 to be 0 if all registers are false and -1 if all registers are true.
10762 The value of the complete result is indeterminate otherwise. */
10763 switch (builtin_type)
10765 case MIPS_BUILTIN_CMP_ALL:
10766 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10767 return mips_builtin_branch_and_move (condition, target,
10768 const0_rtx, const1_rtx);
10770 case MIPS_BUILTIN_CMP_UPPER:
10771 case MIPS_BUILTIN_CMP_LOWER:
10772 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10773 condition = gen_single_cc (cmp_result, offset);
10774 return mips_builtin_branch_and_move (condition, target,
10775 const1_rtx, const0_rtx);
10778 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10779 return mips_builtin_branch_and_move (condition, target,
10780 const1_rtx, const0_rtx);
10784 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10785 suggests a good place to put the boolean result. */
10788 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10790 rtx condition, cmp_result;
10793 if (target == 0 || GET_MODE (target) != SImode)
10794 target = gen_reg_rtx (SImode);
10796 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10798 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10803 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10804 return mips_builtin_branch_and_move (condition, target,
10805 const1_rtx, const0_rtx);
10808 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10811 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10812 enum machine_mode mode ATTRIBUTE_UNUSED,
10813 int ignore ATTRIBUTE_UNUSED)
10815 enum insn_code icode;
10816 enum mips_builtin_type type;
10818 unsigned int fcode;
10819 const struct builtin_description *bdesc;
10820 const struct bdesc_map *m;
10822 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10823 fcode = DECL_FUNCTION_CODE (fndecl);
10827 error ("built-in function %qs not supported for MIPS16",
10828 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
10833 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10835 if (fcode < m->size)
10838 icode = bdesc[fcode].icode;
10839 type = bdesc[fcode].builtin_type;
10849 case MIPS_BUILTIN_DIRECT:
10850 return mips_expand_builtin_direct (icode, target, exp, true);
10852 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10853 return mips_expand_builtin_direct (icode, target, exp, false);
10855 case MIPS_BUILTIN_MOVT:
10856 case MIPS_BUILTIN_MOVF:
10857 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10860 case MIPS_BUILTIN_CMP_ANY:
10861 case MIPS_BUILTIN_CMP_ALL:
10862 case MIPS_BUILTIN_CMP_UPPER:
10863 case MIPS_BUILTIN_CMP_LOWER:
10864 case MIPS_BUILTIN_CMP_SINGLE:
10865 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10868 case MIPS_BUILTIN_BPOSGE32:
10869 return mips_expand_builtin_bposge (type, target);
10876 /* An entry in the mips16 constant pool. VALUE is the pool constant,
10877 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10879 struct mips16_constant {
10880 struct mips16_constant *next;
10883 enum machine_mode mode;
10886 /* Information about an incomplete mips16 constant pool. FIRST is the
10887 first constant, HIGHEST_ADDRESS is the highest address that the first
10888 byte of the pool can have, and INSN_ADDRESS is the current instruction
10891 struct mips16_constant_pool {
10892 struct mips16_constant *first;
10893 int highest_address;
10897 /* Add constant VALUE to POOL and return its label. MODE is the
10898 value's mode (used for CONST_INTs, etc.). */
10901 add_constant (struct mips16_constant_pool *pool,
10902 rtx value, enum machine_mode mode)
10904 struct mips16_constant **p, *c;
10905 bool first_of_size_p;
10907 /* See whether the constant is already in the pool. If so, return the
10908 existing label, otherwise leave P pointing to the place where the
10909 constant should be added.
10911 Keep the pool sorted in increasing order of mode size so that we can
10912 reduce the number of alignments needed. */
10913 first_of_size_p = true;
10914 for (p = &pool->first; *p != 0; p = &(*p)->next)
10916 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10917 return (*p)->label;
10918 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10920 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10921 first_of_size_p = false;
10924 /* In the worst case, the constant needed by the earliest instruction
10925 will end up at the end of the pool. The entire pool must then be
10926 accessible from that instruction.
10928 When adding the first constant, set the pool's highest address to
10929 the address of the first out-of-range byte. Adjust this address
10930 downwards each time a new constant is added. */
10931 if (pool->first == 0)
10932 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10933 is the address of the instruction with the lowest two bits clear.
10934 The base PC value for ld has the lowest three bits clear. Assume
10935 the worst case here. */
10936 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10937 pool->highest_address -= GET_MODE_SIZE (mode);
10938 if (first_of_size_p)
10939 /* Take into account the worst possible padding due to alignment. */
10940 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10942 /* Create a new entry. */
10943 c = (struct mips16_constant *) xmalloc (sizeof *c);
10946 c->label = gen_label_rtx ();
10953 /* Output constant VALUE after instruction INSN and return the last
10954 instruction emitted. MODE is the mode of the constant. */
10957 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10959 if (SCALAR_INT_MODE_P (mode)
10960 || ALL_SCALAR_FRACT_MODE_P (mode)
10961 || ALL_SCALAR_ACCUM_MODE_P (mode))
10963 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10964 return emit_insn_after (gen_consttable_int (value, size), insn);
10967 if (SCALAR_FLOAT_MODE_P (mode))
10968 return emit_insn_after (gen_consttable_float (value), insn);
10970 if (VECTOR_MODE_P (mode))
10974 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10975 insn = dump_constants_1 (GET_MODE_INNER (mode),
10976 CONST_VECTOR_ELT (value, i), insn);
10980 gcc_unreachable ();
10984 /* Dump out the constants in CONSTANTS after INSN. */
10987 dump_constants (struct mips16_constant *constants, rtx insn)
10989 struct mips16_constant *c, *next;
10993 for (c = constants; c != NULL; c = next)
10995 /* If necessary, increase the alignment of PC. */
10996 if (align < GET_MODE_SIZE (c->mode))
10998 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10999 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
11001 align = GET_MODE_SIZE (c->mode);
11003 insn = emit_label_after (c->label, insn);
11004 insn = dump_constants_1 (c->mode, c->value, insn);
11010 emit_barrier_after (insn);
11013 /* Return the length of instruction INSN. */
11016 mips16_insn_length (rtx insn)
11020 rtx body = PATTERN (insn);
11021 if (GET_CODE (body) == ADDR_VEC)
11022 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
11023 if (GET_CODE (body) == ADDR_DIFF_VEC)
11024 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
11026 return get_attr_length (insn);
11029 /* If *X is a symbolic constant that refers to the constant pool, add
11030 the constant to POOL and rewrite *X to use the constant's label. */
11033 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
11035 rtx base, offset, label;
11037 split_const (*x, &base, &offset);
11038 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
11040 label = add_constant (pool, get_pool_constant (base),
11041 get_pool_mode (base));
11042 base = gen_rtx_LABEL_REF (Pmode, label);
11043 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
11047 /* This structure is used to communicate with mips16_rewrite_pool_refs.
11048 INSN is the instruction we're rewriting and POOL points to the current
11050 struct mips16_rewrite_pool_refs_info {
11052 struct mips16_constant_pool *pool;
11055 /* Rewrite *X so that constant pool references refer to the constant's
11056 label instead. DATA points to a mips16_rewrite_pool_refs_info
11060 mips16_rewrite_pool_refs (rtx *x, void *data)
11062 struct mips16_rewrite_pool_refs_info *info = data;
11064 if (force_to_mem_operand (*x, Pmode))
11066 rtx mem = force_const_mem (GET_MODE (*x), *x);
11067 validate_change (info->insn, x, mem, false);
11072 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
11076 if (TARGET_MIPS16_TEXT_LOADS)
11077 mips16_rewrite_pool_constant (info->pool, x);
11079 return GET_CODE (*x) == CONST ? -1 : 0;
11082 /* Build MIPS16 constant pools. */
11085 mips16_lay_out_constants (void)
11087 struct mips16_constant_pool pool;
11088 struct mips16_rewrite_pool_refs_info info;
11091 if (!TARGET_MIPS16_PCREL_LOADS)
11095 memset (&pool, 0, sizeof (pool));
11096 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
11098 /* Rewrite constant pool references in INSN. */
11103 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
11106 pool.insn_address += mips16_insn_length (insn);
11108 if (pool.first != NULL)
11110 /* If there are no natural barriers between the first user of
11111 the pool and the highest acceptable address, we'll need to
11112 create a new instruction to jump around the constant pool.
11113 In the worst case, this instruction will be 4 bytes long.
11115 If it's too late to do this transformation after INSN,
11116 do it immediately before INSN. */
11117 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
11121 label = gen_label_rtx ();
11123 jump = emit_jump_insn_before (gen_jump (label), insn);
11124 JUMP_LABEL (jump) = label;
11125 LABEL_NUSES (label) = 1;
11126 barrier = emit_barrier_after (jump);
11128 emit_label_after (label, barrier);
11129 pool.insn_address += 4;
11132 /* See whether the constant pool is now out of range of the first
11133 user. If so, output the constants after the previous barrier.
11134 Note that any instructions between BARRIER and INSN (inclusive)
11135 will use negative offsets to refer to the pool. */
11136 if (pool.insn_address > pool.highest_address)
11138 dump_constants (pool.first, barrier);
11142 else if (BARRIER_P (insn))
11146 dump_constants (pool.first, get_last_insn ());
11149 /* A temporary variable used by for_each_rtx callbacks, etc. */
11150 static rtx mips_sim_insn;
11152 /* A structure representing the state of the processor pipeline.
11153 Used by the mips_sim_* family of functions. */
11155 /* The maximum number of instructions that can be issued in a cycle.
11156 (Caches mips_issue_rate.) */
11157 unsigned int issue_rate;
11159 /* The current simulation time. */
11162 /* How many more instructions can be issued in the current cycle. */
11163 unsigned int insns_left;
11165 /* LAST_SET[X].INSN is the last instruction to set register X.
11166 LAST_SET[X].TIME is the time at which that instruction was issued.
11167 INSN is null if no instruction has yet set register X. */
11171 } last_set[FIRST_PSEUDO_REGISTER];
11173 /* The pipeline's current DFA state. */
11177 /* Reset STATE to the initial simulation state. */
11180 mips_sim_reset (struct mips_sim *state)
11183 state->insns_left = state->issue_rate;
11184 memset (&state->last_set, 0, sizeof (state->last_set));
11185 state_reset (state->dfa_state);
11188 /* Initialize STATE before its first use. DFA_STATE points to an
11189 allocated but uninitialized DFA state. */
11192 mips_sim_init (struct mips_sim *state, state_t dfa_state)
11194 state->issue_rate = mips_issue_rate ();
11195 state->dfa_state = dfa_state;
11196 mips_sim_reset (state);
11199 /* Advance STATE by one clock cycle. */
11202 mips_sim_next_cycle (struct mips_sim *state)
11205 state->insns_left = state->issue_rate;
11206 state_transition (state->dfa_state, 0);
11209 /* Advance simulation state STATE until instruction INSN can read
11213 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
11217 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
11218 if (state->last_set[REGNO (reg) + i].insn != 0)
11222 t = state->last_set[REGNO (reg) + i].time;
11223 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
11224 while (state->time < t)
11225 mips_sim_next_cycle (state);
11229 /* A for_each_rtx callback. If *X is a register, advance simulation state
11230 DATA until mips_sim_insn can read the register's value. */
11233 mips_sim_wait_regs_2 (rtx *x, void *data)
11236 mips_sim_wait_reg (data, mips_sim_insn, *x);
11240 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
11243 mips_sim_wait_regs_1 (rtx *x, void *data)
11245 for_each_rtx (x, mips_sim_wait_regs_2, data);
11248 /* Advance simulation state STATE until all of INSN's register
11249 dependencies are satisfied. */
11252 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
11254 mips_sim_insn = insn;
11255 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
11258 /* Advance simulation state STATE until the units required by
11259 instruction INSN are available. */
11262 mips_sim_wait_units (struct mips_sim *state, rtx insn)
11266 tmp_state = alloca (state_size ());
11267 while (state->insns_left == 0
11268 || (memcpy (tmp_state, state->dfa_state, state_size ()),
11269 state_transition (tmp_state, insn) >= 0))
11270 mips_sim_next_cycle (state);
11273 /* Advance simulation state STATE until INSN is ready to issue. */
11276 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
11278 mips_sim_wait_regs (state, insn);
11279 mips_sim_wait_units (state, insn);
11282 /* mips_sim_insn has just set X. Update the LAST_SET array
11283 in simulation state DATA. */
11286 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11288 struct mips_sim *state;
11293 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
11295 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
11296 state->last_set[REGNO (x) + i].time = state->time;
11300 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
11301 can issue immediately (i.e., that mips_sim_wait_insn has already
11305 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
11307 state_transition (state->dfa_state, insn);
11308 state->insns_left--;
11310 mips_sim_insn = insn;
11311 note_stores (PATTERN (insn), mips_sim_record_set, state);
11314 /* Simulate issuing a NOP in state STATE. */
11317 mips_sim_issue_nop (struct mips_sim *state)
11319 if (state->insns_left == 0)
11320 mips_sim_next_cycle (state);
11321 state->insns_left--;
11324 /* Update simulation state STATE so that it's ready to accept the instruction
11325 after INSN. INSN should be part of the main rtl chain, not a member of a
11329 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
11331 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
11333 mips_sim_issue_nop (state);
11335 switch (GET_CODE (SEQ_BEGIN (insn)))
11339 /* We can't predict the processor state after a call or label. */
11340 mips_sim_reset (state);
11344 /* The delay slots of branch likely instructions are only executed
11345 when the branch is taken. Therefore, if the caller has simulated
11346 the delay slot instruction, STATE does not really reflect the state
11347 of the pipeline for the instruction after the delay slot. Also,
11348 branch likely instructions tend to incur a penalty when not taken,
11349 so there will probably be an extra delay between the branch and
11350 the instruction after the delay slot. */
11351 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
11352 mips_sim_reset (state);
11360 /* The VR4130 pipeline issues aligned pairs of instructions together,
11361 but it stalls the second instruction if it depends on the first.
11362 In order to cut down the amount of logic required, this dependence
11363 check is not based on a full instruction decode. Instead, any non-SPECIAL
11364 instruction is assumed to modify the register specified by bits 20-16
11365 (which is usually the "rt" field).
11367 In beq, beql, bne and bnel instructions, the rt field is actually an
11368 input, so we can end up with a false dependence between the branch
11369 and its delay slot. If this situation occurs in instruction INSN,
11370 try to avoid it by swapping rs and rt. */
11373 vr4130_avoid_branch_rt_conflict (rtx insn)
11377 first = SEQ_BEGIN (insn);
11378 second = SEQ_END (insn);
11380 && NONJUMP_INSN_P (second)
11381 && GET_CODE (PATTERN (first)) == SET
11382 && GET_CODE (SET_DEST (PATTERN (first))) == PC
11383 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
11385 /* Check for the right kind of condition. */
11386 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
11387 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
11388 && REG_P (XEXP (cond, 0))
11389 && REG_P (XEXP (cond, 1))
11390 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
11391 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
11393 /* SECOND mentions the rt register but not the rs register. */
11394 rtx tmp = XEXP (cond, 0);
11395 XEXP (cond, 0) = XEXP (cond, 1);
11396 XEXP (cond, 1) = tmp;
11401 /* Implement -mvr4130-align. Go through each basic block and simulate the
11402 processor pipeline. If we find that a pair of instructions could execute
11403 in parallel, and the first of those instruction is not 8-byte aligned,
11404 insert a nop to make it aligned. */
11407 vr4130_align_insns (void)
11409 struct mips_sim state;
11410 rtx insn, subinsn, last, last2, next;
11415 /* LAST is the last instruction before INSN to have a nonzero length.
11416 LAST2 is the last such instruction before LAST. */
11420 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
11423 mips_sim_init (&state, alloca (state_size ()));
11424 for (insn = get_insns (); insn != 0; insn = next)
11426 unsigned int length;
11428 next = NEXT_INSN (insn);
11430 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
11431 This isn't really related to the alignment pass, but we do it on
11432 the fly to avoid a separate instruction walk. */
11433 vr4130_avoid_branch_rt_conflict (insn);
11435 if (USEFUL_INSN_P (insn))
11436 FOR_EACH_SUBINSN (subinsn, insn)
11438 mips_sim_wait_insn (&state, subinsn);
11440 /* If we want this instruction to issue in parallel with the
11441 previous one, make sure that the previous instruction is
11442 aligned. There are several reasons why this isn't worthwhile
11443 when the second instruction is a call:
11445 - Calls are less likely to be performance critical,
11446 - There's a good chance that the delay slot can execute
11447 in parallel with the call.
11448 - The return address would then be unaligned.
11450 In general, if we're going to insert a nop between instructions
11451 X and Y, it's better to insert it immediately after X. That
11452 way, if the nop makes Y aligned, it will also align any labels
11453 between X and Y. */
11454 if (state.insns_left != state.issue_rate
11455 && !CALL_P (subinsn))
11457 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
11459 /* SUBINSN is the first instruction in INSN and INSN is
11460 aligned. We want to align the previous instruction
11461 instead, so insert a nop between LAST2 and LAST.
11463 Note that LAST could be either a single instruction
11464 or a branch with a delay slot. In the latter case,
11465 LAST, like INSN, is already aligned, but the delay
11466 slot must have some extra delay that stops it from
11467 issuing at the same time as the branch. We therefore
11468 insert a nop before the branch in order to align its
11470 emit_insn_after (gen_nop (), last2);
11473 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
11475 /* SUBINSN is the delay slot of INSN, but INSN is
11476 currently unaligned. Insert a nop between
11477 LAST and INSN to align it. */
11478 emit_insn_after (gen_nop (), last);
11482 mips_sim_issue_insn (&state, subinsn);
11484 mips_sim_finish_insn (&state, insn);
11486 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
11487 length = get_attr_length (insn);
11490 /* If the instruction is an asm statement or multi-instruction
11491 mips.md patern, the length is only an estimate. Insert an
11492 8 byte alignment after it so that the following instructions
11493 can be handled correctly. */
11494 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
11495 && (recog_memoized (insn) < 0 || length >= 8))
11497 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
11498 next = NEXT_INSN (next);
11499 mips_sim_next_cycle (&state);
11502 else if (length & 4)
11503 aligned_p = !aligned_p;
11508 /* See whether INSN is an aligned label. */
11509 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
11515 /* Subroutine of mips_reorg. If there is a hazard between INSN
11516 and a previous instruction, avoid it by inserting nops after
11519 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
11520 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
11521 before using the value of that register. *HILO_DELAY counts the
11522 number of instructions since the last hilo hazard (that is,
11523 the number of instructions since the last mflo or mfhi).
11525 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
11526 for the next instruction.
11528 LO_REG is an rtx for the LO register, used in dependence checking. */
11531 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
11532 rtx *delayed_reg, rtx lo_reg)
11535 int nops, ninsns, hazard_set;
11537 if (!INSN_P (insn))
11540 pattern = PATTERN (insn);
11542 /* Do not put the whole function in .set noreorder if it contains
11543 an asm statement. We don't know whether there will be hazards
11544 between the asm statement and the gcc-generated code. */
11545 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
11546 cfun->machine->all_noreorder_p = false;
11548 /* Ignore zero-length instructions (barriers and the like). */
11549 ninsns = get_attr_length (insn) / 4;
11553 /* Work out how many nops are needed. Note that we only care about
11554 registers that are explicitly mentioned in the instruction's pattern.
11555 It doesn't matter that calls use the argument registers or that they
11556 clobber hi and lo. */
11557 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
11558 nops = 2 - *hilo_delay;
11559 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
11564 /* Insert the nops between this instruction and the previous one.
11565 Each new nop takes us further from the last hilo hazard. */
11566 *hilo_delay += nops;
11568 emit_insn_after (gen_hazard_nop (), after);
11570 /* Set up the state for the next instruction. */
11571 *hilo_delay += ninsns;
11573 if (INSN_CODE (insn) >= 0)
11574 switch (get_attr_hazard (insn))
11584 hazard_set = (int) get_attr_hazard_set (insn);
11585 if (hazard_set == 0)
11586 set = single_set (insn);
11589 gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
11590 set = XVECEXP (PATTERN (insn), 0, hazard_set - 1);
11592 gcc_assert (set && GET_CODE (set) == SET);
11593 *delayed_reg = SET_DEST (set);
11599 /* Go through the instruction stream and insert nops where necessary.
11600 See if the whole function can then be put into .set noreorder &
11604 mips_avoid_hazards (void)
11606 rtx insn, last_insn, lo_reg, delayed_reg;
11609 /* Force all instructions to be split into their final form. */
11610 split_all_insns_noflow ();
11612 /* Recalculate instruction lengths without taking nops into account. */
11613 cfun->machine->ignore_hazard_length_p = true;
11614 shorten_branches (get_insns ());
11616 cfun->machine->all_noreorder_p = true;
11618 /* Profiled functions can't be all noreorder because the profiler
11619 support uses assembler macros. */
11620 if (current_function_profile)
11621 cfun->machine->all_noreorder_p = false;
11623 /* Code compiled with -mfix-vr4120 can't be all noreorder because
11624 we rely on the assembler to work around some errata. */
11625 if (TARGET_FIX_VR4120)
11626 cfun->machine->all_noreorder_p = false;
11628 /* The same is true for -mfix-vr4130 if we might generate mflo or
11629 mfhi instructions. Note that we avoid using mflo and mfhi if
11630 the VR4130 macc and dmacc instructions are available instead;
11631 see the *mfhilo_{si,di}_macc patterns. */
11632 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
11633 cfun->machine->all_noreorder_p = false;
11638 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
11640 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
11643 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
11644 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
11645 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
11646 &hilo_delay, &delayed_reg, lo_reg);
11648 mips_avoid_hazard (last_insn, insn, &hilo_delay,
11649 &delayed_reg, lo_reg);
11656 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
11661 mips16_lay_out_constants ();
11662 if (TARGET_EXPLICIT_RELOCS)
11664 if (mips_flag_delayed_branch)
11665 dbr_schedule (get_insns ());
11666 mips_avoid_hazards ();
11667 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
11668 vr4130_align_insns ();
11672 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
11673 in order to avoid duplicating too much logic from elsewhere. */
11676 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11677 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11680 rtx this, temp1, temp2, insn, fnaddr;
11681 bool use_sibcall_p;
11683 /* Pretend to be a post-reload pass while generating rtl. */
11684 reload_completed = 1;
11686 /* Mark the end of the (empty) prologue. */
11687 emit_note (NOTE_INSN_PROLOGUE_END);
11689 /* Determine if we can use a sibcall to call FUNCTION directly. */
11690 fnaddr = XEXP (DECL_RTL (function), 0);
11691 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
11692 && const_call_insn_operand (fnaddr, Pmode));
11694 /* Determine if we need to load FNADDR from the GOT. */
11695 if (!use_sibcall_p)
11696 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
11698 case SYMBOL_GOT_PAGE_OFST:
11699 case SYMBOL_GOT_DISP:
11700 /* Pick a global pointer. Use a call-clobbered register if
11701 TARGET_CALL_SAVED_GP. */
11702 cfun->machine->global_pointer =
11703 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
11704 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
11706 /* Set up the global pointer for n32 or n64 abicalls. */
11707 mips_emit_loadgp ();
11714 /* We need two temporary registers in some cases. */
11715 temp1 = gen_rtx_REG (Pmode, 2);
11716 temp2 = gen_rtx_REG (Pmode, 3);
11718 /* Find out which register contains the "this" pointer. */
11719 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11720 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
11722 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
11724 /* Add DELTA to THIS. */
11727 rtx offset = GEN_INT (delta);
11728 if (!SMALL_OPERAND (delta))
11730 mips_emit_move (temp1, offset);
11733 emit_insn (gen_add3_insn (this, this, offset));
11736 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
11737 if (vcall_offset != 0)
11741 /* Set TEMP1 to *THIS. */
11742 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
11744 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
11745 addr = mips_add_offset (temp2, temp1, vcall_offset);
11747 /* Load the offset and add it to THIS. */
11748 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
11749 emit_insn (gen_add3_insn (this, this, temp1));
11752 /* Jump to the target function. Use a sibcall if direct jumps are
11753 allowed, otherwise load the address into a register first. */
11756 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
11757 SIBLING_CALL_P (insn) = 1;
11761 /* This is messy. gas treats "la $25,foo" as part of a call
11762 sequence and may allow a global "foo" to be lazily bound.
11763 The general move patterns therefore reject this combination.
11765 In this context, lazy binding would actually be OK
11766 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
11767 TARGET_CALL_SAVED_GP; see mips_load_call_address.
11768 We must therefore load the address via a temporary
11769 register if mips_dangerous_for_la25_p.
11771 If we jump to the temporary register rather than $25, the assembler
11772 can use the move insn to fill the jump's delay slot. */
11773 if (TARGET_USE_PIC_FN_ADDR_REG
11774 && !mips_dangerous_for_la25_p (fnaddr))
11775 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
11776 mips_load_call_address (temp1, fnaddr, true);
11778 if (TARGET_USE_PIC_FN_ADDR_REG
11779 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
11780 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
11781 emit_jump_insn (gen_indirect_jump (temp1));
11784 /* Run just enough of rest_of_compilation. This sequence was
11785 "borrowed" from alpha.c. */
11786 insn = get_insns ();
11787 insn_locators_alloc ();
11788 split_all_insns_noflow ();
11789 mips16_lay_out_constants ();
11790 shorten_branches (insn);
11791 final_start_function (insn, file, 1);
11792 final (insn, file, 1);
11793 final_end_function ();
11795 /* Clean up the vars set above. Note that final_end_function resets
11796 the global pointer for us. */
11797 reload_completed = 0;
11800 static GTY(()) int was_mips16_p = -1;
11802 /* Set up the target-dependent global state so that it matches the
11803 current function's ISA mode. */
11806 mips_set_mips16_mode (int mips16_p)
11808 if (mips16_p == was_mips16_p)
11811 /* Restore base settings of various flags. */
11812 target_flags = mips_base_target_flags;
11813 flag_delayed_branch = mips_flag_delayed_branch;
11814 flag_schedule_insns = mips_base_schedule_insns;
11815 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
11816 flag_move_loop_invariants = mips_base_move_loop_invariants;
11817 align_loops = mips_base_align_loops;
11818 align_jumps = mips_base_align_jumps;
11819 align_functions = mips_base_align_functions;
11823 /* Select mips16 instruction set. */
11824 target_flags |= MASK_MIPS16;
11826 /* Don't run the scheduler before reload, since it tends to
11827 increase register pressure. */
11828 flag_schedule_insns = 0;
11830 /* Don't do hot/cold partitioning. The constant layout code expects
11831 the whole function to be in a single section. */
11832 flag_reorder_blocks_and_partition = 0;
11834 /* Don't move loop invariants, because it tends to increase
11835 register pressure. It also introduces an extra move in cases
11836 where the constant is the first operand in a two-operand binary
11837 instruction, or when it forms a register argument to a functon
11839 flag_move_loop_invariants = 0;
11841 /* Silently disable -mexplicit-relocs since it doesn't apply
11842 to mips16 code. Even so, it would overly pedantic to warn
11843 about "-mips16 -mexplicit-relocs", especially given that
11844 we use a %gprel() operator. */
11845 target_flags &= ~MASK_EXPLICIT_RELOCS;
11847 /* Experiments suggest we get the best overall results from using
11848 the range of an unextended lw or sw. Code that makes heavy use
11849 of byte or short accesses can do better with ranges of 0...31
11850 and 0...63 respectively, but most code is sensitive to the range
11851 of lw and sw instead. */
11852 targetm.min_anchor_offset = 0;
11853 targetm.max_anchor_offset = 127;
11855 if (flag_pic || TARGET_ABICALLS)
11856 sorry ("MIPS16 PIC");
11860 /* Reset to select base non-mips16 ISA. */
11861 target_flags &= ~MASK_MIPS16;
11863 /* When using explicit relocs, we call dbr_schedule from within
11865 if (TARGET_EXPLICIT_RELOCS)
11866 flag_delayed_branch = 0;
11868 /* Provide default values for align_* for 64-bit targets. */
11871 if (align_loops == 0)
11873 if (align_jumps == 0)
11875 if (align_functions == 0)
11876 align_functions = 8;
11879 targetm.min_anchor_offset = -32768;
11880 targetm.max_anchor_offset = 32767;
11883 /* (Re)initialize mips target internals for new ISA. */
11884 mips_init_split_addresses ();
11885 mips_init_relocs ();
11887 if (was_mips16_p >= 0)
11888 /* Reinitialize target-dependent state. */
11891 was_mips16_p = TARGET_MIPS16;
11894 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
11895 function should use the MIPS16 ISA and switch modes accordingly. */
11898 mips_set_current_function (tree fndecl)
11900 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
11903 /* Allocate a chunk of memory for per-function machine-dependent data. */
11904 static struct machine_function *
11905 mips_init_machine_status (void)
11907 return ((struct machine_function *)
11908 ggc_alloc_cleared (sizeof (struct machine_function)));
11911 /* Return the processor associated with the given ISA level, or null
11912 if the ISA isn't valid. */
11914 static const struct mips_cpu_info *
11915 mips_cpu_info_from_isa (int isa)
11919 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
11920 if (mips_cpu_info_table[i].isa == isa)
11921 return mips_cpu_info_table + i;
11926 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11927 with a final "000" replaced by "k". Ignore case.
11929 Note: this function is shared between GCC and GAS. */
11932 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11934 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11935 given++, canonical++;
11937 return ((*given == 0 && *canonical == 0)
11938 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11942 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11943 CPU name. We've traditionally allowed a lot of variation here.
11945 Note: this function is shared between GCC and GAS. */
11948 mips_matching_cpu_name_p (const char *canonical, const char *given)
11950 /* First see if the name matches exactly, or with a final "000"
11951 turned into "k". */
11952 if (mips_strict_matching_cpu_name_p (canonical, given))
11955 /* If not, try comparing based on numerical designation alone.
11956 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11957 if (TOLOWER (*given) == 'r')
11959 if (!ISDIGIT (*given))
11962 /* Skip over some well-known prefixes in the canonical name,
11963 hoping to find a number there too. */
11964 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11966 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11968 else if (TOLOWER (canonical[0]) == 'r')
11971 return mips_strict_matching_cpu_name_p (canonical, given);
11975 /* Return the mips_cpu_info entry for the processor or ISA given
11976 by CPU_STRING. Return null if the string isn't recognized.
11978 A similar function exists in GAS. */
11980 static const struct mips_cpu_info *
11981 mips_parse_cpu (const char *cpu_string)
11986 /* In the past, we allowed upper-case CPU names, but it doesn't
11987 work well with the multilib machinery. */
11988 for (s = cpu_string; *s != 0; s++)
11991 warning (0, "the cpu name must be lower case");
11995 /* 'from-abi' selects the most compatible architecture for the given
11996 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11997 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11998 version. Look first at the -mgp options, if given, otherwise base
11999 the choice on MASK_64BIT in TARGET_DEFAULT. */
12000 if (strcasecmp (cpu_string, "from-abi") == 0)
12001 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
12002 : ABI_NEEDS_64BIT_REGS ? 3
12003 : (TARGET_64BIT ? 3 : 1));
12005 /* 'default' has traditionally been a no-op. Probably not very useful. */
12006 if (strcasecmp (cpu_string, "default") == 0)
12009 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
12010 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
12011 return mips_cpu_info_table + i;
12017 /* Set up globals to generate code for the ISA or processor
12018 described by INFO. */
12021 mips_set_architecture (const struct mips_cpu_info *info)
12025 mips_arch_info = info;
12026 mips_arch = info->cpu;
12027 mips_isa = info->isa;
12032 /* Likewise for tuning. */
12035 mips_set_tune (const struct mips_cpu_info *info)
12039 mips_tune_info = info;
12040 mips_tune = info->cpu;
12044 /* Implement TARGET_HANDLE_OPTION. */
12047 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
12052 if (strcmp (arg, "32") == 0)
12054 else if (strcmp (arg, "o64") == 0)
12055 mips_abi = ABI_O64;
12056 else if (strcmp (arg, "n32") == 0)
12057 mips_abi = ABI_N32;
12058 else if (strcmp (arg, "64") == 0)
12060 else if (strcmp (arg, "eabi") == 0)
12061 mips_abi = ABI_EABI;
12068 return mips_parse_cpu (arg) != 0;
12071 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
12072 return mips_isa_info != 0;
12074 case OPT_mno_flush_func:
12075 mips_cache_flush_func = NULL;
12078 case OPT_mcode_readable_:
12079 if (strcmp (arg, "yes") == 0)
12080 mips_code_readable = CODE_READABLE_YES;
12081 else if (strcmp (arg, "pcrel") == 0)
12082 mips_code_readable = CODE_READABLE_PCREL;
12083 else if (strcmp (arg, "no") == 0)
12084 mips_code_readable = CODE_READABLE_NO;
12094 /* Set up the threshold for data to go into the small data area, instead
12095 of the normal data area, and detect any conflicts in the switches. */
12098 override_options (void)
12100 int i, start, regno;
12101 enum machine_mode mode;
12103 #ifdef SUBTARGET_OVERRIDE_OPTIONS
12104 SUBTARGET_OVERRIDE_OPTIONS;
12107 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
12109 /* The following code determines the architecture and register size.
12110 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
12111 The GAS and GCC code should be kept in sync as much as possible. */
12113 if (mips_arch_string != 0)
12114 mips_set_architecture (mips_parse_cpu (mips_arch_string));
12116 if (mips_isa_info != 0)
12118 if (mips_arch_info == 0)
12119 mips_set_architecture (mips_isa_info);
12120 else if (mips_arch_info->isa != mips_isa_info->isa)
12121 error ("-%s conflicts with the other architecture options, "
12122 "which specify a %s processor",
12123 mips_isa_info->name,
12124 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
12127 if (mips_arch_info == 0)
12129 #ifdef MIPS_CPU_STRING_DEFAULT
12130 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
12132 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
12136 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
12137 error ("-march=%s is not compatible with the selected ABI",
12138 mips_arch_info->name);
12140 /* Optimize for mips_arch, unless -mtune selects a different processor. */
12141 if (mips_tune_string != 0)
12142 mips_set_tune (mips_parse_cpu (mips_tune_string));
12144 if (mips_tune_info == 0)
12145 mips_set_tune (mips_arch_info);
12147 /* Set cost structure for the processor. */
12149 mips_cost = &mips_rtx_cost_optimize_size;
12151 mips_cost = &mips_rtx_cost_data[mips_tune];
12153 /* If the user hasn't specified a branch cost, use the processor's
12155 if (mips_branch_cost == 0)
12156 mips_branch_cost = mips_cost->branch_cost;
12158 if ((target_flags_explicit & MASK_64BIT) != 0)
12160 /* The user specified the size of the integer registers. Make sure
12161 it agrees with the ABI and ISA. */
12162 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
12163 error ("-mgp64 used with a 32-bit processor");
12164 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
12165 error ("-mgp32 used with a 64-bit ABI");
12166 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
12167 error ("-mgp64 used with a 32-bit ABI");
12171 /* Infer the integer register size from the ABI and processor.
12172 Restrict ourselves to 32-bit registers if that's all the
12173 processor has, or if the ABI cannot handle 64-bit registers. */
12174 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
12175 target_flags &= ~MASK_64BIT;
12177 target_flags |= MASK_64BIT;
12180 if ((target_flags_explicit & MASK_FLOAT64) != 0)
12182 /* Really, -mfp32 and -mfp64 are ornamental options. There's
12183 only one right answer here. */
12184 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
12185 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
12186 else if (!TARGET_64BIT && TARGET_FLOAT64
12187 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
12188 error ("-mgp32 and -mfp64 can only be combined if the target"
12189 " supports the mfhc1 and mthc1 instructions");
12190 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
12191 error ("unsupported combination: %s", "-mfp64 -msingle-float");
12195 /* -msingle-float selects 32-bit float registers. Otherwise the
12196 float registers should be the same size as the integer ones. */
12197 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
12198 target_flags |= MASK_FLOAT64;
12200 target_flags &= ~MASK_FLOAT64;
12203 /* End of code shared with GAS. */
12205 if ((target_flags_explicit & MASK_LONG64) == 0)
12207 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
12208 target_flags |= MASK_LONG64;
12210 target_flags &= ~MASK_LONG64;
12213 if (!TARGET_OLDABI)
12214 flag_pcc_struct_return = 0;
12216 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
12218 /* If neither -mbranch-likely nor -mno-branch-likely was given
12219 on the command line, set MASK_BRANCHLIKELY based on the target
12220 architecture and tuning flags. Annulled delay slots are a
12221 size win, so we only consider the processor-specific tuning
12222 for !optimize_size. */
12223 if (ISA_HAS_BRANCHLIKELY
12225 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
12226 target_flags |= MASK_BRANCHLIKELY;
12228 target_flags &= ~MASK_BRANCHLIKELY;
12230 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
12231 warning (0, "the %qs architecture does not support branch-likely"
12232 " instructions", mips_arch_info->name);
12234 /* The effect of -mabicalls isn't defined for the EABI. */
12235 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
12237 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
12238 target_flags &= ~MASK_ABICALLS;
12241 /* MIPS16 cannot generate PIC yet. */
12242 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
12244 sorry ("MIPS16 PIC");
12245 target_flags &= ~MASK_ABICALLS;
12246 flag_pic = flag_pie = flag_shlib = 0;
12249 if (TARGET_ABICALLS)
12250 /* We need to set flag_pic for executables as well as DSOs
12251 because we may reference symbols that are not defined in
12252 the final executable. (MIPS does not use things like
12253 copy relocs, for example.)
12255 Also, there is a body of code that uses __PIC__ to distinguish
12256 between -mabicalls and -mno-abicalls code. */
12259 /* -mvr4130-align is a "speed over size" optimization: it usually produces
12260 faster code, but at the expense of more nops. Enable it at -O3 and
12262 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
12263 target_flags |= MASK_VR4130_ALIGN;
12265 /* Prefer a call to memcpy over inline code when optimizing for size,
12266 though see MOVE_RATIO in mips.h. */
12267 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
12268 target_flags |= MASK_MEMCPY;
12270 /* If we have a nonzero small-data limit, check that the -mgpopt
12271 setting is consistent with the other target flags. */
12272 if (mips_section_threshold > 0)
12276 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
12277 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
12279 TARGET_LOCAL_SDATA = false;
12280 TARGET_EXTERN_SDATA = false;
12284 if (TARGET_VXWORKS_RTP)
12285 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
12287 if (TARGET_ABICALLS)
12288 warning (0, "cannot use small-data accesses for %qs",
12293 #ifdef MIPS_TFMODE_FORMAT
12294 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
12297 /* Make sure that the user didn't turn off paired single support when
12298 MIPS-3D support is requested. */
12299 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
12300 && !TARGET_PAIRED_SINGLE_FLOAT)
12301 error ("-mips3d requires -mpaired-single");
12303 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
12305 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
12307 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
12308 and TARGET_HARD_FLOAT_ABI are both true. */
12309 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
12310 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
12312 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
12314 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
12315 error ("-mips3d/-mpaired-single must be used with -mips64");
12317 /* If TARGET_DSPR2, enable MASK_DSP. */
12319 target_flags |= MASK_DSP;
12321 mips_print_operand_punct['?'] = 1;
12322 mips_print_operand_punct['#'] = 1;
12323 mips_print_operand_punct['/'] = 1;
12324 mips_print_operand_punct['&'] = 1;
12325 mips_print_operand_punct['!'] = 1;
12326 mips_print_operand_punct['*'] = 1;
12327 mips_print_operand_punct['@'] = 1;
12328 mips_print_operand_punct['.'] = 1;
12329 mips_print_operand_punct['('] = 1;
12330 mips_print_operand_punct[')'] = 1;
12331 mips_print_operand_punct['['] = 1;
12332 mips_print_operand_punct[']'] = 1;
12333 mips_print_operand_punct['<'] = 1;
12334 mips_print_operand_punct['>'] = 1;
12335 mips_print_operand_punct['{'] = 1;
12336 mips_print_operand_punct['}'] = 1;
12337 mips_print_operand_punct['^'] = 1;
12338 mips_print_operand_punct['$'] = 1;
12339 mips_print_operand_punct['+'] = 1;
12340 mips_print_operand_punct['~'] = 1;
12341 mips_print_operand_punct['|'] = 1;
12342 mips_print_operand_punct['-'] = 1;
12344 /* Set up array to map GCC register number to debug register number.
12345 Ignore the special purpose register numbers. */
12347 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12349 mips_dbx_regno[i] = INVALID_REGNUM;
12350 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
12351 mips_dwarf_regno[i] = i;
12353 mips_dwarf_regno[i] = INVALID_REGNUM;
12356 start = GP_DBX_FIRST - GP_REG_FIRST;
12357 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
12358 mips_dbx_regno[i] = i + start;
12360 start = FP_DBX_FIRST - FP_REG_FIRST;
12361 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
12362 mips_dbx_regno[i] = i + start;
12364 /* HI and LO debug registers use big-endian ordering. */
12365 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
12366 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
12367 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
12368 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
12369 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
12371 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
12372 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
12375 /* Set up array giving whether a given register can hold a given mode. */
12377 for (mode = VOIDmode;
12378 mode != MAX_MACHINE_MODE;
12379 mode = (enum machine_mode) ((int)mode + 1))
12381 register int size = GET_MODE_SIZE (mode);
12382 register enum mode_class class = GET_MODE_CLASS (mode);
12384 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12388 if (mode == CCV2mode)
12389 temp = (ISA_HAS_8CC
12390 && ST_REG_P (regno)
12391 && (regno - ST_REG_FIRST) % 2 == 0);
12393 else if (mode == CCV4mode)
12394 temp = (ISA_HAS_8CC
12395 && ST_REG_P (regno)
12396 && (regno - ST_REG_FIRST) % 4 == 0);
12398 else if (mode == CCmode)
12401 temp = (regno == FPSW_REGNUM);
12403 temp = (ST_REG_P (regno) || GP_REG_P (regno)
12404 || FP_REG_P (regno));
12407 else if (GP_REG_P (regno))
12408 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
12410 else if (FP_REG_P (regno))
12411 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
12412 || (MIN_FPRS_PER_FMT == 1
12413 && size <= UNITS_PER_FPREG))
12414 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
12415 || class == MODE_VECTOR_FLOAT)
12416 && size <= UNITS_PER_FPVALUE)
12417 /* Allow integer modes that fit into a single
12418 register. We need to put integers into FPRs
12419 when using instructions like cvt and trunc.
12420 We can't allow sizes smaller than a word,
12421 the FPU has no appropriate load/store
12422 instructions for those. */
12423 || (class == MODE_INT
12424 && size >= MIN_UNITS_PER_WORD
12425 && size <= UNITS_PER_FPREG)
12426 /* Allow TFmode for CCmode reloads. */
12427 || (ISA_HAS_8CC && mode == TFmode)));
12429 else if (ACC_REG_P (regno))
12430 temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
12431 && size <= UNITS_PER_WORD * 2
12432 && (size <= UNITS_PER_WORD
12433 || regno == MD_REG_FIRST
12434 || (DSP_ACC_REG_P (regno)
12435 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
12437 else if (ALL_COP_REG_P (regno))
12438 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
12442 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
12446 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
12447 initialized yet, so we can't use that here. */
12448 gpr_mode = TARGET_64BIT ? DImode : SImode;
12450 /* Function to allocate machine-dependent function status. */
12451 init_machine_status = &mips_init_machine_status;
12453 /* Default to working around R4000 errata only if the processor
12454 was selected explicitly. */
12455 if ((target_flags_explicit & MASK_FIX_R4000) == 0
12456 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
12457 target_flags |= MASK_FIX_R4000;
12459 /* Default to working around R4400 errata only if the processor
12460 was selected explicitly. */
12461 if ((target_flags_explicit & MASK_FIX_R4400) == 0
12462 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
12463 target_flags |= MASK_FIX_R4400;
12465 /* Save base state of options. */
12466 mips_base_mips16 = TARGET_MIPS16;
12467 mips_base_target_flags = target_flags;
12468 mips_flag_delayed_branch = flag_delayed_branch;
12469 mips_base_schedule_insns = flag_schedule_insns;
12470 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
12471 mips_base_move_loop_invariants = flag_move_loop_invariants;
12472 mips_base_align_loops = align_loops;
12473 mips_base_align_jumps = align_jumps;
12474 mips_base_align_functions = align_functions;
12476 /* Now select the mips16 or 32-bit instruction set, as requested. */
12477 mips_set_mips16_mode (mips_base_mips16);
12480 /* Swap the register information for registers I and I + 1, which
12481 currently have the wrong endianness. Note that the registers'
12482 fixedness and call-clobberedness might have been set on the
12486 mips_swap_registers (unsigned int i)
12491 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
12492 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
12494 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
12495 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
12496 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
12497 SWAP_STRING (reg_names[i], reg_names[i + 1]);
12503 /* Implement CONDITIONAL_REGISTER_USAGE. */
12506 mips_conditional_register_usage (void)
12512 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
12513 fixed_regs[regno] = call_used_regs[regno] = 1;
12515 if (!TARGET_HARD_FLOAT)
12519 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
12520 fixed_regs[regno] = call_used_regs[regno] = 1;
12521 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12522 fixed_regs[regno] = call_used_regs[regno] = 1;
12524 else if (! ISA_HAS_8CC)
12528 /* We only have a single condition code register. We
12529 implement this by hiding all the condition code registers,
12530 and generating RTL that refers directly to ST_REG_FIRST. */
12531 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12532 fixed_regs[regno] = call_used_regs[regno] = 1;
12534 /* In mips16 mode, we permit the $t temporary registers to be used
12535 for reload. We prohibit the unused $s registers, since they
12536 are caller saved, and saving them via a mips16 register would
12537 probably waste more time than just reloading the value. */
12540 fixed_regs[18] = call_used_regs[18] = 1;
12541 fixed_regs[19] = call_used_regs[19] = 1;
12542 fixed_regs[20] = call_used_regs[20] = 1;
12543 fixed_regs[21] = call_used_regs[21] = 1;
12544 fixed_regs[22] = call_used_regs[22] = 1;
12545 fixed_regs[23] = call_used_regs[23] = 1;
12546 fixed_regs[26] = call_used_regs[26] = 1;
12547 fixed_regs[27] = call_used_regs[27] = 1;
12548 fixed_regs[30] = call_used_regs[30] = 1;
12550 /* fp20-23 are now caller saved. */
12551 if (mips_abi == ABI_64)
12554 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
12555 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12557 /* Odd registers from fp21 to fp31 are now caller saved. */
12558 if (mips_abi == ABI_N32)
12561 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
12562 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12564 /* Make sure that double-register accumulator values are correctly
12565 ordered for the current endianness. */
12566 if (TARGET_LITTLE_ENDIAN)
12569 mips_swap_registers (MD_REG_FIRST);
12570 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
12571 mips_swap_registers (regno);
12575 /* On the mips16, we want to allocate $24 (T_REG) before other
12576 registers for instructions for which it is possible. This helps
12577 avoid shuffling registers around in order to set up for an xor,
12578 encouraging the compiler to use a cmp instead. */
12581 mips_order_regs_for_local_alloc (void)
12585 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12586 reg_alloc_order[i] = i;
12590 /* It really doesn't matter where we put register 0, since it is
12591 a fixed register anyhow. */
12592 reg_alloc_order[0] = 24;
12593 reg_alloc_order[24] = 0;
12597 /* Initialize the GCC target structure. */
12598 #undef TARGET_ASM_ALIGNED_HI_OP
12599 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
12600 #undef TARGET_ASM_ALIGNED_SI_OP
12601 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
12602 #undef TARGET_ASM_ALIGNED_DI_OP
12603 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
12605 #undef TARGET_ASM_FUNCTION_PROLOGUE
12606 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
12607 #undef TARGET_ASM_FUNCTION_EPILOGUE
12608 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
12609 #undef TARGET_ASM_SELECT_RTX_SECTION
12610 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
12611 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
12612 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
12614 #undef TARGET_SCHED_INIT
12615 #define TARGET_SCHED_INIT mips_sched_init
12616 #undef TARGET_SCHED_REORDER
12617 #define TARGET_SCHED_REORDER mips_sched_reorder
12618 #undef TARGET_SCHED_REORDER2
12619 #define TARGET_SCHED_REORDER2 mips_sched_reorder
12620 #undef TARGET_SCHED_VARIABLE_ISSUE
12621 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
12622 #undef TARGET_SCHED_ADJUST_COST
12623 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
12624 #undef TARGET_SCHED_ISSUE_RATE
12625 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
12626 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
12627 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
12628 mips_multipass_dfa_lookahead
12630 #undef TARGET_DEFAULT_TARGET_FLAGS
12631 #define TARGET_DEFAULT_TARGET_FLAGS \
12633 | TARGET_CPU_DEFAULT \
12634 | TARGET_ENDIAN_DEFAULT \
12635 | TARGET_FP_EXCEPTIONS_DEFAULT \
12636 | MASK_CHECK_ZERO_DIV \
12638 #undef TARGET_HANDLE_OPTION
12639 #define TARGET_HANDLE_OPTION mips_handle_option
12641 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
12642 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
12644 #undef TARGET_INSERT_ATTRIBUTES
12645 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
12646 #undef TARGET_MERGE_DECL_ATTRIBUTES
12647 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
12648 #undef TARGET_SET_CURRENT_FUNCTION
12649 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
12651 #undef TARGET_VALID_POINTER_MODE
12652 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
12653 #undef TARGET_RTX_COSTS
12654 #define TARGET_RTX_COSTS mips_rtx_costs
12655 #undef TARGET_ADDRESS_COST
12656 #define TARGET_ADDRESS_COST mips_address_cost
12658 #undef TARGET_IN_SMALL_DATA_P
12659 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
12661 #undef TARGET_MACHINE_DEPENDENT_REORG
12662 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
12664 #undef TARGET_ASM_FILE_START
12665 #define TARGET_ASM_FILE_START mips_file_start
12666 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
12667 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
12669 #undef TARGET_INIT_LIBFUNCS
12670 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
12672 #undef TARGET_BUILD_BUILTIN_VA_LIST
12673 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
12674 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
12675 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
12677 #undef TARGET_PROMOTE_FUNCTION_ARGS
12678 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
12679 #undef TARGET_PROMOTE_FUNCTION_RETURN
12680 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
12681 #undef TARGET_PROMOTE_PROTOTYPES
12682 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
12684 #undef TARGET_RETURN_IN_MEMORY
12685 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
12686 #undef TARGET_RETURN_IN_MSB
12687 #define TARGET_RETURN_IN_MSB mips_return_in_msb
12689 #undef TARGET_ASM_OUTPUT_MI_THUNK
12690 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
12691 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
12692 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
12694 #undef TARGET_SETUP_INCOMING_VARARGS
12695 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
12696 #undef TARGET_STRICT_ARGUMENT_NAMING
12697 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
12698 #undef TARGET_MUST_PASS_IN_STACK
12699 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
12700 #undef TARGET_PASS_BY_REFERENCE
12701 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
12702 #undef TARGET_CALLEE_COPIES
12703 #define TARGET_CALLEE_COPIES mips_callee_copies
12704 #undef TARGET_ARG_PARTIAL_BYTES
12705 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
12707 #undef TARGET_MODE_REP_EXTENDED
12708 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
12710 #undef TARGET_VECTOR_MODE_SUPPORTED_P
12711 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
12713 #undef TARGET_SCALAR_MODE_SUPPORTED_P
12714 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
12716 #undef TARGET_INIT_BUILTINS
12717 #define TARGET_INIT_BUILTINS mips_init_builtins
12718 #undef TARGET_EXPAND_BUILTIN
12719 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
12721 #undef TARGET_HAVE_TLS
12722 #define TARGET_HAVE_TLS HAVE_AS_TLS
12724 #undef TARGET_CANNOT_FORCE_CONST_MEM
12725 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
12727 #undef TARGET_ENCODE_SECTION_INFO
12728 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
12730 #undef TARGET_ATTRIBUTE_TABLE
12731 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
12732 /* All our function attributes are related to how out-of-line copies should
12733 be compiled or called. They don't in themselves prevent inlining. */
12734 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
12735 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
12737 #undef TARGET_EXTRA_LIVE_ON_ENTRY
12738 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
12740 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
12741 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
12742 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
12743 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
12745 #undef TARGET_COMP_TYPE_ATTRIBUTES
12746 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
12748 #ifdef HAVE_AS_DTPRELWORD
12749 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
12750 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
12752 #undef TARGET_DWARF_REGISTER_SPAN
12753 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
12755 struct gcc_target targetm = TARGET_INITIALIZER;
12757 #include "gt-mips.h"