1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Macros to create an enumeration identifier for a function prototype. */
151 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
152 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
153 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
154 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
156 /* Classifies the prototype of a builtin function. */
157 enum mips_function_type
159 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
160 #include "config/mips/mips-ftypes.def"
161 #undef DEF_MIPS_FTYPE
165 /* Specifies how a builtin function should be converted into rtl. */
166 enum mips_builtin_type
168 /* The builtin corresponds directly to an .md pattern. The return
169 value is mapped to operand 0 and the arguments are mapped to
170 operands 1 and above. */
173 /* The builtin corresponds directly to an .md pattern. There is no return
174 value and the arguments are mapped to operands 0 and above. */
175 MIPS_BUILTIN_DIRECT_NO_TARGET,
177 /* The builtin corresponds to a comparison instruction followed by
178 a mips_cond_move_tf_ps pattern. The first two arguments are the
179 values to compare and the second two arguments are the vector
180 operands for the movt.ps or movf.ps instruction (in assembly order). */
184 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
185 of this instruction is the result of the comparison, which has mode
186 CCV2 or CCV4. The function arguments are mapped to operands 1 and
187 above. The function's return value is an SImode boolean that is
188 true under the following conditions:
190 MIPS_BUILTIN_CMP_ANY: one of the registers is true
191 MIPS_BUILTIN_CMP_ALL: all of the registers are true
192 MIPS_BUILTIN_CMP_LOWER: the first register is true
193 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
194 MIPS_BUILTIN_CMP_ANY,
195 MIPS_BUILTIN_CMP_ALL,
196 MIPS_BUILTIN_CMP_UPPER,
197 MIPS_BUILTIN_CMP_LOWER,
199 /* As above, but the instruction only sets a single $fcc register. */
200 MIPS_BUILTIN_CMP_SINGLE,
202 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
203 MIPS_BUILTIN_BPOSGE32
206 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
207 #define MIPS_FP_CONDITIONS(MACRO) \
225 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
226 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
227 enum mips_fp_condition {
228 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
231 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
232 #define STRINGIFY(X) #X
233 static const char *const mips_fp_conditions[] = {
234 MIPS_FP_CONDITIONS (STRINGIFY)
237 /* Information about a function's frame layout. */
238 struct mips_frame_info GTY(())
240 /* The size of the frame in bytes. */
241 HOST_WIDE_INT total_size;
243 /* The number of bytes allocated to variables. */
244 HOST_WIDE_INT var_size;
246 /* The number of bytes allocated to outgoing function arguments. */
247 HOST_WIDE_INT args_size;
249 /* The number of bytes allocated to the .cprestore slot, or 0 if there
251 HOST_WIDE_INT cprestore_size;
253 /* Bit X is set if the function saves or restores GPR X. */
256 /* Likewise FPR X. */
259 /* The number of GPRs and FPRs saved. */
263 /* The offset of the topmost GPR and FPR save slots from the top of
264 the frame, or zero if no such slots are needed. */
265 HOST_WIDE_INT gp_save_offset;
266 HOST_WIDE_INT fp_save_offset;
268 /* Likewise, but giving offsets from the bottom of the frame. */
269 HOST_WIDE_INT gp_sp_offset;
270 HOST_WIDE_INT fp_sp_offset;
272 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
273 HOST_WIDE_INT arg_pointer_offset;
275 /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
276 HOST_WIDE_INT hard_frame_pointer_offset;
279 struct machine_function GTY(()) {
280 /* Pseudo-reg holding the value of $28 in a mips16 function which
281 refers to GP relative global variables. */
282 rtx mips16_gp_pseudo_rtx;
284 /* The number of extra stack bytes taken up by register varargs.
285 This area is allocated by the callee at the very top of the frame. */
288 /* Current frame information, calculated by mips_compute_frame_info. */
289 struct mips_frame_info frame;
291 /* The register to use as the global pointer within this function. */
292 unsigned int global_pointer;
294 /* True if mips_adjust_insn_length should ignore an instruction's
296 bool ignore_hazard_length_p;
298 /* True if the whole function is suitable for .set noreorder and
300 bool all_noreorder_p;
302 /* True if the function is known to have an instruction that needs $gp. */
305 /* True if we have emitted an instruction to initialize
306 mips16_gp_pseudo_rtx. */
307 bool initialized_mips16_gp_pseudo_p;
310 /* Information about a single argument. */
313 /* True if the argument is passed in a floating-point register, or
314 would have been if we hadn't run out of registers. */
317 /* The number of words passed in registers, rounded up. */
318 unsigned int reg_words;
320 /* For EABI, the offset of the first register from GP_ARG_FIRST or
321 FP_ARG_FIRST. For other ABIs, the offset of the first register from
322 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
323 comment for details).
325 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
327 unsigned int reg_offset;
329 /* The number of words that must be passed on the stack, rounded up. */
330 unsigned int stack_words;
332 /* The offset from the start of the stack overflow area of the argument's
333 first stack word. Only meaningful when STACK_WORDS is nonzero. */
334 unsigned int stack_offset;
338 /* Information about an address described by mips_address_type.
344 REG is the base register and OFFSET is the constant offset.
347 REG is the register that contains the high part of the address,
348 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
349 is the type of OFFSET's symbol.
352 SYMBOL_TYPE is the type of symbol being referenced. */
354 struct mips_address_info
356 enum mips_address_type type;
359 enum mips_symbol_type symbol_type;
363 /* One stage in a constant building sequence. These sequences have
367 A = A CODE[1] VALUE[1]
368 A = A CODE[2] VALUE[2]
371 where A is an accumulator, each CODE[i] is a binary rtl operation
372 and each VALUE[i] is a constant integer. */
373 struct mips_integer_op {
375 unsigned HOST_WIDE_INT value;
379 /* The largest number of operations needed to load an integer constant.
380 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
381 When the lowest bit is clear, we can try, but reject a sequence with
382 an extra SLL at the end. */
383 #define MIPS_MAX_INTEGER_OPS 7
385 /* Information about a MIPS16e SAVE or RESTORE instruction. */
386 struct mips16e_save_restore_info {
387 /* The number of argument registers saved by a SAVE instruction.
388 0 for RESTORE instructions. */
391 /* Bit X is set if the instruction saves or restores GPR X. */
394 /* The total number of bytes to allocate. */
398 /* Global variables for machine-dependent things. */
400 /* Threshold for data being put into the small data/bss area, instead
401 of the normal data area. */
402 int mips_section_threshold = -1;
404 /* Count the number of .file directives, so that .loc is up to date. */
405 int num_source_filenames = 0;
407 /* Name of the file containing the current function. */
408 const char *current_function_file = "";
410 /* Count the number of sdb related labels are generated (to find block
411 start and end boundaries). */
412 int sdb_label_count = 0;
414 /* Next label # for each statement for Silicon Graphics IRIS systems. */
417 /* Map GCC register number to debugger register number. */
418 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
419 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
421 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
426 /* The next branch instruction is a branch likely, not branch normal. */
427 int mips_branch_likely;
429 /* The operands passed to the last cmpMM expander. */
432 /* The target cpu for code generation. */
433 enum processor_type mips_arch;
434 const struct mips_cpu_info *mips_arch_info;
436 /* The target cpu for optimization and scheduling. */
437 enum processor_type mips_tune;
438 const struct mips_cpu_info *mips_tune_info;
440 /* Which instruction set architecture to use. */
443 /* The architecture selected by -mipsN. */
444 static const struct mips_cpu_info *mips_isa_info;
446 /* Which ABI to use. */
447 int mips_abi = MIPS_ABI_DEFAULT;
449 /* Cost information to use. */
450 const struct mips_rtx_cost_data *mips_cost;
452 /* Remember the ambient target flags, excluding mips16. */
453 static int mips_base_target_flags;
454 /* The mips16 command-line target flags only. */
455 static bool mips_base_mips16;
456 /* Similar copies of option settings. */
457 static int mips_flag_delayed_branch; /* flag_delayed_branch */
458 static int mips_base_schedule_insns; /* flag_schedule_insns */
459 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
460 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
461 static int mips_base_align_loops; /* align_loops */
462 static int mips_base_align_jumps; /* align_jumps */
463 static int mips_base_align_functions; /* align_functions */
465 /* The -mtext-loads setting. */
466 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
468 /* If TRUE, we split addresses into their high and low parts in the RTL. */
469 int mips_split_addresses;
471 /* Mode used for saving/restoring general purpose registers. */
472 static enum machine_mode gpr_mode;
474 /* Array giving truth value on whether or not a given hard register
475 can support a given mode. */
476 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
478 /* List of all MIPS punctuation characters used by print_operand. */
479 char mips_print_operand_punct[256];
481 static GTY (()) int mips_output_filename_first_time = 1;
483 /* mips_split_p[X] is true if symbols of type X can be split by
484 mips_split_symbol(). */
485 bool mips_split_p[NUM_SYMBOL_TYPES];
487 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
488 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
489 if they are matched by a special .md file pattern. */
490 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
492 /* Likewise for HIGHs. */
493 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
495 /* Map hard register number to register class */
496 const enum reg_class mips_regno_to_class[] =
498 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
499 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
500 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
501 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
502 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
503 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
504 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
505 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
506 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
507 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
508 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
509 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
510 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
511 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
512 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
513 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
514 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
515 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
516 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
517 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
518 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
519 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
520 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
521 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
522 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
523 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
524 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
525 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
526 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
527 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
528 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
529 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
530 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
531 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
532 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
533 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
534 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
535 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
536 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
537 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
538 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
539 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
540 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
541 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
542 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
543 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
544 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
547 /* Table of machine dependent attributes. */
548 const struct attribute_spec mips_attribute_table[] =
550 { "long_call", 0, 0, false, true, true, NULL },
551 { "far", 0, 0, false, true, true, NULL },
552 { "near", 0, 0, false, true, true, NULL },
553 /* Switch MIPS16 ASE on and off per-function. We would really like
554 to make these type attributes, but GCC doesn't provide the hooks
555 we need to support the right conversion rules. As declaration
556 attributes, they affect code generation but don't carry other
558 { "mips16", 0, 0, true, false, false, NULL },
559 { "nomips16", 0, 0, true, false, false, NULL },
560 { NULL, 0, 0, false, false, false, NULL }
563 /* A table describing all the processors gcc knows about. Names are
564 matched in the order listed. The first mention of an ISA level is
565 taken as the canonical name for that ISA.
567 To ease comparison, please keep this table in the same order
568 as gas's mips_cpu_info_table[]. Please also make sure that
569 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
570 options correctly. */
571 const struct mips_cpu_info mips_cpu_info_table[] = {
572 /* Entries for generic ISAs */
573 { "mips1", PROCESSOR_R3000, 1, 0 },
574 { "mips2", PROCESSOR_R6000, 2, 0 },
575 { "mips3", PROCESSOR_R4000, 3, 0 },
576 { "mips4", PROCESSOR_R8000, 4, 0 },
577 /* Prefer not to use branch-likely instructions for generic MIPS32rX
578 and MIPS64rX code. The instructions were officially deprecated
579 in revisions 2 and earlier, but revision 3 is likely to downgrade
580 that to a recommendation to avoid the instructions in code that
581 isn't tuned to a specific processor. */
582 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
583 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
584 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
587 { "r3000", PROCESSOR_R3000, 1, 0 },
588 { "r2000", PROCESSOR_R3000, 1, 0 }, /* = r3000 */
589 { "r3900", PROCESSOR_R3900, 1, 0 },
592 { "r6000", PROCESSOR_R6000, 2, 0 },
595 { "r4000", PROCESSOR_R4000, 3, 0 },
596 { "vr4100", PROCESSOR_R4100, 3, 0 },
597 { "vr4111", PROCESSOR_R4111, 3, 0 },
598 { "vr4120", PROCESSOR_R4120, 3, 0 },
599 { "vr4130", PROCESSOR_R4130, 3, 0 },
600 { "vr4300", PROCESSOR_R4300, 3, 0 },
601 { "r4400", PROCESSOR_R4000, 3, 0 }, /* = r4000 */
602 { "r4600", PROCESSOR_R4600, 3, 0 },
603 { "orion", PROCESSOR_R4600, 3, 0 }, /* = r4600 */
604 { "r4650", PROCESSOR_R4650, 3, 0 },
607 { "r8000", PROCESSOR_R8000, 4, 0 },
608 { "vr5000", PROCESSOR_R5000, 4, 0 },
609 { "vr5400", PROCESSOR_R5400, 4, 0 },
610 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
611 { "rm7000", PROCESSOR_R7000, 4, 0 },
612 { "rm9000", PROCESSOR_R9000, 4, 0 },
615 { "4kc", PROCESSOR_4KC, 32, 0 },
616 { "4km", PROCESSOR_4KC, 32, 0 }, /* = 4kc */
617 { "4kp", PROCESSOR_4KP, 32, 0 },
618 { "4ksc", PROCESSOR_4KC, 32, 0 },
620 /* MIPS32 Release 2 */
621 { "m4k", PROCESSOR_M4K, 33, 0 },
622 { "4kec", PROCESSOR_4KC, 33, 0 },
623 { "4kem", PROCESSOR_4KC, 33, 0 },
624 { "4kep", PROCESSOR_4KP, 33, 0 },
625 { "4ksd", PROCESSOR_4KC, 33, 0 },
627 { "24kc", PROCESSOR_24KC, 33, 0 },
628 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
629 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
630 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
631 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
632 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
634 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP */
635 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
636 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
637 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
638 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
639 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
641 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP */
642 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
643 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
644 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
645 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
646 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
648 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2 */
649 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
650 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
651 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
652 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
653 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
654 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
657 { "5kc", PROCESSOR_5KC, 64, 0 },
658 { "5kf", PROCESSOR_5KF, 64, 0 },
659 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
660 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
661 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
662 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
665 /* Default costs. If these are used for a processor we should look
666 up the actual costs. */
667 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
668 COSTS_N_INSNS (7), /* fp_mult_sf */ \
669 COSTS_N_INSNS (8), /* fp_mult_df */ \
670 COSTS_N_INSNS (23), /* fp_div_sf */ \
671 COSTS_N_INSNS (36), /* fp_div_df */ \
672 COSTS_N_INSNS (10), /* int_mult_si */ \
673 COSTS_N_INSNS (10), /* int_mult_di */ \
674 COSTS_N_INSNS (69), /* int_div_si */ \
675 COSTS_N_INSNS (69), /* int_div_di */ \
676 2, /* branch_cost */ \
677 4 /* memory_latency */
679 /* Need to replace these with the costs of calling the appropriate
681 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
682 COSTS_N_INSNS (256), /* fp_mult_sf */ \
683 COSTS_N_INSNS (256), /* fp_mult_df */ \
684 COSTS_N_INSNS (256), /* fp_div_sf */ \
685 COSTS_N_INSNS (256) /* fp_div_df */
687 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
689 COSTS_N_INSNS (1), /* fp_add */
690 COSTS_N_INSNS (1), /* fp_mult_sf */
691 COSTS_N_INSNS (1), /* fp_mult_df */
692 COSTS_N_INSNS (1), /* fp_div_sf */
693 COSTS_N_INSNS (1), /* fp_div_df */
694 COSTS_N_INSNS (1), /* int_mult_si */
695 COSTS_N_INSNS (1), /* int_mult_di */
696 COSTS_N_INSNS (1), /* int_div_si */
697 COSTS_N_INSNS (1), /* int_div_di */
699 4 /* memory_latency */
702 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
705 COSTS_N_INSNS (2), /* fp_add */
706 COSTS_N_INSNS (4), /* fp_mult_sf */
707 COSTS_N_INSNS (5), /* fp_mult_df */
708 COSTS_N_INSNS (12), /* fp_div_sf */
709 COSTS_N_INSNS (19), /* fp_div_df */
710 COSTS_N_INSNS (12), /* int_mult_si */
711 COSTS_N_INSNS (12), /* int_mult_di */
712 COSTS_N_INSNS (35), /* int_div_si */
713 COSTS_N_INSNS (35), /* int_div_di */
715 4 /* memory_latency */
720 COSTS_N_INSNS (6), /* int_mult_si */
721 COSTS_N_INSNS (6), /* int_mult_di */
722 COSTS_N_INSNS (36), /* int_div_si */
723 COSTS_N_INSNS (36), /* int_div_di */
725 4 /* memory_latency */
729 COSTS_N_INSNS (36), /* int_mult_si */
730 COSTS_N_INSNS (36), /* int_mult_di */
731 COSTS_N_INSNS (37), /* int_div_si */
732 COSTS_N_INSNS (37), /* int_div_di */
734 4 /* memory_latency */
738 COSTS_N_INSNS (4), /* int_mult_si */
739 COSTS_N_INSNS (11), /* int_mult_di */
740 COSTS_N_INSNS (36), /* int_div_si */
741 COSTS_N_INSNS (68), /* int_div_di */
743 4 /* memory_latency */
746 COSTS_N_INSNS (4), /* fp_add */
747 COSTS_N_INSNS (4), /* fp_mult_sf */
748 COSTS_N_INSNS (5), /* fp_mult_df */
749 COSTS_N_INSNS (17), /* fp_div_sf */
750 COSTS_N_INSNS (32), /* fp_div_df */
751 COSTS_N_INSNS (4), /* int_mult_si */
752 COSTS_N_INSNS (11), /* int_mult_di */
753 COSTS_N_INSNS (36), /* int_div_si */
754 COSTS_N_INSNS (68), /* int_div_di */
756 4 /* memory_latency */
759 COSTS_N_INSNS (4), /* fp_add */
760 COSTS_N_INSNS (4), /* fp_mult_sf */
761 COSTS_N_INSNS (5), /* fp_mult_df */
762 COSTS_N_INSNS (17), /* fp_div_sf */
763 COSTS_N_INSNS (32), /* fp_div_df */
764 COSTS_N_INSNS (4), /* int_mult_si */
765 COSTS_N_INSNS (7), /* int_mult_di */
766 COSTS_N_INSNS (42), /* int_div_si */
767 COSTS_N_INSNS (72), /* int_div_di */
769 4 /* memory_latency */
773 COSTS_N_INSNS (5), /* int_mult_si */
774 COSTS_N_INSNS (5), /* int_mult_di */
775 COSTS_N_INSNS (41), /* int_div_si */
776 COSTS_N_INSNS (41), /* int_div_di */
778 4 /* memory_latency */
781 COSTS_N_INSNS (8), /* fp_add */
782 COSTS_N_INSNS (8), /* fp_mult_sf */
783 COSTS_N_INSNS (10), /* fp_mult_df */
784 COSTS_N_INSNS (34), /* fp_div_sf */
785 COSTS_N_INSNS (64), /* fp_div_df */
786 COSTS_N_INSNS (5), /* int_mult_si */
787 COSTS_N_INSNS (5), /* int_mult_di */
788 COSTS_N_INSNS (41), /* int_div_si */
789 COSTS_N_INSNS (41), /* int_div_di */
791 4 /* memory_latency */
794 COSTS_N_INSNS (4), /* fp_add */
795 COSTS_N_INSNS (4), /* fp_mult_sf */
796 COSTS_N_INSNS (5), /* fp_mult_df */
797 COSTS_N_INSNS (17), /* fp_div_sf */
798 COSTS_N_INSNS (32), /* fp_div_df */
799 COSTS_N_INSNS (5), /* int_mult_si */
800 COSTS_N_INSNS (5), /* int_mult_di */
801 COSTS_N_INSNS (41), /* int_div_si */
802 COSTS_N_INSNS (41), /* int_div_di */
804 4 /* memory_latency */
808 COSTS_N_INSNS (5), /* int_mult_si */
809 COSTS_N_INSNS (5), /* int_mult_di */
810 COSTS_N_INSNS (41), /* int_div_si */
811 COSTS_N_INSNS (41), /* int_div_di */
813 4 /* memory_latency */
816 COSTS_N_INSNS (8), /* fp_add */
817 COSTS_N_INSNS (8), /* fp_mult_sf */
818 COSTS_N_INSNS (10), /* fp_mult_df */
819 COSTS_N_INSNS (34), /* fp_div_sf */
820 COSTS_N_INSNS (64), /* fp_div_df */
821 COSTS_N_INSNS (5), /* int_mult_si */
822 COSTS_N_INSNS (5), /* int_mult_di */
823 COSTS_N_INSNS (41), /* int_div_si */
824 COSTS_N_INSNS (41), /* int_div_di */
826 4 /* memory_latency */
829 COSTS_N_INSNS (4), /* fp_add */
830 COSTS_N_INSNS (4), /* fp_mult_sf */
831 COSTS_N_INSNS (5), /* fp_mult_df */
832 COSTS_N_INSNS (17), /* fp_div_sf */
833 COSTS_N_INSNS (32), /* fp_div_df */
834 COSTS_N_INSNS (5), /* int_mult_si */
835 COSTS_N_INSNS (5), /* int_mult_di */
836 COSTS_N_INSNS (41), /* int_div_si */
837 COSTS_N_INSNS (41), /* int_div_di */
839 4 /* memory_latency */
842 COSTS_N_INSNS (6), /* fp_add */
843 COSTS_N_INSNS (6), /* fp_mult_sf */
844 COSTS_N_INSNS (7), /* fp_mult_df */
845 COSTS_N_INSNS (25), /* fp_div_sf */
846 COSTS_N_INSNS (48), /* fp_div_df */
847 COSTS_N_INSNS (5), /* int_mult_si */
848 COSTS_N_INSNS (5), /* int_mult_di */
849 COSTS_N_INSNS (41), /* int_div_si */
850 COSTS_N_INSNS (41), /* int_div_di */
852 4 /* memory_latency */
858 COSTS_N_INSNS (2), /* fp_add */
859 COSTS_N_INSNS (4), /* fp_mult_sf */
860 COSTS_N_INSNS (5), /* fp_mult_df */
861 COSTS_N_INSNS (12), /* fp_div_sf */
862 COSTS_N_INSNS (19), /* fp_div_df */
863 COSTS_N_INSNS (2), /* int_mult_si */
864 COSTS_N_INSNS (2), /* int_mult_di */
865 COSTS_N_INSNS (35), /* int_div_si */
866 COSTS_N_INSNS (35), /* int_div_di */
868 4 /* memory_latency */
871 COSTS_N_INSNS (3), /* fp_add */
872 COSTS_N_INSNS (5), /* fp_mult_sf */
873 COSTS_N_INSNS (6), /* fp_mult_df */
874 COSTS_N_INSNS (15), /* fp_div_sf */
875 COSTS_N_INSNS (16), /* fp_div_df */
876 COSTS_N_INSNS (17), /* int_mult_si */
877 COSTS_N_INSNS (17), /* int_mult_di */
878 COSTS_N_INSNS (38), /* int_div_si */
879 COSTS_N_INSNS (38), /* int_div_di */
881 6 /* memory_latency */
884 COSTS_N_INSNS (6), /* fp_add */
885 COSTS_N_INSNS (7), /* fp_mult_sf */
886 COSTS_N_INSNS (8), /* fp_mult_df */
887 COSTS_N_INSNS (23), /* fp_div_sf */
888 COSTS_N_INSNS (36), /* fp_div_df */
889 COSTS_N_INSNS (10), /* int_mult_si */
890 COSTS_N_INSNS (10), /* int_mult_di */
891 COSTS_N_INSNS (69), /* int_div_si */
892 COSTS_N_INSNS (69), /* int_div_di */
894 6 /* memory_latency */
906 /* The only costs that appear to be updated here are
907 integer multiplication. */
909 COSTS_N_INSNS (4), /* int_mult_si */
910 COSTS_N_INSNS (6), /* int_mult_di */
911 COSTS_N_INSNS (69), /* int_div_si */
912 COSTS_N_INSNS (69), /* int_div_di */
914 4 /* memory_latency */
926 COSTS_N_INSNS (6), /* fp_add */
927 COSTS_N_INSNS (4), /* fp_mult_sf */
928 COSTS_N_INSNS (5), /* fp_mult_df */
929 COSTS_N_INSNS (23), /* fp_div_sf */
930 COSTS_N_INSNS (36), /* fp_div_df */
931 COSTS_N_INSNS (5), /* int_mult_si */
932 COSTS_N_INSNS (5), /* int_mult_di */
933 COSTS_N_INSNS (36), /* int_div_si */
934 COSTS_N_INSNS (36), /* int_div_di */
936 4 /* memory_latency */
939 COSTS_N_INSNS (6), /* fp_add */
940 COSTS_N_INSNS (5), /* fp_mult_sf */
941 COSTS_N_INSNS (6), /* fp_mult_df */
942 COSTS_N_INSNS (30), /* fp_div_sf */
943 COSTS_N_INSNS (59), /* fp_div_df */
944 COSTS_N_INSNS (3), /* int_mult_si */
945 COSTS_N_INSNS (4), /* int_mult_di */
946 COSTS_N_INSNS (42), /* int_div_si */
947 COSTS_N_INSNS (74), /* int_div_di */
949 4 /* memory_latency */
952 COSTS_N_INSNS (6), /* fp_add */
953 COSTS_N_INSNS (5), /* fp_mult_sf */
954 COSTS_N_INSNS (6), /* fp_mult_df */
955 COSTS_N_INSNS (30), /* fp_div_sf */
956 COSTS_N_INSNS (59), /* fp_div_df */
957 COSTS_N_INSNS (5), /* int_mult_si */
958 COSTS_N_INSNS (9), /* int_mult_di */
959 COSTS_N_INSNS (42), /* int_div_si */
960 COSTS_N_INSNS (74), /* int_div_di */
962 4 /* memory_latency */
965 /* The only costs that are changed here are
966 integer multiplication. */
967 COSTS_N_INSNS (6), /* fp_add */
968 COSTS_N_INSNS (7), /* fp_mult_sf */
969 COSTS_N_INSNS (8), /* fp_mult_df */
970 COSTS_N_INSNS (23), /* fp_div_sf */
971 COSTS_N_INSNS (36), /* fp_div_df */
972 COSTS_N_INSNS (5), /* int_mult_si */
973 COSTS_N_INSNS (9), /* int_mult_di */
974 COSTS_N_INSNS (69), /* int_div_si */
975 COSTS_N_INSNS (69), /* int_div_di */
977 4 /* memory_latency */
983 /* The only costs that are changed here are
984 integer multiplication. */
985 COSTS_N_INSNS (6), /* fp_add */
986 COSTS_N_INSNS (7), /* fp_mult_sf */
987 COSTS_N_INSNS (8), /* fp_mult_df */
988 COSTS_N_INSNS (23), /* fp_div_sf */
989 COSTS_N_INSNS (36), /* fp_div_df */
990 COSTS_N_INSNS (3), /* int_mult_si */
991 COSTS_N_INSNS (8), /* int_mult_di */
992 COSTS_N_INSNS (69), /* int_div_si */
993 COSTS_N_INSNS (69), /* int_div_di */
995 4 /* memory_latency */
998 /* These costs are the same as the SB-1A below. */
999 COSTS_N_INSNS (4), /* fp_add */
1000 COSTS_N_INSNS (4), /* fp_mult_sf */
1001 COSTS_N_INSNS (4), /* fp_mult_df */
1002 COSTS_N_INSNS (24), /* fp_div_sf */
1003 COSTS_N_INSNS (32), /* fp_div_df */
1004 COSTS_N_INSNS (3), /* int_mult_si */
1005 COSTS_N_INSNS (4), /* int_mult_di */
1006 COSTS_N_INSNS (36), /* int_div_si */
1007 COSTS_N_INSNS (68), /* int_div_di */
1008 1, /* branch_cost */
1009 4 /* memory_latency */
1012 /* These costs are the same as the SB-1 above. */
1013 COSTS_N_INSNS (4), /* fp_add */
1014 COSTS_N_INSNS (4), /* fp_mult_sf */
1015 COSTS_N_INSNS (4), /* fp_mult_df */
1016 COSTS_N_INSNS (24), /* fp_div_sf */
1017 COSTS_N_INSNS (32), /* fp_div_df */
1018 COSTS_N_INSNS (3), /* int_mult_si */
1019 COSTS_N_INSNS (4), /* int_mult_di */
1020 COSTS_N_INSNS (36), /* int_div_si */
1021 COSTS_N_INSNS (68), /* int_div_di */
1022 1, /* branch_cost */
1023 4 /* memory_latency */
1030 /* Use a hash table to keep track of implicit mips16/nomips16 attributes
1031 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1033 struct mflip_mips16_entry GTY (()) {
1037 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1039 /* Hash table callbacks for mflip_mips16_htab. */
1042 mflip_mips16_htab_hash (const void *entry)
1044 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1048 mflip_mips16_htab_eq (const void *entry, const void *name)
1050 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1051 (const char *) name) == 0;
1054 static GTY(()) int mips16_flipper;
1056 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1057 for -mflip-mips16. Return true if it should use "mips16" and false if
1058 it should use "nomips16". */
1061 mflip_mips16_use_mips16_p (tree decl)
1063 struct mflip_mips16_entry *entry;
1068 /* Use the opposite of the command-line setting for anonymous decls. */
1069 if (!DECL_NAME (decl))
1070 return !mips_base_mips16;
1072 if (!mflip_mips16_htab)
1073 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1074 mflip_mips16_htab_eq, NULL);
1076 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1077 hash = htab_hash_string (name);
1078 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1079 entry = (struct mflip_mips16_entry *) *slot;
1082 mips16_flipper = !mips16_flipper;
1083 entry = GGC_NEW (struct mflip_mips16_entry);
1085 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1088 return entry->mips16_p;
1091 /* Predicates to test for presence of "near" and "far"/"long_call"
1092 attributes on the given TYPE. */
1095 mips_near_type_p (const_tree type)
1097 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1101 mips_far_type_p (const_tree type)
1103 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1104 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1107 /* Similar predicates for "mips16"/"nomips16" attributes. */
1110 mips_mips16_decl_p (const_tree decl)
1112 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1116 mips_nomips16_decl_p (const_tree decl)
1118 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1121 /* Return true if function DECL is a MIPS16 function. Return the ambient
1122 setting if DECL is null. */
1125 mips_use_mips16_mode_p (tree decl)
1129 /* Nested functions must use the same frame pointer as their
1130 parent and must therefore use the same ISA mode. */
1131 tree parent = decl_function_context (decl);
1134 if (mips_mips16_decl_p (decl))
1136 if (mips_nomips16_decl_p (decl))
1139 return mips_base_mips16;
1142 /* Return 0 if the attributes for two types are incompatible, 1 if they
1143 are compatible, and 2 if they are nearly compatible (which causes a
1144 warning to be generated). */
1147 mips_comp_type_attributes (const_tree type1, const_tree type2)
1149 /* Check for mismatch of non-default calling convention. */
1150 if (TREE_CODE (type1) != FUNCTION_TYPE)
1153 /* Disallow mixed near/far attributes. */
1154 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1156 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1162 /* Implement TARGET_INSERT_ATTRIBUTES. */
1165 mips_insert_attributes (tree decl, tree *attributes)
1168 bool mips16_p, nomips16_p;
1170 /* Check for "mips16" and "nomips16" attributes. */
1171 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1172 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1173 if (TREE_CODE (decl) != FUNCTION_DECL)
1176 error ("%qs attribute only applies to functions", "mips16");
1178 error ("%qs attribute only applies to functions", "nomips16");
1182 mips16_p |= mips_mips16_decl_p (decl);
1183 nomips16_p |= mips_nomips16_decl_p (decl);
1184 if (mips16_p || nomips16_p)
1186 /* DECL cannot be simultaneously mips16 and nomips16. */
1187 if (mips16_p && nomips16_p)
1188 error ("%qs cannot have both %<mips16%> and "
1189 "%<nomips16%> attributes",
1190 IDENTIFIER_POINTER (DECL_NAME (decl)));
1192 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1194 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1195 "mips16" attribute, arbitrarily pick one. We must pick the same
1196 setting for duplicate declarations of a function. */
1197 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1198 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1203 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1206 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1208 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1209 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1210 error ("%qs redeclared with conflicting %qs attributes",
1211 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1212 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1213 error ("%qs redeclared with conflicting %qs attributes",
1214 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1216 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1217 DECL_ATTRIBUTES (newdecl));
1220 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1221 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1224 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1226 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1228 *base_ptr = XEXP (x, 0);
1229 *offset_ptr = INTVAL (XEXP (x, 1));
1238 static unsigned int mips_build_integer (struct mips_integer_op *,
1239 unsigned HOST_WIDE_INT);
1241 /* Subroutine of mips_build_integer (with the same interface).
1242 Assume that the final action in the sequence should be a left shift. */
1245 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1247 unsigned int i, shift;
1249 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1250 since signed numbers are easier to load than unsigned ones. */
1252 while ((value & 1) == 0)
1253 value /= 2, shift++;
1255 i = mips_build_integer (codes, value);
1256 codes[i].code = ASHIFT;
1257 codes[i].value = shift;
1262 /* As for mips_build_shift, but assume that the final action will be
1263 an IOR or PLUS operation. */
1266 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1268 unsigned HOST_WIDE_INT high;
1271 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1272 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1274 /* The constant is too complex to load with a simple lui/ori pair
1275 so our goal is to clear as many trailing zeros as possible.
1276 In this case, we know bit 16 is set and that the low 16 bits
1277 form a negative number. If we subtract that number from VALUE,
1278 we will clear at least the lowest 17 bits, maybe more. */
1279 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1280 codes[i].code = PLUS;
1281 codes[i].value = CONST_LOW_PART (value);
1285 i = mips_build_integer (codes, high);
1286 codes[i].code = IOR;
1287 codes[i].value = value & 0xffff;
1293 /* Fill CODES with a sequence of rtl operations to load VALUE.
1294 Return the number of operations needed. */
1297 mips_build_integer (struct mips_integer_op *codes,
1298 unsigned HOST_WIDE_INT value)
1300 if (SMALL_OPERAND (value)
1301 || SMALL_OPERAND_UNSIGNED (value)
1302 || LUI_OPERAND (value))
1304 /* The value can be loaded with a single instruction. */
1305 codes[0].code = UNKNOWN;
1306 codes[0].value = value;
1309 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1311 /* Either the constant is a simple LUI/ORI combination or its
1312 lowest bit is set. We don't want to shift in this case. */
1313 return mips_build_lower (codes, value);
1315 else if ((value & 0xffff) == 0)
1317 /* The constant will need at least three actions. The lowest
1318 16 bits are clear, so the final action will be a shift. */
1319 return mips_build_shift (codes, value);
1323 /* The final action could be a shift, add or inclusive OR.
1324 Rather than use a complex condition to select the best
1325 approach, try both mips_build_shift and mips_build_lower
1326 and pick the one that gives the shortest sequence.
1327 Note that this case is only used once per constant. */
1328 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1329 unsigned int cost, alt_cost;
1331 cost = mips_build_shift (codes, value);
1332 alt_cost = mips_build_lower (alt_codes, value);
1333 if (alt_cost < cost)
1335 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1342 /* Return true if X is a thread-local symbol. */
1345 mips_tls_operand_p (rtx x)
1347 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1350 /* Return true if SYMBOL_REF X is associated with a global symbol
1351 (in the STB_GLOBAL sense). */
1354 mips_global_symbol_p (const_rtx x)
1356 const_tree const decl = SYMBOL_REF_DECL (x);
1359 return !SYMBOL_REF_LOCAL_P (x);
1361 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1362 or weak symbols. Relocations in the object file will be against
1363 the target symbol, so it's that symbol's binding that matters here. */
1364 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1367 /* Return true if SYMBOL_REF X binds locally. */
1370 mips_symbol_binds_local_p (const_rtx x)
1372 return (SYMBOL_REF_DECL (x)
1373 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1374 : SYMBOL_REF_LOCAL_P (x));
1377 /* Return true if rtx constants of mode MODE should be put into a small
1381 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1383 return (!TARGET_EMBEDDED_DATA
1384 && TARGET_LOCAL_SDATA
1385 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1388 /* Return true if X should not be moved directly into register $25.
1389 We need this because many versions of GAS will treat "la $25,foo" as
1390 part of a call sequence and so allow a global "foo" to be lazily bound. */
1393 mips_dangerous_for_la25_p (rtx x)
1395 return (!TARGET_EXPLICIT_RELOCS
1397 && GET_CODE (x) == SYMBOL_REF
1398 && mips_global_symbol_p (x));
1401 /* Return the method that should be used to access SYMBOL_REF or
1402 LABEL_REF X in context CONTEXT. */
1404 static enum mips_symbol_type
1405 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1408 return SYMBOL_GOT_DISP;
1410 if (GET_CODE (x) == LABEL_REF)
1412 /* LABEL_REFs are used for jump tables as well as text labels.
1413 Only return SYMBOL_PC_RELATIVE if we know the label is in
1414 the text section. */
1415 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1416 return SYMBOL_PC_RELATIVE;
1417 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1418 return SYMBOL_GOT_PAGE_OFST;
1419 return SYMBOL_ABSOLUTE;
1422 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1424 if (SYMBOL_REF_TLS_MODEL (x))
1427 if (CONSTANT_POOL_ADDRESS_P (x))
1429 if (TARGET_MIPS16_TEXT_LOADS)
1430 return SYMBOL_PC_RELATIVE;
1432 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1433 return SYMBOL_PC_RELATIVE;
1435 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1436 return SYMBOL_GP_RELATIVE;
1439 /* Do not use small-data accesses for weak symbols; they may end up
1442 && SYMBOL_REF_SMALL_P (x)
1443 && !SYMBOL_REF_WEAK (x))
1444 return SYMBOL_GP_RELATIVE;
1446 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1449 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1451 /* There are three cases to consider:
1453 - o32 PIC (either with or without explicit relocs)
1454 - n32/n64 PIC without explicit relocs
1455 - n32/n64 PIC with explicit relocs
1457 In the first case, both local and global accesses will use an
1458 R_MIPS_GOT16 relocation. We must correctly predict which of
1459 the two semantics (local or global) the assembler and linker
1460 will apply. The choice depends on the symbol's binding rather
1461 than its visibility.
1463 In the second case, the assembler will not use R_MIPS_GOT16
1464 relocations, but it chooses between local and global accesses
1465 in the same way as for o32 PIC.
1467 In the third case we have more freedom since both forms of
1468 access will work for any kind of symbol. However, there seems
1469 little point in doing things differently. */
1470 if (mips_global_symbol_p (x))
1471 return SYMBOL_GOT_DISP;
1473 return SYMBOL_GOT_PAGE_OFST;
1476 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1477 return SYMBOL_FORCE_TO_MEM;
1478 return SYMBOL_ABSOLUTE;
1481 /* Classify symbolic expression X, given that it appears in context
1484 static enum mips_symbol_type
1485 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1489 split_const (x, &x, &offset);
1490 if (UNSPEC_ADDRESS_P (x))
1491 return UNSPEC_ADDRESS_TYPE (x);
1493 return mips_classify_symbol (x, context);
1496 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1497 is the alignment (in bytes) of SYMBOL_REF X. */
1500 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1502 /* If for some reason we can't get the alignment for the
1503 symbol, initializing this to one means we will only accept
1505 HOST_WIDE_INT align = 1;
1508 /* Get the alignment of the symbol we're referring to. */
1509 t = SYMBOL_REF_DECL (x);
1511 align = DECL_ALIGN_UNIT (t);
1513 return offset >= 0 && offset < align;
1516 /* Return true if X is a symbolic constant that can be used in context
1517 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1520 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1521 enum mips_symbol_type *symbol_type)
1525 split_const (x, &x, &offset);
1526 if (UNSPEC_ADDRESS_P (x))
1528 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1529 x = UNSPEC_ADDRESS (x);
1531 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1533 *symbol_type = mips_classify_symbol (x, context);
1534 if (*symbol_type == SYMBOL_TLS)
1540 if (offset == const0_rtx)
1543 /* Check whether a nonzero offset is valid for the underlying
1545 switch (*symbol_type)
1547 case SYMBOL_ABSOLUTE:
1548 case SYMBOL_FORCE_TO_MEM:
1549 case SYMBOL_32_HIGH:
1550 case SYMBOL_64_HIGH:
1553 /* If the target has 64-bit pointers and the object file only
1554 supports 32-bit symbols, the values of those symbols will be
1555 sign-extended. In this case we can't allow an arbitrary offset
1556 in case the 32-bit value X + OFFSET has a different sign from X. */
1557 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1558 return offset_within_block_p (x, INTVAL (offset));
1560 /* In other cases the relocations can handle any offset. */
1563 case SYMBOL_PC_RELATIVE:
1564 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1565 In this case, we no longer have access to the underlying constant,
1566 but the original symbol-based access was known to be valid. */
1567 if (GET_CODE (x) == LABEL_REF)
1572 case SYMBOL_GP_RELATIVE:
1573 /* Make sure that the offset refers to something within the
1574 same object block. This should guarantee that the final
1575 PC- or GP-relative offset is within the 16-bit limit. */
1576 return offset_within_block_p (x, INTVAL (offset));
1578 case SYMBOL_GOT_PAGE_OFST:
1579 case SYMBOL_GOTOFF_PAGE:
1580 /* If the symbol is global, the GOT entry will contain the symbol's
1581 address, and we will apply a 16-bit offset after loading it.
1582 If the symbol is local, the linker should provide enough local
1583 GOT entries for a 16-bit offset, but larger offsets may lead
1585 return SMALL_INT (offset);
1589 /* There is no carry between the HI and LO REL relocations, so the
1590 offset is only valid if we know it won't lead to such a carry. */
1591 return mips_offset_within_alignment_p (x, INTVAL (offset));
1593 case SYMBOL_GOT_DISP:
1594 case SYMBOL_GOTOFF_DISP:
1595 case SYMBOL_GOTOFF_CALL:
1596 case SYMBOL_GOTOFF_LOADGP:
1599 case SYMBOL_GOTTPREL:
1607 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1608 single instruction. We rely on the fact that, in the worst case,
1609 all instructions involved in a MIPS16 address calculation are usually
1613 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1617 case SYMBOL_ABSOLUTE:
1618 /* When using 64-bit symbols, we need 5 preparatory instructions,
1621 lui $at,%highest(symbol)
1622 daddiu $at,$at,%higher(symbol)
1624 daddiu $at,$at,%hi(symbol)
1627 The final address is then $at + %lo(symbol). With 32-bit
1628 symbols we just need a preparatory lui for normal mode and
1629 a preparatory "li; sll" for MIPS16. */
1630 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1632 case SYMBOL_GP_RELATIVE:
1633 /* Treat GP-relative accesses as taking a single instruction on
1634 MIPS16 too; the copy of $gp can often be shared. */
1637 case SYMBOL_PC_RELATIVE:
1638 /* PC-relative constants can be only be used with addiupc,
1640 if (mode == MAX_MACHINE_MODE
1641 || GET_MODE_SIZE (mode) == 4
1642 || GET_MODE_SIZE (mode) == 8)
1645 /* The constant must be loaded using addiupc first. */
1648 case SYMBOL_FORCE_TO_MEM:
1649 /* LEAs will be converted into constant-pool references by
1651 if (mode == MAX_MACHINE_MODE)
1654 /* The constant must be loaded from the constant pool. */
1657 case SYMBOL_GOT_DISP:
1658 /* The constant will have to be loaded from the GOT before it
1659 is used in an address. */
1660 if (mode != MAX_MACHINE_MODE)
1665 case SYMBOL_GOT_PAGE_OFST:
1666 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1667 the local/global classification is accurate. See override_options
1670 The worst cases are:
1672 (1) For local symbols when generating o32 or o64 code. The assembler
1678 ...and the final address will be $at + %lo(symbol).
1680 (2) For global symbols when -mxgot. The assembler will use:
1682 lui $at,%got_hi(symbol)
1685 ...and the final address will be $at + %got_lo(symbol). */
1688 case SYMBOL_GOTOFF_PAGE:
1689 case SYMBOL_GOTOFF_DISP:
1690 case SYMBOL_GOTOFF_CALL:
1691 case SYMBOL_GOTOFF_LOADGP:
1692 case SYMBOL_32_HIGH:
1693 case SYMBOL_64_HIGH:
1699 case SYMBOL_GOTTPREL:
1702 /* A 16-bit constant formed by a single relocation, or a 32-bit
1703 constant formed from a high 16-bit relocation and a low 16-bit
1704 relocation. Use mips_split_p to determine which. */
1705 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1708 /* We don't treat a bare TLS symbol as a constant. */
1714 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1715 to load symbols of type TYPE into a register. Return 0 if the given
1716 type of symbol cannot be used as an immediate operand.
1718 Otherwise, return the number of instructions needed to load or store
1719 values of mode MODE to or from addresses of type TYPE. Return 0 if
1720 the given type of symbol is not valid in addresses.
1722 In both cases, treat extended MIPS16 instructions as two instructions. */
1725 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1727 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1730 /* Return true if X can not be forced into a constant pool. */
1733 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1735 return mips_tls_operand_p (*x);
1738 /* Return true if X can not be forced into a constant pool. */
1741 mips_cannot_force_const_mem (rtx x)
1747 /* As an optimization, reject constants that mips_legitimize_move
1750 Suppose we have a multi-instruction sequence that loads constant C
1751 into register R. If R does not get allocated a hard register, and
1752 R is used in an operand that allows both registers and memory
1753 references, reload will consider forcing C into memory and using
1754 one of the instruction's memory alternatives. Returning false
1755 here will force it to use an input reload instead. */
1756 if (GET_CODE (x) == CONST_INT)
1759 split_const (x, &base, &offset);
1760 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1764 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1770 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1771 constants when we're using a per-function constant pool. */
1774 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1775 const_rtx x ATTRIBUTE_UNUSED)
1777 return !TARGET_MIPS16_PCREL_LOADS;
1780 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1783 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1785 if (!HARD_REGISTER_NUM_P (regno))
1789 regno = reg_renumber[regno];
1792 /* These fake registers will be eliminated to either the stack or
1793 hard frame pointer, both of which are usually valid base registers.
1794 Reload deals with the cases where the eliminated form isn't valid. */
1795 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1798 /* In mips16 mode, the stack pointer can only address word and doubleword
1799 values, nothing smaller. There are two problems here:
1801 (a) Instantiating virtual registers can introduce new uses of the
1802 stack pointer. If these virtual registers are valid addresses,
1803 the stack pointer should be too.
1805 (b) Most uses of the stack pointer are not made explicit until
1806 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1807 We don't know until that stage whether we'll be eliminating to the
1808 stack pointer (which needs the restriction) or the hard frame
1809 pointer (which doesn't).
1811 All in all, it seems more consistent to only enforce this restriction
1812 during and after reload. */
1813 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1814 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1816 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1820 /* Return true if X is a valid base register for the given mode.
1821 Allow only hard registers if STRICT. */
1824 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1826 if (!strict && GET_CODE (x) == SUBREG)
1830 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1834 /* Return true if X is a valid address for machine mode MODE. If it is,
1835 fill in INFO appropriately. STRICT is true if we should only accept
1836 hard base registers. */
1839 mips_classify_address (struct mips_address_info *info, rtx x,
1840 enum machine_mode mode, int strict)
1842 switch (GET_CODE (x))
1846 info->type = ADDRESS_REG;
1848 info->offset = const0_rtx;
1849 return mips_valid_base_register_p (info->reg, mode, strict);
1852 info->type = ADDRESS_REG;
1853 info->reg = XEXP (x, 0);
1854 info->offset = XEXP (x, 1);
1855 return (mips_valid_base_register_p (info->reg, mode, strict)
1856 && const_arith_operand (info->offset, VOIDmode));
1859 info->type = ADDRESS_LO_SUM;
1860 info->reg = XEXP (x, 0);
1861 info->offset = XEXP (x, 1);
1862 /* We have to trust the creator of the LO_SUM to do something vaguely
1863 sane. Target-independent code that creates a LO_SUM should also
1864 create and verify the matching HIGH. Target-independent code that
1865 adds an offset to a LO_SUM must prove that the offset will not
1866 induce a carry. Failure to do either of these things would be
1867 a bug, and we are not required to check for it here. The MIPS
1868 backend itself should only create LO_SUMs for valid symbolic
1869 constants, with the high part being either a HIGH or a copy
1872 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1873 return (mips_valid_base_register_p (info->reg, mode, strict)
1874 && mips_symbol_insns (info->symbol_type, mode) > 0
1875 && mips_lo_relocs[info->symbol_type] != 0);
1878 /* Small-integer addresses don't occur very often, but they
1879 are legitimate if $0 is a valid base register. */
1880 info->type = ADDRESS_CONST_INT;
1881 return !TARGET_MIPS16 && SMALL_INT (x);
1886 info->type = ADDRESS_SYMBOLIC;
1887 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1889 && mips_symbol_insns (info->symbol_type, mode) > 0
1890 && !mips_split_p[info->symbol_type]);
1897 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1898 returns a nonzero value if X is a legitimate address for a memory
1899 operand of the indicated MODE. STRICT is nonzero if this function
1900 is called during reload. */
1903 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1905 struct mips_address_info addr;
1907 return mips_classify_address (&addr, x, mode, strict);
1910 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1913 mips_stack_address_p (rtx x, enum machine_mode mode)
1915 struct mips_address_info addr;
1917 return (mips_classify_address (&addr, x, mode, false)
1918 && addr.type == ADDRESS_REG
1919 && addr.reg == stack_pointer_rtx);
1922 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
1923 address instruction. */
1926 mips_lwxs_address_p (rtx addr)
1929 && GET_CODE (addr) == PLUS
1930 && REG_P (XEXP (addr, 1)))
1932 rtx offset = XEXP (addr, 0);
1933 if (GET_CODE (offset) == MULT
1934 && REG_P (XEXP (offset, 0))
1935 && GET_CODE (XEXP (offset, 1)) == CONST_INT
1936 && INTVAL (XEXP (offset, 1)) == 4)
1942 /* Return true if a value at OFFSET bytes from BASE can be accessed
1943 using an unextended mips16 instruction. MODE is the mode of the
1946 Usually the offset in an unextended instruction is a 5-bit field.
1947 The offset is unsigned and shifted left once for HIs, twice
1948 for SIs, and so on. An exception is SImode accesses off the
1949 stack pointer, which have an 8-bit immediate field. */
1952 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1955 && GET_CODE (offset) == CONST_INT
1956 && INTVAL (offset) >= 0
1957 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1959 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1960 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1961 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1967 /* Return the number of instructions needed to load or store a value
1968 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
1969 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
1970 otherwise assume that a single load or store is enough.
1972 For mips16 code, count extended instructions as two instructions. */
1975 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
1977 struct mips_address_info addr;
1980 /* BLKmode is used for single unaligned loads and stores and should
1981 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
1982 meaningless, so we have to single it out as a special case one way
1984 if (mode != BLKmode && might_split_p)
1985 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1989 if (mips_classify_address (&addr, x, mode, false))
1994 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1998 case ADDRESS_LO_SUM:
1999 return (TARGET_MIPS16 ? factor * 2 : factor);
2001 case ADDRESS_CONST_INT:
2004 case ADDRESS_SYMBOLIC:
2005 return factor * mips_symbol_insns (addr.symbol_type, mode);
2011 /* Likewise for constant X. */
2014 mips_const_insns (rtx x)
2016 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2017 enum mips_symbol_type symbol_type;
2020 switch (GET_CODE (x))
2023 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2025 || !mips_split_p[symbol_type])
2028 /* This is simply an lui for normal mode. It is an extended
2029 "li" followed by an extended "sll" for MIPS16. */
2030 return TARGET_MIPS16 ? 4 : 1;
2034 /* Unsigned 8-bit constants can be loaded using an unextended
2035 LI instruction. Unsigned 16-bit constants can be loaded
2036 using an extended LI. Negative constants must be loaded
2037 using LI and then negated. */
2038 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2039 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2040 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2041 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2044 return mips_build_integer (codes, INTVAL (x));
2048 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2054 /* See if we can refer to X directly. */
2055 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2056 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2058 /* Otherwise try splitting the constant into a base and offset.
2059 16-bit offsets can be added using an extra addiu. Larger offsets
2060 must be calculated separately and then added to the base. */
2061 split_const (x, &x, &offset);
2064 int n = mips_const_insns (x);
2067 if (SMALL_INT (offset))
2070 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2077 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2086 /* Return the number of instructions needed to implement INSN,
2087 given that it loads from or stores to MEM. Count extended
2088 mips16 instructions as two instructions. */
2091 mips_load_store_insns (rtx mem, rtx insn)
2093 enum machine_mode mode;
2097 gcc_assert (MEM_P (mem));
2098 mode = GET_MODE (mem);
2100 /* Try to prove that INSN does not need to be split. */
2101 might_split_p = true;
2102 if (GET_MODE_BITSIZE (mode) == 64)
2104 set = single_set (insn);
2105 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2106 might_split_p = false;
2109 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2113 /* Return the number of instructions needed for an integer division. */
2116 mips_idiv_insns (void)
2121 if (TARGET_CHECK_ZERO_DIV)
2123 if (GENERATE_DIVIDE_TRAPS)
2129 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2134 /* Emit a move from SRC to DEST. Assume that the move expanders can
2135 handle all moves if !can_create_pseudo_p (). The distinction is
2136 important because, unlike emit_move_insn, the move expanders know
2137 how to force Pmode objects into the constant pool even when the
2138 constant pool address is not itself legitimate. */
2141 mips_emit_move (rtx dest, rtx src)
2143 return (can_create_pseudo_p ()
2144 ? emit_move_insn (dest, src)
2145 : emit_move_insn_1 (dest, src));
2148 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2151 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2153 emit_insn (gen_rtx_SET (VOIDmode, target,
2154 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2157 /* Copy VALUE to a register and return that register. If new psuedos
2158 are allowed, copy it into a new register, otherwise use DEST. */
2161 mips_force_temporary (rtx dest, rtx value)
2163 if (can_create_pseudo_p ())
2164 return force_reg (Pmode, value);
2167 mips_emit_move (copy_rtx (dest), value);
2172 /* If we can access small data directly (using gp-relative relocation
2173 operators) return the small data pointer, otherwise return null.
2175 For each mips16 function which refers to GP relative symbols, we
2176 use a pseudo register, initialized at the start of the function, to
2177 hold the $gp value. */
2180 mips16_gp_pseudo_reg (void)
2182 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2183 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2185 /* Don't initialize the pseudo register if we are being called from
2186 the tree optimizers' cost-calculation routines. */
2187 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2188 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2192 /* We want to initialize this to a value which gcc will believe
2194 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2196 push_topmost_sequence ();
2197 /* We need to emit the initialization after the FUNCTION_BEG
2198 note, so that it will be integrated. */
2199 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
2201 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
2203 if (scan == NULL_RTX)
2204 scan = get_insns ();
2205 insn = emit_insn_after (insn, scan);
2206 pop_topmost_sequence ();
2208 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2211 return cfun->machine->mips16_gp_pseudo_rtx;
2214 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2215 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2216 constant in that context and can be split into a high part and a LO_SUM.
2217 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2218 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2220 TEMP is as for mips_force_temporary and is used to load the high
2221 part into a register. */
2224 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2226 enum mips_symbol_context context;
2227 enum mips_symbol_type symbol_type;
2230 context = (mode == MAX_MACHINE_MODE
2231 ? SYMBOL_CONTEXT_LEA
2232 : SYMBOL_CONTEXT_MEM);
2233 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2234 || mips_symbol_insns (symbol_type, mode) == 0
2235 || !mips_split_p[symbol_type])
2240 if (symbol_type == SYMBOL_GP_RELATIVE)
2242 if (!can_create_pseudo_p ())
2244 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2248 high = mips16_gp_pseudo_reg ();
2252 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2253 high = mips_force_temporary (temp, high);
2255 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2261 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2262 and add CONST_INT OFFSET to the result. */
2265 mips_unspec_address_offset (rtx base, rtx offset,
2266 enum mips_symbol_type symbol_type)
2268 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2269 UNSPEC_ADDRESS_FIRST + symbol_type);
2270 if (offset != const0_rtx)
2271 base = gen_rtx_PLUS (Pmode, base, offset);
2272 return gen_rtx_CONST (Pmode, base);
2275 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2276 type SYMBOL_TYPE. */
2279 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2283 split_const (address, &base, &offset);
2284 return mips_unspec_address_offset (base, offset, symbol_type);
2288 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2289 high part to BASE and return the result. Just return BASE otherwise.
2290 TEMP is available as a temporary register if needed.
2292 The returned expression can be used as the first operand to a LO_SUM. */
2295 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2296 enum mips_symbol_type symbol_type)
2298 if (mips_split_p[symbol_type])
2300 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2301 addr = mips_force_temporary (temp, addr);
2302 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2308 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2309 mips_force_temporary; it is only needed when OFFSET is not a
2313 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2315 if (!SMALL_OPERAND (offset))
2320 /* Load the full offset into a register so that we can use
2321 an unextended instruction for the address itself. */
2322 high = GEN_INT (offset);
2327 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2328 high = GEN_INT (CONST_HIGH_PART (offset));
2329 offset = CONST_LOW_PART (offset);
2331 high = mips_force_temporary (temp, high);
2332 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2334 return plus_constant (reg, offset);
2337 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2338 referencing, and TYPE is the symbol type to use (either global
2339 dynamic or local dynamic). V0 is an RTX for the return value
2340 location. The entire insn sequence is returned. */
2342 static GTY(()) rtx mips_tls_symbol;
2345 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2347 rtx insn, loc, tga, a0;
2349 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2351 if (!mips_tls_symbol)
2352 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2354 loc = mips_unspec_address (sym, type);
2358 emit_insn (gen_rtx_SET (Pmode, a0,
2359 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2360 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2361 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2362 CONST_OR_PURE_CALL_P (insn) = 1;
2363 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2364 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2365 insn = get_insns ();
2372 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2373 return value will be a valid address and move_operand (either a REG
2377 mips_legitimize_tls_address (rtx loc)
2379 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2380 enum tls_model model;
2384 sorry ("MIPS16 TLS");
2385 return gen_reg_rtx (Pmode);
2388 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2389 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2391 model = SYMBOL_REF_TLS_MODEL (loc);
2392 /* Only TARGET_ABICALLS code can have more than one module; other
2393 code must be be static and should not use a GOT. All TLS models
2394 reduce to local exec in this situation. */
2395 if (!TARGET_ABICALLS)
2396 model = TLS_MODEL_LOCAL_EXEC;
2400 case TLS_MODEL_GLOBAL_DYNAMIC:
2401 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2402 dest = gen_reg_rtx (Pmode);
2403 emit_libcall_block (insn, dest, v0, loc);
2406 case TLS_MODEL_LOCAL_DYNAMIC:
2407 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2408 tmp1 = gen_reg_rtx (Pmode);
2410 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2411 share the LDM result with other LD model accesses. */
2412 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2414 emit_libcall_block (insn, tmp1, v0, eqv);
2416 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2417 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2418 mips_unspec_address (loc, SYMBOL_DTPREL));
2421 case TLS_MODEL_INITIAL_EXEC:
2422 tmp1 = gen_reg_rtx (Pmode);
2423 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2424 if (Pmode == DImode)
2426 emit_insn (gen_tls_get_tp_di (v1));
2427 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2431 emit_insn (gen_tls_get_tp_si (v1));
2432 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2434 dest = gen_reg_rtx (Pmode);
2435 emit_insn (gen_add3_insn (dest, tmp1, v1));
2438 case TLS_MODEL_LOCAL_EXEC:
2439 if (Pmode == DImode)
2440 emit_insn (gen_tls_get_tp_di (v1));
2442 emit_insn (gen_tls_get_tp_si (v1));
2444 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2445 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2446 mips_unspec_address (loc, SYMBOL_TPREL));
2456 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2457 be legitimized in a way that the generic machinery might not expect,
2458 put the new address in *XLOC and return true. MODE is the mode of
2459 the memory being accessed. */
2462 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2464 if (mips_tls_operand_p (*xloc))
2466 *xloc = mips_legitimize_tls_address (*xloc);
2470 /* See if the address can split into a high part and a LO_SUM. */
2471 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2474 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2476 /* Handle REG + CONSTANT using mips_add_offset. */
2479 reg = XEXP (*xloc, 0);
2480 if (!mips_valid_base_register_p (reg, mode, 0))
2481 reg = copy_to_mode_reg (Pmode, reg);
2482 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2490 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2493 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2495 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2496 enum machine_mode mode;
2497 unsigned int i, cost;
2500 mode = GET_MODE (dest);
2501 cost = mips_build_integer (codes, value);
2503 /* Apply each binary operation to X. Invariant: X is a legitimate
2504 source operand for a SET pattern. */
2505 x = GEN_INT (codes[0].value);
2506 for (i = 1; i < cost; i++)
2508 if (!can_create_pseudo_p ())
2510 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2514 x = force_reg (mode, x);
2515 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2518 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2522 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2523 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2527 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2531 /* Split moves of big integers into smaller pieces. */
2532 if (splittable_const_int_operand (src, mode))
2534 mips_move_integer (dest, dest, INTVAL (src));
2538 /* Split moves of symbolic constants into high/low pairs. */
2539 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2541 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2545 if (mips_tls_operand_p (src))
2547 mips_emit_move (dest, mips_legitimize_tls_address (src));
2551 /* If we have (const (plus symbol offset)), and that expression cannot
2552 be forced into memory, load the symbol first and add in the offset.
2553 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2554 forced into memory, as it usually produces better code. */
2555 split_const (src, &base, &offset);
2556 if (offset != const0_rtx
2557 && (targetm.cannot_force_const_mem (src)
2558 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2560 base = mips_force_temporary (dest, base);
2561 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2565 src = force_const_mem (mode, src);
2567 /* When using explicit relocs, constant pool references are sometimes
2568 not legitimate addresses. */
2569 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2570 mips_emit_move (dest, src);
2574 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2575 sequence that is valid. */
2578 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2580 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2582 mips_emit_move (dest, force_reg (mode, src));
2586 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2587 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2588 && REG_P (src) && MD_REG_P (REGNO (src))
2589 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2591 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2592 if (GET_MODE_SIZE (mode) <= 4)
2593 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2594 gen_rtx_REG (SImode, REGNO (src)),
2595 gen_rtx_REG (SImode, other_regno)));
2597 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2598 gen_rtx_REG (DImode, REGNO (src)),
2599 gen_rtx_REG (DImode, other_regno)));
2603 /* We need to deal with constants that would be legitimate
2604 immediate_operands but not legitimate move_operands. */
2605 if (CONSTANT_P (src) && !move_operand (src, mode))
2607 mips_legitimize_const_move (mode, dest, src);
2608 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2614 /* Return true if X in context CONTEXT is a small data address that can
2615 be rewritten as a LO_SUM. */
2618 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2620 enum mips_symbol_type symbol_type;
2622 return (TARGET_EXPLICIT_RELOCS
2623 && mips_symbolic_constant_p (x, context, &symbol_type)
2624 && symbol_type == SYMBOL_GP_RELATIVE);
2628 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2629 containing MEM, or null if none. */
2632 mips_small_data_pattern_1 (rtx *loc, void *data)
2634 enum mips_symbol_context context;
2636 if (GET_CODE (*loc) == LO_SUM)
2641 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2646 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2647 return mips_rewrite_small_data_p (*loc, context);
2650 /* Return true if OP refers to small data symbols directly, not through
2654 mips_small_data_pattern_p (rtx op)
2656 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
2659 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2660 DATA is the containing MEM, or null if none. */
2663 mips_rewrite_small_data_1 (rtx *loc, void *data)
2665 enum mips_symbol_context context;
2669 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2673 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2674 if (mips_rewrite_small_data_p (*loc, context))
2675 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2677 if (GET_CODE (*loc) == LO_SUM)
2683 /* If possible, rewrite OP so that it refers to small data using
2684 explicit relocations. */
2687 mips_rewrite_small_data (rtx op)
2689 op = copy_insn (op);
2690 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
2694 /* We need a lot of little routines to check constant values on the
2695 mips16. These are used to figure out how long the instruction will
2696 be. It would be much better to do this using constraints, but
2697 there aren't nearly enough letters available. */
2700 m16_check_op (rtx op, int low, int high, int mask)
2702 return (GET_CODE (op) == CONST_INT
2703 && INTVAL (op) >= low
2704 && INTVAL (op) <= high
2705 && (INTVAL (op) & mask) == 0);
2709 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2711 return m16_check_op (op, 0x1, 0x8, 0);
2715 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2717 return m16_check_op (op, - 0x8, 0x7, 0);
2721 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2723 return m16_check_op (op, - 0x7, 0x8, 0);
2727 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2729 return m16_check_op (op, - 0x10, 0xf, 0);
2733 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2735 return m16_check_op (op, - 0xf, 0x10, 0);
2739 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2741 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2745 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2747 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2751 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2753 return m16_check_op (op, - 0x80, 0x7f, 0);
2757 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2759 return m16_check_op (op, - 0x7f, 0x80, 0);
2763 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2765 return m16_check_op (op, 0x0, 0xff, 0);
2769 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2771 return m16_check_op (op, - 0xff, 0x0, 0);
2775 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2777 return m16_check_op (op, - 0x1, 0xfe, 0);
2781 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2783 return m16_check_op (op, 0x0, 0xff << 2, 3);
2787 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2789 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2793 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2795 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2799 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2801 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2804 /* The cost of loading values from the constant pool. It should be
2805 larger than the cost of any constant we want to synthesize inline. */
2807 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2809 /* Return the cost of X when used as an operand to the MIPS16 instruction
2810 that implements CODE. Return -1 if there is no such instruction, or if
2811 X is not a valid immediate operand for it. */
2814 mips16_constant_cost (int code, HOST_WIDE_INT x)
2821 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2822 other shifts are extended. The shift patterns truncate the shift
2823 count to the right size, so there are no out-of-range values. */
2824 if (IN_RANGE (x, 1, 8))
2826 return COSTS_N_INSNS (1);
2829 if (IN_RANGE (x, -128, 127))
2831 if (SMALL_OPERAND (x))
2832 return COSTS_N_INSNS (1);
2836 /* Like LE, but reject the always-true case. */
2840 /* We add 1 to the immediate and use SLT. */
2843 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2846 if (IN_RANGE (x, 0, 255))
2848 if (SMALL_OPERAND_UNSIGNED (x))
2849 return COSTS_N_INSNS (1);
2854 /* Equality comparisons with 0 are cheap. */
2864 /* Return true if there is a non-MIPS16 instruction that implements CODE
2865 and if that instruction accepts X as an immediate operand. */
2868 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2875 /* All shift counts are truncated to a valid constant. */
2880 /* Likewise rotates, if the target supports rotates at all. */
2886 /* These instructions take 16-bit unsigned immediates. */
2887 return SMALL_OPERAND_UNSIGNED (x);
2892 /* These instructions take 16-bit signed immediates. */
2893 return SMALL_OPERAND (x);
2899 /* The "immediate" forms of these instructions are really
2900 implemented as comparisons with register 0. */
2905 /* Likewise, meaning that the only valid immediate operand is 1. */
2909 /* We add 1 to the immediate and use SLT. */
2910 return SMALL_OPERAND (x + 1);
2913 /* Likewise SLTU, but reject the always-true case. */
2914 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2918 /* The bit position and size are immediate operands. */
2919 return ISA_HAS_EXT_INS;
2922 /* By default assume that $0 can be used for 0. */
2927 /* Return the cost of binary operation X, given that the instruction
2928 sequence for a word-sized or smaller operation has cost SINGLE_COST
2929 and that the sequence of a double-word operation has cost DOUBLE_COST. */
2932 mips_binary_cost (rtx x, int single_cost, int double_cost)
2936 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
2941 + rtx_cost (XEXP (x, 0), 0)
2942 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
2945 /* Return the cost of floating-point multiplications of mode MODE. */
2948 mips_fp_mult_cost (enum machine_mode mode)
2950 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
2953 /* Return the cost of floating-point divisions of mode MODE. */
2956 mips_fp_div_cost (enum machine_mode mode)
2958 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
2961 /* Return the cost of sign-extending OP to mode MODE, not including the
2962 cost of OP itself. */
2965 mips_sign_extend_cost (enum machine_mode mode, rtx op)
2968 /* Extended loads are as cheap as unextended ones. */
2971 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2972 /* A sign extension from SImode to DImode in 64-bit mode is free. */
2975 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
2976 /* We can use SEB or SEH. */
2977 return COSTS_N_INSNS (1);
2979 /* We need to use a shift left and a shift right. */
2980 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
2983 /* Return the cost of zero-extending OP to mode MODE, not including the
2984 cost of OP itself. */
2987 mips_zero_extend_cost (enum machine_mode mode, rtx op)
2990 /* Extended loads are as cheap as unextended ones. */
2993 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2994 /* We need a shift left by 32 bits and a shift right by 32 bits. */
2995 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
2997 if (GENERATE_MIPS16E)
2998 /* We can use ZEB or ZEH. */
2999 return COSTS_N_INSNS (1);
3002 /* We need to load 0xff or 0xffff into a register and use AND. */
3003 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3005 /* We can use ANDI. */
3006 return COSTS_N_INSNS (1);
3009 /* Implement TARGET_RTX_COSTS. */
3012 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3014 enum machine_mode mode = GET_MODE (x);
3015 bool float_mode_p = FLOAT_MODE_P (mode);
3019 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3020 appear in the instruction stream, and the cost of a comparison is
3021 really the cost of the branch or scc condition. At the time of
3022 writing, gcc only uses an explicit outer COMPARE code when optabs
3023 is testing whether a constant is expensive enough to force into a
3024 register. We want optabs to pass such constants through the MIPS
3025 expanders instead, so make all constants very cheap here. */
3026 if (outer_code == COMPARE)
3028 gcc_assert (CONSTANT_P (x));
3036 /* Treat *clear_upper32-style ANDs as having zero cost in the
3037 second operand. The cost is entirely in the first operand.
3039 ??? This is needed because we would otherwise try to CSE
3040 the constant operand. Although that's the right thing for
3041 instructions that continue to be a register operation throughout
3042 compilation, it is disastrous for instructions that could
3043 later be converted into a memory operation. */
3045 && outer_code == AND
3046 && UINTVAL (x) == 0xffffffff)
3054 cost = mips16_constant_cost (outer_code, INTVAL (x));
3063 /* When not optimizing for size, we care more about the cost
3064 of hot code, and hot code is often in a loop. If a constant
3065 operand needs to be forced into a register, we will often be
3066 able to hoist the constant load out of the loop, so the load
3067 should not contribute to the cost. */
3069 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3081 if (force_to_mem_operand (x, VOIDmode))
3083 *total = COSTS_N_INSNS (1);
3086 cost = mips_const_insns (x);
3089 /* If the constant is likely to be stored in a GPR, SETs of
3090 single-insn constants are as cheap as register sets; we
3091 never want to CSE them.
3093 Don't reduce the cost of storing a floating-point zero in
3094 FPRs. If we have a zero in an FPR for other reasons, we
3095 can get better cfg-cleanup and delayed-branch results by
3096 using it consistently, rather than using $0 sometimes and
3097 an FPR at other times. Also, moves between floating-point
3098 registers are sometimes cheaper than (D)MTC1 $0. */
3100 && outer_code == SET
3101 && !(float_mode_p && TARGET_HARD_FLOAT))
3103 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3104 want to CSE the constant itself. It is usually better to
3105 have N copies of the last operation in the sequence and one
3106 shared copy of the other operations. (Note that this is
3107 not true for MIPS16 code, where the final operation in the
3108 sequence is often an extended instruction.)
3110 Also, if we have a CONST_INT, we don't know whether it is
3111 for a word or doubleword operation, so we cannot rely on
3112 the result of mips_build_integer. */
3113 else if (!TARGET_MIPS16
3114 && (outer_code == SET || mode == VOIDmode))
3116 *total = COSTS_N_INSNS (cost);
3119 /* The value will need to be fetched from the constant pool. */
3120 *total = CONSTANT_POOL_COST;
3124 /* If the address is legitimate, return the number of
3125 instructions it needs. */
3127 cost = mips_address_insns (addr, mode, true);
3130 *total = COSTS_N_INSNS (cost + 1);
3133 /* Check for a scaled indexed address. */
3134 if (mips_lwxs_address_p (addr))
3136 *total = COSTS_N_INSNS (2);
3139 /* Otherwise use the default handling. */
3143 *total = COSTS_N_INSNS (6);
3147 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3151 /* Check for a *clear_upper32 pattern and treat it like a zero
3152 extension. See the pattern's comment for details. */
3155 && CONST_INT_P (XEXP (x, 1))
3156 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3158 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3159 + rtx_cost (XEXP (x, 0), 0));
3166 /* Double-word operations use two single-word operations. */
3167 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3175 if (CONSTANT_P (XEXP (x, 1)))
3176 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3178 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3183 *total = mips_cost->fp_add;
3185 *total = COSTS_N_INSNS (4);
3189 /* Low-part immediates need an extended MIPS16 instruction. */
3190 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3191 + rtx_cost (XEXP (x, 0), 0));
3206 /* Branch comparisons have VOIDmode, so use the first operand's
3208 mode = GET_MODE (XEXP (x, 0));
3209 if (FLOAT_MODE_P (mode))
3211 *total = mips_cost->fp_add;
3214 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3219 && ISA_HAS_NMADD_NMSUB
3220 && TARGET_FUSED_MADD
3221 && !HONOR_NANS (mode)
3222 && !HONOR_SIGNED_ZEROS (mode))
3224 /* See if we can use NMADD or NMSUB. See mips.md for the
3225 associated patterns. */
3226 rtx op0 = XEXP (x, 0);
3227 rtx op1 = XEXP (x, 1);
3228 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3230 *total = (mips_fp_mult_cost (mode)
3231 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3232 + rtx_cost (XEXP (op0, 1), 0)
3233 + rtx_cost (op1, 0));
3236 if (GET_CODE (op1) == MULT)
3238 *total = (mips_fp_mult_cost (mode)
3240 + rtx_cost (XEXP (op1, 0), 0)
3241 + rtx_cost (XEXP (op1, 1), 0));
3251 && TARGET_FUSED_MADD
3252 && GET_CODE (XEXP (x, 0)) == MULT)
3255 *total = mips_cost->fp_add;
3259 /* Double-word operations require three single-word operations and
3260 an SLTU. The MIPS16 version then needs to move the result of
3261 the SLTU from $24 to a MIPS16 register. */
3262 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3263 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3268 && ISA_HAS_NMADD_NMSUB
3269 && TARGET_FUSED_MADD
3270 && !HONOR_NANS (mode)
3271 && HONOR_SIGNED_ZEROS (mode))
3273 /* See if we can use NMADD or NMSUB. See mips.md for the
3274 associated patterns. */
3275 rtx op = XEXP (x, 0);
3276 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3277 && GET_CODE (XEXP (op, 0)) == MULT)
3279 *total = (mips_fp_mult_cost (mode)
3280 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3281 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3282 + rtx_cost (XEXP (op, 1), 0));
3288 *total = mips_cost->fp_add;
3290 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3295 *total = mips_fp_mult_cost (mode);
3296 else if (mode == DImode && !TARGET_64BIT)
3297 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3298 where the mulsidi3 always includes an MFHI and an MFLO. */
3299 *total = (optimize_size
3300 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3301 : mips_cost->int_mult_si * 3 + 6);
3302 else if (optimize_size)
3303 *total = (ISA_HAS_MUL3 ? 1 : 2);
3304 else if (mode == DImode)
3305 *total = mips_cost->int_mult_di;
3307 *total = mips_cost->int_mult_si;
3311 /* Check for a reciprocal. */
3312 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3315 && flag_unsafe_math_optimizations
3316 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3318 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3319 division as being free. */
3320 *total = rtx_cost (XEXP (x, 1), 0);
3325 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3335 *total = mips_fp_div_cost (mode);
3344 /* It is our responsibility to make division by a power of 2
3345 as cheap as 2 register additions if we want the division
3346 expanders to be used for such operations; see the setting
3347 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3348 should always produce shorter code than using
3349 expand_sdiv2_pow2. */
3351 && CONST_INT_P (XEXP (x, 1))
3352 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3354 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3357 *total = COSTS_N_INSNS (mips_idiv_insns ());
3359 else if (mode == DImode)
3360 *total = mips_cost->int_div_di;
3362 *total = mips_cost->int_div_si;
3366 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3370 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3374 case UNSIGNED_FLOAT:
3377 case FLOAT_TRUNCATE:
3378 *total = mips_cost->fp_add;
3386 /* Provide the costs of an addressing mode that contains ADDR.
3387 If ADDR is not a valid address, its cost is irrelevant. */
3390 mips_address_cost (rtx addr)
3392 return mips_address_insns (addr, SImode, false);
3395 /* Return one word of double-word value OP, taking into account the fixed
3396 endianness of certain registers. HIGH_P is true to select the high part,
3397 false to select the low part. */
3400 mips_subword (rtx op, int high_p)
3402 unsigned int byte, offset;
3403 enum machine_mode mode;
3405 mode = GET_MODE (op);
3406 if (mode == VOIDmode)
3409 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3410 byte = UNITS_PER_WORD;
3414 if (FP_REG_RTX_P (op))
3416 /* Paired FPRs are always ordered little-endian. */
3417 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3418 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3422 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3424 return simplify_gen_subreg (word_mode, op, mode, byte);
3428 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3431 mips_split_64bit_move_p (rtx dest, rtx src)
3436 /* FP->FP moves can be done in a single instruction. */
3437 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3440 /* Check for floating-point loads and stores. They can be done using
3441 ldc1 and sdc1 on MIPS II and above. */
3444 if (FP_REG_RTX_P (dest) && MEM_P (src))
3446 if (FP_REG_RTX_P (src) && MEM_P (dest))
3453 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3454 this function handles 64-bit moves for which mips_split_64bit_move_p
3455 holds. For 64-bit targets, this function handles 128-bit moves. */
3458 mips_split_doubleword_move (rtx dest, rtx src)
3460 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3462 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3463 emit_insn (gen_move_doubleword_fprdi (dest, src));
3464 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3465 emit_insn (gen_move_doubleword_fprdf (dest, src));
3466 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3467 emit_insn (gen_move_doubleword_fprtf (dest, src));
3473 /* The operation can be split into two normal moves. Decide in
3474 which order to do them. */
3477 low_dest = mips_subword (dest, 0);
3478 if (REG_P (low_dest)
3479 && reg_overlap_mentioned_p (low_dest, src))
3481 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3482 mips_emit_move (low_dest, mips_subword (src, 0));
3486 mips_emit_move (low_dest, mips_subword (src, 0));
3487 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3492 /* Return the appropriate instructions to move SRC into DEST. Assume
3493 that SRC is operand 1 and DEST is operand 0. */
3496 mips_output_move (rtx dest, rtx src)
3498 enum rtx_code dest_code, src_code;
3499 enum mips_symbol_type symbol_type;
3502 dest_code = GET_CODE (dest);
3503 src_code = GET_CODE (src);
3504 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3506 if (dbl_p && mips_split_64bit_move_p (dest, src))
3509 if ((src_code == REG && GP_REG_P (REGNO (src)))
3510 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3512 if (dest_code == REG)
3514 if (GP_REG_P (REGNO (dest)))
3515 return "move\t%0,%z1";
3517 if (MD_REG_P (REGNO (dest)))
3520 if (DSP_ACC_REG_P (REGNO (dest)))
3522 static char retval[] = "mt__\t%z1,%q0";
3523 retval[2] = reg_names[REGNO (dest)][4];
3524 retval[3] = reg_names[REGNO (dest)][5];
3528 if (FP_REG_P (REGNO (dest)))
3529 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3531 if (ALL_COP_REG_P (REGNO (dest)))
3533 static char retval[] = "dmtc_\t%z1,%0";
3535 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3536 return (dbl_p ? retval : retval + 1);
3539 if (dest_code == MEM)
3540 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3542 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3544 if (src_code == REG)
3546 if (DSP_ACC_REG_P (REGNO (src)))
3548 static char retval[] = "mf__\t%0,%q1";
3549 retval[2] = reg_names[REGNO (src)][4];
3550 retval[3] = reg_names[REGNO (src)][5];
3554 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3555 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3557 if (FP_REG_P (REGNO (src)))
3558 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3560 if (ALL_COP_REG_P (REGNO (src)))
3562 static char retval[] = "dmfc_\t%0,%1";
3564 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3565 return (dbl_p ? retval : retval + 1);
3569 if (src_code == MEM)
3570 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3572 if (src_code == CONST_INT)
3574 /* Don't use the X format, because that will give out of
3575 range numbers for 64-bit hosts and 32-bit targets. */
3577 return "li\t%0,%1\t\t\t# %X1";
3579 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3582 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3586 if (src_code == HIGH)
3587 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3589 if (CONST_GP_P (src))
3590 return "move\t%0,%1";
3592 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3593 && mips_lo_relocs[symbol_type] != 0)
3595 /* A signed 16-bit constant formed by applying a relocation
3596 operator to a symbolic address. */
3597 gcc_assert (!mips_split_p[symbol_type]);
3598 return "li\t%0,%R1";
3601 if (symbolic_operand (src, VOIDmode))
3603 gcc_assert (TARGET_MIPS16
3604 ? TARGET_MIPS16_TEXT_LOADS
3605 : !TARGET_EXPLICIT_RELOCS);
3606 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3609 if (src_code == REG && FP_REG_P (REGNO (src)))
3611 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3613 if (GET_MODE (dest) == V2SFmode)
3614 return "mov.ps\t%0,%1";
3616 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3619 if (dest_code == MEM)
3620 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3622 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3624 if (src_code == MEM)
3625 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3627 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3629 static char retval[] = "l_c_\t%0,%1";
3631 retval[1] = (dbl_p ? 'd' : 'w');
3632 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3635 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3637 static char retval[] = "s_c_\t%1,%0";
3639 retval[1] = (dbl_p ? 'd' : 'w');
3640 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3646 /* Return true if CMP1 is a suitable second operand for relational
3647 operator CODE. See also the *sCC patterns in mips.md. */
3650 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3656 return reg_or_0_operand (cmp1, VOIDmode);
3660 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3664 return arith_operand (cmp1, VOIDmode);
3667 return sle_operand (cmp1, VOIDmode);
3670 return sleu_operand (cmp1, VOIDmode);
3677 /* Canonicalize LE or LEU comparisons into LT comparisons when
3678 possible to avoid extra instructions or inverting the
3682 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3683 enum machine_mode mode)
3685 HOST_WIDE_INT original, plus_one;
3687 if (GET_CODE (*cmp1) != CONST_INT)
3690 original = INTVAL (*cmp1);
3691 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3696 if (original < plus_one)
3699 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3708 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3721 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3722 result in TARGET. CMP0 and TARGET are register_operands that have
3723 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3724 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3727 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3728 rtx target, rtx cmp0, rtx cmp1)
3730 /* First see if there is a MIPS instruction that can do this operation
3731 with CMP1 in its current form. If not, try to canonicalize the
3732 comparison to LT. If that fails, try doing the same for the
3733 inverse operation. If that also fails, force CMP1 into a register
3735 if (mips_relational_operand_ok_p (code, cmp1))
3736 mips_emit_binary (code, target, cmp0, cmp1);
3737 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3738 mips_emit_binary (code, target, cmp0, cmp1);
3741 enum rtx_code inv_code = reverse_condition (code);
3742 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3744 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3745 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3747 else if (invert_ptr == 0)
3749 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3750 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3751 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3755 *invert_ptr = !*invert_ptr;
3756 mips_emit_binary (inv_code, target, cmp0, cmp1);
3761 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3762 The register will have the same mode as CMP0. */
3765 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3767 if (cmp1 == const0_rtx)
3770 if (uns_arith_operand (cmp1, VOIDmode))
3771 return expand_binop (GET_MODE (cmp0), xor_optab,
3772 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3774 return expand_binop (GET_MODE (cmp0), sub_optab,
3775 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3778 /* Convert *CODE into a code that can be used in a floating-point
3779 scc instruction (c.<cond>.<fmt>). Return true if the values of
3780 the condition code registers will be inverted, with 0 indicating
3781 that the condition holds. */
3784 mips_reverse_fp_cond_p (enum rtx_code *code)
3791 *code = reverse_condition_maybe_unordered (*code);
3799 /* Convert a comparison into something that can be used in a branch or
3800 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3801 being compared and *CODE is the code used to compare them.
3803 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3804 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3805 otherwise any standard branch condition can be used. The standard branch
3808 - EQ/NE between two registers.
3809 - any comparison between a register and zero. */
3812 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3814 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3816 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3818 *op0 = cmp_operands[0];
3819 *op1 = cmp_operands[1];
3821 else if (*code == EQ || *code == NE)
3825 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3830 *op0 = cmp_operands[0];
3831 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3836 /* The comparison needs a separate scc instruction. Store the
3837 result of the scc in *OP0 and compare it against zero. */
3838 bool invert = false;
3839 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3841 mips_emit_int_relational (*code, &invert, *op0,
3842 cmp_operands[0], cmp_operands[1]);
3843 *code = (invert ? EQ : NE);
3846 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3848 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3849 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3855 enum rtx_code cmp_code;
3857 /* Floating-point tests use a separate c.cond.fmt comparison to
3858 set a condition code register. The branch or conditional move
3859 will then compare that register against zero.
3861 Set CMP_CODE to the code of the comparison instruction and
3862 *CODE to the code that the branch or move should use. */
3864 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3866 ? gen_reg_rtx (CCmode)
3867 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3869 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3873 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3874 Store the result in TARGET and return true if successful.
3876 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3879 mips_emit_scc (enum rtx_code code, rtx target)
3881 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3884 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3885 if (code == EQ || code == NE)
3887 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3888 mips_emit_binary (code, target, zie, const0_rtx);
3891 mips_emit_int_relational (code, 0, target,
3892 cmp_operands[0], cmp_operands[1]);
3896 /* Emit the common code for doing conditional branches.
3897 operand[0] is the label to jump to.
3898 The comparison operands are saved away by cmp{si,di,sf,df}. */
3901 gen_conditional_branch (rtx *operands, enum rtx_code code)
3903 rtx op0, op1, condition;
3905 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3906 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3907 emit_jump_insn (gen_condjump (condition, operands[0]));
3912 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3913 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3916 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3917 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3922 reversed_p = mips_reverse_fp_cond_p (&cond);
3923 cmp_result = gen_reg_rtx (CCV2mode);
3924 emit_insn (gen_scc_ps (cmp_result,
3925 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3927 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3930 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3934 /* Emit the common code for conditional moves. OPERANDS is the array
3935 of operands passed to the conditional move define_expand. */
3938 gen_conditional_move (rtx *operands)
3943 code = GET_CODE (operands[1]);
3944 mips_emit_compare (&code, &op0, &op1, true);
3945 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3946 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3947 gen_rtx_fmt_ee (code,
3950 operands[2], operands[3])));
3953 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3954 the conditional_trap expander. */
3957 mips_gen_conditional_trap (rtx *operands)
3960 enum rtx_code cmp_code = GET_CODE (operands[0]);
3961 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3963 /* MIPS conditional trap machine instructions don't have GT or LE
3964 flavors, so we must invert the comparison and convert to LT and
3965 GE, respectively. */
3968 case GT: cmp_code = LT; break;
3969 case LE: cmp_code = GE; break;
3970 case GTU: cmp_code = LTU; break;
3971 case LEU: cmp_code = GEU; break;
3974 if (cmp_code == GET_CODE (operands[0]))
3976 op0 = cmp_operands[0];
3977 op1 = cmp_operands[1];
3981 op0 = cmp_operands[1];
3982 op1 = cmp_operands[0];
3984 op0 = force_reg (mode, op0);
3985 if (!arith_operand (op1, mode))
3986 op1 = force_reg (mode, op1);
3988 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3989 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3993 /* Argument support functions. */
3995 /* Initialize CUMULATIVE_ARGS for a function. */
3998 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3999 rtx libname ATTRIBUTE_UNUSED)
4001 static CUMULATIVE_ARGS zero_cum;
4002 tree param, next_param;
4005 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4007 /* Determine if this function has variable arguments. This is
4008 indicated by the last argument being 'void_type_mode' if there
4009 are no variable arguments. The standard MIPS calling sequence
4010 passes all arguments in the general purpose registers in this case. */
4012 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4013 param != 0; param = next_param)
4015 next_param = TREE_CHAIN (param);
4016 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4017 cum->gp_reg_found = 1;
4022 /* Fill INFO with information about a single argument. CUM is the
4023 cumulative state for earlier arguments. MODE is the mode of this
4024 argument and TYPE is its type (if known). NAMED is true if this
4025 is a named (fixed) argument rather than a variable one. */
4028 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4029 tree type, int named, struct mips_arg_info *info)
4031 bool doubleword_aligned_p;
4032 unsigned int num_bytes, num_words, max_regs;
4034 /* Work out the size of the argument. */
4035 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4036 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4038 /* Decide whether it should go in a floating-point register, assuming
4039 one is free. Later code checks for availability.
4041 The checks against UNITS_PER_FPVALUE handle the soft-float and
4042 single-float cases. */
4046 /* The EABI conventions have traditionally been defined in terms
4047 of TYPE_MODE, regardless of the actual type. */
4048 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4049 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4050 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4055 /* Only leading floating-point scalars are passed in
4056 floating-point registers. We also handle vector floats the same
4057 say, which is OK because they are not covered by the standard ABI. */
4058 info->fpr_p = (!cum->gp_reg_found
4059 && cum->arg_number < 2
4060 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4061 || VECTOR_FLOAT_TYPE_P (type))
4062 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4063 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4064 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4069 /* Scalar and complex floating-point types are passed in
4070 floating-point registers. */
4071 info->fpr_p = (named
4072 && (type == 0 || FLOAT_TYPE_P (type))
4073 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4074 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4075 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4076 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4078 /* ??? According to the ABI documentation, the real and imaginary
4079 parts of complex floats should be passed in individual registers.
4080 The real and imaginary parts of stack arguments are supposed
4081 to be contiguous and there should be an extra word of padding
4084 This has two problems. First, it makes it impossible to use a
4085 single "void *" va_list type, since register and stack arguments
4086 are passed differently. (At the time of writing, MIPSpro cannot
4087 handle complex float varargs correctly.) Second, it's unclear
4088 what should happen when there is only one register free.
4090 For now, we assume that named complex floats should go into FPRs
4091 if there are two FPRs free, otherwise they should be passed in the
4092 same way as a struct containing two floats. */
4094 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4095 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4097 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4098 info->fpr_p = false;
4108 /* See whether the argument has doubleword alignment. */
4109 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4111 /* Set REG_OFFSET to the register count we're interested in.
4112 The EABI allocates the floating-point registers separately,
4113 but the other ABIs allocate them like integer registers. */
4114 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4118 /* Advance to an even register if the argument is doubleword-aligned. */
4119 if (doubleword_aligned_p)
4120 info->reg_offset += info->reg_offset & 1;
4122 /* Work out the offset of a stack argument. */
4123 info->stack_offset = cum->stack_words;
4124 if (doubleword_aligned_p)
4125 info->stack_offset += info->stack_offset & 1;
4127 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4129 /* Partition the argument between registers and stack. */
4130 info->reg_words = MIN (num_words, max_regs);
4131 info->stack_words = num_words - info->reg_words;
4134 /* INFO describes an argument that is passed in a single-register value.
4135 Return the register it uses, assuming that FPRs are available if
4139 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4141 if (!info->fpr_p || !hard_float_p)
4142 return GP_ARG_FIRST + info->reg_offset;
4143 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4144 /* In o32, the second argument is always passed in $f14
4145 for TARGET_DOUBLE_FLOAT, regardless of whether the
4146 first argument was a word or doubleword. */
4147 return FP_ARG_FIRST + 2;
4149 return FP_ARG_FIRST + info->reg_offset;
4153 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4155 return !TARGET_OLDABI;
4158 /* Implement FUNCTION_ARG. */
4161 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4162 tree type, int named)
4164 struct mips_arg_info info;
4166 /* We will be called with a mode of VOIDmode after the last argument
4167 has been seen. Whatever we return will be passed to the call
4168 insn. If we need a mips16 fp_code, return a REG with the code
4169 stored as the mode. */
4170 if (mode == VOIDmode)
4172 if (TARGET_MIPS16 && cum->fp_code != 0)
4173 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4179 mips_arg_info (cum, mode, type, named, &info);
4181 /* Return straight away if the whole argument is passed on the stack. */
4182 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4186 && TREE_CODE (type) == RECORD_TYPE
4188 && TYPE_SIZE_UNIT (type)
4189 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4192 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4193 structure contains a double in its entirety, then that 64-bit
4194 chunk is passed in a floating point register. */
4197 /* First check to see if there is any such field. */
4198 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4199 if (TREE_CODE (field) == FIELD_DECL
4200 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4201 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4202 && host_integerp (bit_position (field), 0)
4203 && int_bit_position (field) % BITS_PER_WORD == 0)
4208 /* Now handle the special case by returning a PARALLEL
4209 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4210 chunks are passed in registers. */
4212 HOST_WIDE_INT bitpos;
4215 /* assign_parms checks the mode of ENTRY_PARM, so we must
4216 use the actual mode here. */
4217 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4220 field = TYPE_FIELDS (type);
4221 for (i = 0; i < info.reg_words; i++)
4225 for (; field; field = TREE_CHAIN (field))
4226 if (TREE_CODE (field) == FIELD_DECL
4227 && int_bit_position (field) >= bitpos)
4231 && int_bit_position (field) == bitpos
4232 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4233 && !TARGET_SOFT_FLOAT
4234 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4235 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4237 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4240 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4241 GEN_INT (bitpos / BITS_PER_UNIT));
4243 bitpos += BITS_PER_WORD;
4249 /* Handle the n32/n64 conventions for passing complex floating-point
4250 arguments in FPR pairs. The real part goes in the lower register
4251 and the imaginary part goes in the upper register. */
4254 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4257 enum machine_mode inner;
4260 inner = GET_MODE_INNER (mode);
4261 reg = FP_ARG_FIRST + info.reg_offset;
4262 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4264 /* Real part in registers, imaginary part on stack. */
4265 gcc_assert (info.stack_words == info.reg_words);
4266 return gen_rtx_REG (inner, reg);
4270 gcc_assert (info.stack_words == 0);
4271 real = gen_rtx_EXPR_LIST (VOIDmode,
4272 gen_rtx_REG (inner, reg),
4274 imag = gen_rtx_EXPR_LIST (VOIDmode,
4276 reg + info.reg_words / 2),
4277 GEN_INT (GET_MODE_SIZE (inner)));
4278 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4282 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4285 /* Implement FUNCTION_ARG_ADVANCE. */
4288 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4289 tree type, int named)
4291 struct mips_arg_info info;
4293 mips_arg_info (cum, mode, type, named, &info);
4296 cum->gp_reg_found = true;
4298 /* See the comment above the cumulative args structure in mips.h
4299 for an explanation of what this code does. It assumes the O32
4300 ABI, which passes at most 2 arguments in float registers. */
4301 if (cum->arg_number < 2 && info.fpr_p)
4302 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4304 if (mips_abi != ABI_EABI || !info.fpr_p)
4305 cum->num_gprs = info.reg_offset + info.reg_words;
4306 else if (info.reg_words > 0)
4307 cum->num_fprs += MAX_FPRS_PER_FMT;
4309 if (info.stack_words > 0)
4310 cum->stack_words = info.stack_offset + info.stack_words;
4315 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4318 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4319 enum machine_mode mode, tree type, bool named)
4321 struct mips_arg_info info;
4323 mips_arg_info (cum, mode, type, named, &info);
4324 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4328 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4329 PARM_BOUNDARY bits of alignment, but will be given anything up
4330 to STACK_BOUNDARY bits if the type requires it. */
4333 function_arg_boundary (enum machine_mode mode, tree type)
4335 unsigned int alignment;
4337 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4338 if (alignment < PARM_BOUNDARY)
4339 alignment = PARM_BOUNDARY;
4340 if (alignment > STACK_BOUNDARY)
4341 alignment = STACK_BOUNDARY;
4345 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4346 upward rather than downward. In other words, return true if the
4347 first byte of the stack slot has useful data, false if the last
4351 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4353 /* On little-endian targets, the first byte of every stack argument
4354 is passed in the first byte of the stack slot. */
4355 if (!BYTES_BIG_ENDIAN)
4358 /* Otherwise, integral types are padded downward: the last byte of a
4359 stack argument is passed in the last byte of the stack slot. */
4361 ? (INTEGRAL_TYPE_P (type)
4362 || POINTER_TYPE_P (type)
4363 || FIXED_POINT_TYPE_P (type))
4364 : (GET_MODE_CLASS (mode) == MODE_INT
4365 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4368 /* Big-endian o64 pads floating-point arguments downward. */
4369 if (mips_abi == ABI_O64)
4370 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4373 /* Other types are padded upward for o32, o64, n32 and n64. */
4374 if (mips_abi != ABI_EABI)
4377 /* Arguments smaller than a stack slot are padded downward. */
4378 if (mode != BLKmode)
4379 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4381 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4385 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4386 if the least significant byte of the register has useful data. Return
4387 the opposite if the most significant byte does. */
4390 mips_pad_reg_upward (enum machine_mode mode, tree type)
4392 /* No shifting is required for floating-point arguments. */
4393 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4394 return !BYTES_BIG_ENDIAN;
4396 /* Otherwise, apply the same padding to register arguments as we do
4397 to stack arguments. */
4398 return mips_pad_arg_upward (mode, type);
4402 /* Return nonzero when an argument must be passed by reference. */
4405 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4406 enum machine_mode mode, const_tree type,
4407 bool named ATTRIBUTE_UNUSED)
4409 if (mips_abi == ABI_EABI)
4413 /* ??? How should SCmode be handled? */
4414 if (mode == DImode || mode == DFmode
4415 || mode == DQmode || mode == UDQmode
4416 || mode == DAmode || mode == UDAmode)
4419 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4420 return size == -1 || size > UNITS_PER_WORD;
4424 /* If we have a variable-sized parameter, we have no choice. */
4425 return targetm.calls.must_pass_in_stack (mode, type);
4430 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4431 enum machine_mode mode ATTRIBUTE_UNUSED,
4432 const_tree type ATTRIBUTE_UNUSED, bool named)
4434 return mips_abi == ABI_EABI && named;
4437 /* See whether VALTYPE is a record whose fields should be returned in
4438 floating-point registers. If so, return the number of fields and
4439 list them in FIELDS (which should have two elements). Return 0
4442 For n32 & n64, a structure with one or two fields is returned in
4443 floating-point registers as long as every field has a floating-point
4447 mips_fpr_return_fields (const_tree valtype, tree *fields)
4455 if (TREE_CODE (valtype) != RECORD_TYPE)
4459 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4461 if (TREE_CODE (field) != FIELD_DECL)
4464 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
4470 fields[i++] = field;
4476 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4477 a value in the most significant part of $2/$3 if:
4479 - the target is big-endian;
4481 - the value has a structure or union type (we generalize this to
4482 cover aggregates from other languages too); and
4484 - the structure is not returned in floating-point registers. */
4487 mips_return_in_msb (const_tree valtype)
4491 return (TARGET_NEWABI
4492 && TARGET_BIG_ENDIAN
4493 && AGGREGATE_TYPE_P (valtype)
4494 && mips_fpr_return_fields (valtype, fields) == 0);
4498 /* Return true if the function return value MODE will get returned in a
4499 floating-point register. */
4502 mips_return_mode_in_fpr_p (enum machine_mode mode)
4504 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4505 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4506 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4507 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4510 /* Return a composite value in a pair of floating-point registers.
4511 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4512 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4515 For n32 & n64, $f0 always holds the first value and $f2 the second.
4516 Otherwise the values are packed together as closely as possible. */
4519 mips_return_fpr_pair (enum machine_mode mode,
4520 enum machine_mode mode1, HOST_WIDE_INT offset1,
4521 enum machine_mode mode2, HOST_WIDE_INT offset2)
4525 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4526 return gen_rtx_PARALLEL
4529 gen_rtx_EXPR_LIST (VOIDmode,
4530 gen_rtx_REG (mode1, FP_RETURN),
4532 gen_rtx_EXPR_LIST (VOIDmode,
4533 gen_rtx_REG (mode2, FP_RETURN + inc),
4534 GEN_INT (offset2))));
4539 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4540 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4541 VALTYPE is null and MODE is the mode of the return value. */
4544 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
4545 enum machine_mode mode)
4552 mode = TYPE_MODE (valtype);
4553 unsignedp = TYPE_UNSIGNED (valtype);
4555 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
4556 true, we must promote the mode just as PROMOTE_MODE does. */
4557 mode = promote_mode (valtype, mode, &unsignedp, 1);
4559 /* Handle structures whose fields are returned in $f0/$f2. */
4560 switch (mips_fpr_return_fields (valtype, fields))
4563 return gen_rtx_REG (mode, FP_RETURN);
4566 return mips_return_fpr_pair (mode,
4567 TYPE_MODE (TREE_TYPE (fields[0])),
4568 int_byte_position (fields[0]),
4569 TYPE_MODE (TREE_TYPE (fields[1])),
4570 int_byte_position (fields[1]));
4573 /* If a value is passed in the most significant part of a register, see
4574 whether we have to round the mode up to a whole number of words. */
4575 if (mips_return_in_msb (valtype))
4577 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4578 if (size % UNITS_PER_WORD != 0)
4580 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4581 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4585 /* For EABI, the class of return register depends entirely on MODE.
4586 For example, "struct { some_type x; }" and "union { some_type x; }"
4587 are returned in the same way as a bare "some_type" would be.
4588 Other ABIs only use FPRs for scalar, complex or vector types. */
4589 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4590 return gen_rtx_REG (mode, GP_RETURN);
4595 /* Handle long doubles for n32 & n64. */
4597 return mips_return_fpr_pair (mode,
4599 DImode, GET_MODE_SIZE (mode) / 2);
4601 if (mips_return_mode_in_fpr_p (mode))
4603 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4604 return mips_return_fpr_pair (mode,
4605 GET_MODE_INNER (mode), 0,
4606 GET_MODE_INNER (mode),
4607 GET_MODE_SIZE (mode) / 2);
4609 return gen_rtx_REG (mode, FP_RETURN);
4613 return gen_rtx_REG (mode, GP_RETURN);
4616 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
4617 all BLKmode objects are returned in memory. Under the new (N32 and
4618 64-bit MIPS ABIs) small structures are returned in a register.
4619 Objects with varying size must still be returned in memory, of
4623 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4626 return (TYPE_MODE (type) == BLKmode);
4628 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
4629 || (int_size_in_bytes (type) == -1));
4633 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4634 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4637 CUMULATIVE_ARGS local_cum;
4638 int gp_saved, fp_saved;
4640 /* The caller has advanced CUM up to, but not beyond, the last named
4641 argument. Advance a local copy of CUM past the last "real" named
4642 argument, to find out how many registers are left over. */
4645 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4647 /* Found out how many registers we need to save. */
4648 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4649 fp_saved = (EABI_FLOAT_VARARGS_P
4650 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4659 ptr = plus_constant (virtual_incoming_args_rtx,
4660 REG_PARM_STACK_SPACE (cfun->decl)
4661 - gp_saved * UNITS_PER_WORD);
4662 mem = gen_rtx_MEM (BLKmode, ptr);
4663 set_mem_alias_set (mem, get_varargs_alias_set ());
4665 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4670 /* We can't use move_block_from_reg, because it will use
4672 enum machine_mode mode;
4675 /* Set OFF to the offset from virtual_incoming_args_rtx of
4676 the first float register. The FP save area lies below
4677 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4678 off = -gp_saved * UNITS_PER_WORD;
4679 off &= ~(UNITS_PER_FPVALUE - 1);
4680 off -= fp_saved * UNITS_PER_FPREG;
4682 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4684 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4685 i += MAX_FPRS_PER_FMT)
4689 ptr = plus_constant (virtual_incoming_args_rtx, off);
4690 mem = gen_rtx_MEM (mode, ptr);
4691 set_mem_alias_set (mem, get_varargs_alias_set ());
4692 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4693 off += UNITS_PER_HWFPVALUE;
4697 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4698 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4699 + fp_saved * UNITS_PER_FPREG);
4702 /* Create the va_list data type.
4703 We keep 3 pointers, and two offsets.
4704 Two pointers are to the overflow area, which starts at the CFA.
4705 One of these is constant, for addressing into the GPR save area below it.
4706 The other is advanced up the stack through the overflow region.
4707 The third pointer is to the GPR save area. Since the FPR save area
4708 is just below it, we can address FPR slots off this pointer.
4709 We also keep two one-byte offsets, which are to be subtracted from the
4710 constant pointers to yield addresses in the GPR and FPR save areas.
4711 These are downcounted as float or non-float arguments are used,
4712 and when they get to zero, the argument must be obtained from the
4714 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4715 pointer is enough. It's started at the GPR save area, and is
4717 Note that the GPR save area is not constant size, due to optimization
4718 in the prologue. Hence, we can't use a design with two pointers
4719 and two offsets, although we could have designed this with two pointers
4720 and three offsets. */
4723 mips_build_builtin_va_list (void)
4725 if (EABI_FLOAT_VARARGS_P)
4727 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4730 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4732 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4734 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4736 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4738 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4739 unsigned_char_type_node);
4740 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4741 unsigned_char_type_node);
4742 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4743 warn on every user file. */
4744 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4745 array = build_array_type (unsigned_char_type_node,
4746 build_index_type (index));
4747 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4749 DECL_FIELD_CONTEXT (f_ovfl) = record;
4750 DECL_FIELD_CONTEXT (f_gtop) = record;
4751 DECL_FIELD_CONTEXT (f_ftop) = record;
4752 DECL_FIELD_CONTEXT (f_goff) = record;
4753 DECL_FIELD_CONTEXT (f_foff) = record;
4754 DECL_FIELD_CONTEXT (f_res) = record;
4756 TYPE_FIELDS (record) = f_ovfl;
4757 TREE_CHAIN (f_ovfl) = f_gtop;
4758 TREE_CHAIN (f_gtop) = f_ftop;
4759 TREE_CHAIN (f_ftop) = f_goff;
4760 TREE_CHAIN (f_goff) = f_foff;
4761 TREE_CHAIN (f_foff) = f_res;
4763 layout_type (record);
4766 else if (TARGET_IRIX && TARGET_IRIX6)
4767 /* On IRIX 6, this type is 'char *'. */
4768 return build_pointer_type (char_type_node);
4770 /* Otherwise, we use 'void *'. */
4771 return ptr_type_node;
4774 /* Implement va_start. */
4777 mips_va_start (tree valist, rtx nextarg)
4779 if (EABI_FLOAT_VARARGS_P)
4781 const CUMULATIVE_ARGS *cum;
4782 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4783 tree ovfl, gtop, ftop, goff, foff;
4785 int gpr_save_area_size;
4786 int fpr_save_area_size;
4789 cum = ¤t_function_args_info;
4791 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4793 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4795 f_ovfl = TYPE_FIELDS (va_list_type_node);
4796 f_gtop = TREE_CHAIN (f_ovfl);
4797 f_ftop = TREE_CHAIN (f_gtop);
4798 f_goff = TREE_CHAIN (f_ftop);
4799 f_foff = TREE_CHAIN (f_goff);
4801 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4803 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4805 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4807 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4809 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4812 /* Emit code to initialize OVFL, which points to the next varargs
4813 stack argument. CUM->STACK_WORDS gives the number of stack
4814 words used by named arguments. */
4815 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4816 if (cum->stack_words > 0)
4817 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4818 size_int (cum->stack_words * UNITS_PER_WORD));
4819 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4820 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4822 /* Emit code to initialize GTOP, the top of the GPR save area. */
4823 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4824 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4825 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4827 /* Emit code to initialize FTOP, the top of the FPR save area.
4828 This address is gpr_save_area_bytes below GTOP, rounded
4829 down to the next fp-aligned boundary. */
4830 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4831 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4832 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4834 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4835 size_int (-fpr_offset));
4836 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4837 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4839 /* Emit code to initialize GOFF, the offset from GTOP of the
4840 next GPR argument. */
4841 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4842 build_int_cst (NULL_TREE, gpr_save_area_size));
4843 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4845 /* Likewise emit code to initialize FOFF, the offset from FTOP
4846 of the next FPR argument. */
4847 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4848 build_int_cst (NULL_TREE, fpr_save_area_size));
4849 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4853 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4854 std_expand_builtin_va_start (valist, nextarg);
4858 /* Implement va_arg. */
4861 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4863 HOST_WIDE_INT size, rsize;
4867 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4870 type = build_pointer_type (type);
4872 size = int_size_in_bytes (type);
4873 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4875 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4876 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4879 /* Not a simple merged stack. */
4881 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4882 tree ovfl, top, off, align;
4883 HOST_WIDE_INT osize;
4886 f_ovfl = TYPE_FIELDS (va_list_type_node);
4887 f_gtop = TREE_CHAIN (f_ovfl);
4888 f_ftop = TREE_CHAIN (f_gtop);
4889 f_goff = TREE_CHAIN (f_ftop);
4890 f_foff = TREE_CHAIN (f_goff);
4892 /* We maintain separate pointers and offsets for floating-point
4893 and integer arguments, but we need similar code in both cases.
4896 TOP be the top of the register save area;
4897 OFF be the offset from TOP of the next register;
4898 ADDR_RTX be the address of the argument;
4899 RSIZE be the number of bytes used to store the argument
4900 when it's in the register save area;
4901 OSIZE be the number of bytes used to store it when it's
4902 in the stack overflow area; and
4903 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4905 The code we want is:
4907 1: off &= -rsize; // round down
4910 4: addr_rtx = top - off;
4915 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4916 10: addr_rtx = ovfl + PADDING;
4920 [1] and [9] can sometimes be optimized away. */
4922 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4925 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4926 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4928 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4930 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4933 /* When floating-point registers are saved to the stack,
4934 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4935 of the float's precision. */
4936 rsize = UNITS_PER_HWFPVALUE;
4938 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4939 (= PARM_BOUNDARY bits). This can be different from RSIZE
4942 (1) On 32-bit targets when TYPE is a structure such as:
4944 struct s { float f; };
4946 Such structures are passed in paired FPRs, so RSIZE
4947 will be 8 bytes. However, the structure only takes
4948 up 4 bytes of memory, so OSIZE will only be 4.
4950 (2) In combinations such as -mgp64 -msingle-float
4951 -fshort-double. Doubles passed in registers
4952 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4953 but those passed on the stack take up
4954 UNITS_PER_WORD bytes. */
4955 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4959 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4961 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4963 if (rsize > UNITS_PER_WORD)
4965 /* [1] Emit code for: off &= -rsize. */
4966 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4967 build_int_cst (NULL_TREE, -rsize));
4968 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4969 gimplify_and_add (t, pre_p);
4974 /* [2] Emit code to branch if off == 0. */
4975 t = build2 (NE_EXPR, boolean_type_node, off,
4976 build_int_cst (TREE_TYPE (off), 0));
4977 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4979 /* [5] Emit code for: off -= rsize. We do this as a form of
4980 post-increment not available to C. Also widen for the
4981 coming pointer arithmetic. */
4982 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4983 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4984 t = fold_convert (sizetype, t);
4985 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4987 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4988 the argument has RSIZE - SIZE bytes of leading padding. */
4989 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4990 if (BYTES_BIG_ENDIAN && rsize > size)
4992 u = size_int (rsize - size);
4993 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4995 COND_EXPR_THEN (addr) = t;
4997 if (osize > UNITS_PER_WORD)
4999 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
5000 u = size_int (osize - 1);
5001 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5002 t = fold_convert (sizetype, t);
5003 u = size_int (-osize);
5004 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5005 t = fold_convert (TREE_TYPE (ovfl), t);
5006 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5011 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5012 post-increment ovfl by osize. On big-endian machines,
5013 the argument has OSIZE - SIZE bytes of leading padding. */
5014 u = fold_convert (TREE_TYPE (ovfl),
5015 build_int_cst (NULL_TREE, osize));
5016 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5017 if (BYTES_BIG_ENDIAN && osize > size)
5019 u = size_int (osize - size);
5020 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5023 /* String [9] and [10,11] together. */
5025 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5026 COND_EXPR_ELSE (addr) = t;
5028 addr = fold_convert (build_pointer_type (type), addr);
5029 addr = build_va_arg_indirect_ref (addr);
5033 addr = build_va_arg_indirect_ref (addr);
5038 /* We keep a list of functions for which we have already built stubs
5039 in build_mips16_call_stub. */
5043 struct mips16_stub *next;
5048 static struct mips16_stub *mips16_stubs;
5050 /* Return a two-character string representing a function floating-point
5051 return mode, used to name MIPS16 function stubs. */
5054 mips16_call_stub_mode_suffix (enum machine_mode mode)
5058 else if (mode == DFmode)
5060 else if (mode == SCmode)
5062 else if (mode == DCmode)
5064 else if (mode == V2SFmode)
5070 /* Write out code to move floating point arguments in or out of
5071 general registers. Output the instructions to FILE. FP_CODE is
5072 the code describing which arguments are present (see the comment at
5073 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
5074 we are copying from the floating point registers. */
5077 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
5082 CUMULATIVE_ARGS cum;
5084 /* This code only works for the original 32-bit ABI and the O64 ABI. */
5085 gcc_assert (TARGET_OLDABI);
5092 init_cumulative_args (&cum, NULL, NULL);
5094 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5096 enum machine_mode mode;
5097 struct mips_arg_info info;
5101 else if ((f & 3) == 2)
5106 mips_arg_info (&cum, mode, NULL, true, &info);
5107 gparg = mips_arg_regno (&info, false);
5108 fparg = mips_arg_regno (&info, true);
5111 fprintf (file, "\t%s\t%s,%s\n", s,
5112 reg_names[gparg], reg_names[fparg]);
5113 else if (TARGET_64BIT)
5114 fprintf (file, "\td%s\t%s,%s\n", s,
5115 reg_names[gparg], reg_names[fparg]);
5116 else if (ISA_HAS_MXHC1)
5117 /* -mips32r2 -mfp64 */
5118 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
5120 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
5122 from_fp_p ? "mfhc1" : "mthc1",
5123 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
5125 else if (TARGET_BIG_ENDIAN)
5126 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
5127 reg_names[gparg], reg_names[fparg + 1], s,
5128 reg_names[gparg + 1], reg_names[fparg]);
5130 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
5131 reg_names[gparg], reg_names[fparg], s,
5132 reg_names[gparg + 1], reg_names[fparg + 1]);
5134 function_arg_advance (&cum, mode, NULL, true);
5138 /* Build a mips16 function stub. This is used for functions which
5139 take arguments in the floating point registers. It is 32-bit code
5140 that moves the floating point args into the general registers, and
5141 then jumps to the 16-bit code. */
5144 build_mips16_function_stub (FILE *file)
5147 char *secname, *stubname;
5148 tree stubid, stubdecl;
5152 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5153 fnname = targetm.strip_name_encoding (fnname);
5154 secname = (char *) alloca (strlen (fnname) + 20);
5155 sprintf (secname, ".mips16.fn.%s", fnname);
5156 stubname = (char *) alloca (strlen (fnname) + 20);
5157 sprintf (stubname, "__fn_stub_%s", fnname);
5158 stubid = get_identifier (stubname);
5159 stubdecl = build_decl (FUNCTION_DECL, stubid,
5160 build_function_type (void_type_node, NULL_TREE));
5161 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5162 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5164 fprintf (file, "\t# Stub function for %s (", current_function_name ());
5166 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
5168 fprintf (file, "%s%s",
5169 need_comma ? ", " : "",
5170 (f & 3) == 1 ? "float" : "double");
5173 fprintf (file, ")\n");
5175 fprintf (file, "\t.set\tnomips16\n");
5176 switch_to_section (function_section (stubdecl));
5177 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
5179 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
5180 within a .ent, and we cannot emit another .ent. */
5181 if (!FUNCTION_NAME_ALREADY_DECLARED)
5183 fputs ("\t.ent\t", file);
5184 assemble_name (file, stubname);
5188 assemble_name (file, stubname);
5189 fputs (":\n", file);
5191 /* We don't want the assembler to insert any nops here. */
5192 fprintf (file, "\t.set\tnoreorder\n");
5194 mips16_fp_args (file, current_function_args_info.fp_code, 1);
5196 fprintf (asm_out_file, "\t.set\tnoat\n");
5197 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
5198 assemble_name (file, fnname);
5199 fprintf (file, "\n");
5200 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5201 fprintf (asm_out_file, "\t.set\tat\n");
5203 /* Unfortunately, we can't fill the jump delay slot. We can't fill
5204 with one of the mfc1 instructions, because the result is not
5205 available for one instruction, so if the very first instruction
5206 in the function refers to the register, it will see the wrong
5208 fprintf (file, "\tnop\n");
5210 fprintf (file, "\t.set\treorder\n");
5212 if (!FUNCTION_NAME_ALREADY_DECLARED)
5214 fputs ("\t.end\t", file);
5215 assemble_name (file, stubname);
5219 switch_to_section (function_section (current_function_decl));
5222 /* Emit code to return a double value from a mips16 stub. GPREG is the
5223 first GP reg to use, FPREG is the first FP reg to use. */
5226 mips16_fpret_double (int gpreg, int fpreg)
5229 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
5230 reg_names[gpreg], reg_names[fpreg]);
5231 else if (TARGET_FLOAT64)
5233 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5234 reg_names[gpreg + WORDS_BIG_ENDIAN],
5236 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
5237 reg_names[gpreg + !WORDS_BIG_ENDIAN],
5242 if (TARGET_BIG_ENDIAN)
5244 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5245 reg_names[gpreg + 0],
5246 reg_names[fpreg + 1]);
5247 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5248 reg_names[gpreg + 1],
5249 reg_names[fpreg + 0]);
5253 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5254 reg_names[gpreg + 0],
5255 reg_names[fpreg + 0]);
5256 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5257 reg_names[gpreg + 1],
5258 reg_names[fpreg + 1]);
5263 /* Build a call stub for a mips16 call. A stub is needed if we are
5264 passing any floating point values which should go into the floating
5265 point registers. If we are, and the call turns out to be to a
5266 32-bit function, the stub will be used to move the values into the
5267 floating point registers before calling the 32-bit function. The
5268 linker will magically adjust the function call to either the 16-bit
5269 function or the 32-bit stub, depending upon where the function call
5270 is actually defined.
5272 Similarly, we need a stub if the return value might come back in a
5273 floating point register.
5275 RETVAL is the location of the return value, or null if this is
5276 a call rather than a call_value. FN is the address of the
5277 function and ARG_SIZE is the size of the arguments. FP_CODE
5278 is the code built by function_arg. This function returns a nonzero
5279 value if it builds the call instruction itself. */
5282 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
5286 char *secname, *stubname;
5287 struct mips16_stub *l;
5288 tree stubid, stubdecl;
5293 /* We don't need to do anything if we aren't in mips16 mode, or if
5294 we were invoked with the -msoft-float option. */
5295 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5298 /* Figure out whether the value might come back in a floating point
5301 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
5303 /* We don't need to do anything if there were no floating point
5304 arguments and the value will not be returned in a floating point
5306 if (fp_code == 0 && ! fpret)
5309 /* We don't need to do anything if this is a call to a special
5310 mips16 support function. */
5311 if (GET_CODE (fn) == SYMBOL_REF
5312 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
5315 /* This code will only work for o32 and o64 abis. The other ABI's
5316 require more sophisticated support. */
5317 gcc_assert (TARGET_OLDABI);
5319 /* If we're calling via a function pointer, then we must always call
5320 via a stub. There are magic stubs provided in libgcc.a for each
5321 of the required cases. Each of them expects the function address
5322 to arrive in register $2. */
5324 if (GET_CODE (fn) != SYMBOL_REF)
5330 /* ??? If this code is modified to support other ABI's, we need
5331 to handle PARALLEL return values here. */
5334 sprintf (buf, "__mips16_call_stub_%s_%d",
5335 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5338 sprintf (buf, "__mips16_call_stub_%d",
5341 id = get_identifier (buf);
5342 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5344 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
5346 if (retval == NULL_RTX)
5347 insn = gen_call_internal (stub_fn, arg_size);
5349 insn = gen_call_value_internal (retval, stub_fn, arg_size);
5350 insn = emit_call_insn (insn);
5352 /* Put the register usage information on the CALL. */
5353 CALL_INSN_FUNCTION_USAGE (insn) =
5354 gen_rtx_EXPR_LIST (VOIDmode,
5355 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
5356 CALL_INSN_FUNCTION_USAGE (insn));
5358 /* If we are handling a floating point return value, we need to
5359 save $18 in the function prologue. Putting a note on the
5360 call will mean that df_regs_ever_live_p ($18) will be true if the
5361 call is not eliminated, and we can check that in the prologue
5364 CALL_INSN_FUNCTION_USAGE (insn) =
5365 gen_rtx_EXPR_LIST (VOIDmode,
5366 gen_rtx_USE (VOIDmode,
5367 gen_rtx_REG (word_mode, 18)),
5368 CALL_INSN_FUNCTION_USAGE (insn));
5370 /* Return 1 to tell the caller that we've generated the call
5375 /* We know the function we are going to call. If we have already
5376 built a stub, we don't need to do anything further. */
5378 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5379 for (l = mips16_stubs; l != NULL; l = l->next)
5380 if (strcmp (l->name, fnname) == 0)
5385 /* Build a special purpose stub. When the linker sees a
5386 function call in mips16 code, it will check where the target
5387 is defined. If the target is a 32-bit call, the linker will
5388 search for the section defined here. It can tell which
5389 symbol this section is associated with by looking at the
5390 relocation information (the name is unreliable, since this
5391 might be a static function). If such a section is found, the
5392 linker will redirect the call to the start of the magic
5395 If the function does not return a floating point value, the
5396 special stub section is named
5399 If the function does return a floating point value, the stub
5401 .mips16.call.fp.FNNAME
5404 secname = (char *) alloca (strlen (fnname) + 40);
5405 sprintf (secname, ".mips16.call.%s%s",
5408 stubname = (char *) alloca (strlen (fnname) + 20);
5409 sprintf (stubname, "__call_stub_%s%s",
5412 stubid = get_identifier (stubname);
5413 stubdecl = build_decl (FUNCTION_DECL, stubid,
5414 build_function_type (void_type_node, NULL_TREE));
5415 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5416 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5418 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5420 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5424 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5426 fprintf (asm_out_file, "%s%s",
5427 need_comma ? ", " : "",
5428 (f & 3) == 1 ? "float" : "double");
5431 fprintf (asm_out_file, ")\n");
5433 fprintf (asm_out_file, "\t.set\tnomips16\n");
5434 assemble_start_function (stubdecl, stubname);
5436 if (!FUNCTION_NAME_ALREADY_DECLARED)
5438 fputs ("\t.ent\t", asm_out_file);
5439 assemble_name (asm_out_file, stubname);
5440 fputs ("\n", asm_out_file);
5442 assemble_name (asm_out_file, stubname);
5443 fputs (":\n", asm_out_file);
5446 /* We build the stub code by hand. That's the only way we can
5447 do it, since we can't generate 32-bit code during a 16-bit
5450 /* We don't want the assembler to insert any nops here. */
5451 fprintf (asm_out_file, "\t.set\tnoreorder\n");
5453 mips16_fp_args (asm_out_file, fp_code, 0);
5457 fprintf (asm_out_file, "\t.set\tnoat\n");
5458 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
5460 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5461 fprintf (asm_out_file, "\t.set\tat\n");
5462 /* Unfortunately, we can't fill the jump delay slot. We
5463 can't fill with one of the mtc1 instructions, because the
5464 result is not available for one instruction, so if the
5465 very first instruction in the function refers to the
5466 register, it will see the wrong value. */
5467 fprintf (asm_out_file, "\tnop\n");
5471 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5472 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5473 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
5474 /* As above, we can't fill the delay slot. */
5475 fprintf (asm_out_file, "\tnop\n");
5476 switch (GET_MODE (retval))
5479 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5480 reg_names[GP_REG_FIRST + 3],
5481 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
5484 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
5485 reg_names[GP_REG_FIRST + 2],
5486 reg_names[FP_REG_FIRST + 0]);
5487 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5489 /* On 64-bit targets, complex floats are returned in
5490 a single GPR, such that "sd" on a suitably-aligned
5491 target would store the value correctly. */
5492 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5493 reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN],
5494 reg_names[GP_REG_FIRST + 2 + TARGET_LITTLE_ENDIAN]);
5495 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5496 reg_names[GP_REG_FIRST + 2],
5497 reg_names[GP_REG_FIRST + 2],
5498 reg_names[GP_REG_FIRST + 3]);
5503 mips16_fpret_double (GP_REG_FIRST + 2 + (8 / UNITS_PER_WORD),
5504 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5508 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
5514 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
5515 /* As above, we can't fill the delay slot. */
5516 fprintf (asm_out_file, "\tnop\n");
5519 fprintf (asm_out_file, "\t.set\treorder\n");
5521 #ifdef ASM_DECLARE_FUNCTION_SIZE
5522 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5525 if (!FUNCTION_NAME_ALREADY_DECLARED)
5527 fputs ("\t.end\t", asm_out_file);
5528 assemble_name (asm_out_file, stubname);
5529 fputs ("\n", asm_out_file);
5532 /* Record this stub. */
5533 l = (struct mips16_stub *) xmalloc (sizeof *l);
5534 l->name = xstrdup (fnname);
5536 l->next = mips16_stubs;
5540 /* If we expect a floating point return value, but we've built a
5541 stub which does not expect one, then we're in trouble. We can't
5542 use the existing stub, because it won't handle the floating point
5543 value. We can't build a new stub, because the linker won't know
5544 which stub to use for the various calls in this object file.
5545 Fortunately, this case is illegal, since it means that a function
5546 was declared in two different ways in a single compilation. */
5547 if (fpret && ! l->fpret)
5548 error ("cannot handle inconsistent calls to %qs", fnname);
5550 if (retval == NULL_RTX)
5551 insn = gen_call_internal_direct (fn, arg_size);
5553 insn = gen_call_value_internal_direct (retval, fn, arg_size);
5554 insn = emit_call_insn (insn);
5556 /* If we are calling a stub which handles a floating point return
5557 value, we need to arrange to save $18 in the prologue. We do
5558 this by marking the function call as using the register. The
5559 prologue will later see that it is used, and emit code to save
5562 CALL_INSN_FUNCTION_USAGE (insn) =
5563 gen_rtx_EXPR_LIST (VOIDmode,
5564 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
5565 CALL_INSN_FUNCTION_USAGE (insn));
5567 /* Return 1 to tell the caller that we've generated the call
5572 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5575 mips_ok_for_lazy_binding_p (rtx x)
5577 return (TARGET_USE_GOT
5578 && GET_CODE (x) == SYMBOL_REF
5579 && !mips_symbol_binds_local_p (x));
5582 /* Load function address ADDR into register DEST. SIBCALL_P is true
5583 if the address is needed for a sibling call. Return true if we
5584 used an explicit lazy-binding sequence. */
5587 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
5589 /* If we're generating PIC, and this call is to a global function,
5590 try to allow its address to be resolved lazily. This isn't
5591 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
5592 to the stub would be our caller's gp, not ours. */
5593 if (TARGET_EXPLICIT_RELOCS
5594 && !(sibcall_p && TARGET_CALL_SAVED_GP)
5595 && mips_ok_for_lazy_binding_p (addr))
5597 rtx high, lo_sum_symbol;
5599 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
5600 addr, SYMBOL_GOTOFF_CALL);
5601 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
5602 if (Pmode == SImode)
5603 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
5605 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
5610 mips_emit_move (dest, addr);
5616 /* Expand a call or call_value instruction. RESULT is where the
5617 result will go (null for calls), ADDR is the address of the
5618 function, ARGS_SIZE is the size of the arguments and AUX is
5619 the value passed to us by mips_function_arg. SIBCALL_P is true
5620 if we are expanding a sibling call, false if we're expanding
5624 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
5626 rtx orig_addr, pattern, insn;
5631 if (!call_insn_operand (addr, VOIDmode))
5633 addr = gen_reg_rtx (Pmode);
5634 lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
5638 && TARGET_HARD_FLOAT_ABI
5639 && build_mips16_call_stub (result, addr, args_size,
5640 aux == 0 ? 0 : (int) GET_MODE (aux)))
5644 pattern = (sibcall_p
5645 ? gen_sibcall_internal (addr, args_size)
5646 : gen_call_internal (addr, args_size));
5647 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
5651 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
5652 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
5655 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
5656 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
5659 pattern = (sibcall_p
5660 ? gen_sibcall_value_internal (result, addr, args_size)
5661 : gen_call_value_internal (result, addr, args_size));
5663 insn = emit_call_insn (pattern);
5665 /* Lazy-binding stubs require $gp to be valid on entry. We also pretend
5666 that they use FAKE_CALL_REGNO; see the load_call<mode> patterns for
5670 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5671 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
5672 gen_rtx_REG (Pmode, FAKE_CALL_REGNO));
5677 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5680 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5682 if (!TARGET_SIBCALLS)
5685 /* We can't do a sibcall if the called function is a MIPS16 function
5686 because there is no direct "jx" instruction equivalent to "jalx" to
5687 switch the ISA mode. */
5688 if (mips_use_mips16_mode_p (decl))
5691 /* ...and when -minterlink-mips16 is in effect, assume that external
5692 functions could be MIPS16 ones unless an attribute explicitly
5693 tells us otherwise. We only care about cases where the sibling
5694 and normal calls would both be direct. */
5695 if (TARGET_INTERLINK_MIPS16
5697 && DECL_EXTERNAL (decl)
5698 && !mips_nomips16_decl_p (decl)
5699 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5706 /* Emit code to move general operand SRC into condition-code
5707 register DEST. SCRATCH is a scratch TFmode float register.
5714 where FP1 and FP2 are single-precision float registers
5715 taken from SCRATCH. */
5718 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
5722 /* Change the source to SFmode. */
5724 src = adjust_address (src, SFmode, 0);
5725 else if (REG_P (src) || GET_CODE (src) == SUBREG)
5726 src = gen_rtx_REG (SFmode, true_regnum (src));
5728 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
5729 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
5731 mips_emit_move (copy_rtx (fp1), src);
5732 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
5733 emit_insn (gen_slt_sf (dest, fp2, fp1));
5736 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
5737 Assume that the areas do not overlap. */
5740 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
5742 HOST_WIDE_INT offset, delta;
5743 unsigned HOST_WIDE_INT bits;
5745 enum machine_mode mode;
5748 /* Work out how many bits to move at a time. If both operands have
5749 half-word alignment, it is usually better to move in half words.
5750 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
5751 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
5752 Otherwise move word-sized chunks. */
5753 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
5754 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
5755 bits = BITS_PER_WORD / 2;
5757 bits = BITS_PER_WORD;
5759 mode = mode_for_size (bits, MODE_INT, 0);
5760 delta = bits / BITS_PER_UNIT;
5762 /* Allocate a buffer for the temporary registers. */
5763 regs = alloca (sizeof (rtx) * length / delta);
5765 /* Load as many BITS-sized chunks as possible. Use a normal load if
5766 the source has enough alignment, otherwise use left/right pairs. */
5767 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5769 regs[i] = gen_reg_rtx (mode);
5770 if (MEM_ALIGN (src) >= bits)
5771 mips_emit_move (regs[i], adjust_address (src, mode, offset));
5774 rtx part = adjust_address (src, BLKmode, offset);
5775 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
5780 /* Copy the chunks to the destination. */
5781 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5782 if (MEM_ALIGN (dest) >= bits)
5783 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
5786 rtx part = adjust_address (dest, BLKmode, offset);
5787 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
5791 /* Mop up any left-over bytes. */
5792 if (offset < length)
5794 src = adjust_address (src, BLKmode, offset);
5795 dest = adjust_address (dest, BLKmode, offset);
5796 move_by_pieces (dest, src, length - offset,
5797 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
5801 #define MAX_MOVE_REGS 4
5802 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
5805 /* Helper function for doing a loop-based block operation on memory
5806 reference MEM. Each iteration of the loop will operate on LENGTH
5809 Create a new base register for use within the loop and point it to
5810 the start of MEM. Create a new memory reference that uses this
5811 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
5814 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
5815 rtx *loop_reg, rtx *loop_mem)
5817 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
5819 /* Although the new mem does not refer to a known location,
5820 it does keep up to LENGTH bytes of alignment. */
5821 *loop_mem = change_address (mem, BLKmode, *loop_reg);
5822 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
5826 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
5827 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
5828 memory regions do not overlap. */
5831 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
5833 rtx label, src_reg, dest_reg, final_src;
5834 HOST_WIDE_INT leftover;
5836 leftover = length % MAX_MOVE_BYTES;
5839 /* Create registers and memory references for use within the loop. */
5840 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
5841 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
5843 /* Calculate the value that SRC_REG should have after the last iteration
5845 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
5848 /* Emit the start of the loop. */
5849 label = gen_label_rtx ();
5852 /* Emit the loop body. */
5853 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
5855 /* Move on to the next block. */
5856 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
5857 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
5859 /* Emit the loop condition. */
5860 if (Pmode == DImode)
5861 emit_insn (gen_cmpdi (src_reg, final_src));
5863 emit_insn (gen_cmpsi (src_reg, final_src));
5864 emit_jump_insn (gen_bne (label));
5866 /* Mop up any left-over bytes. */
5868 mips_block_move_straight (dest, src, leftover);
5871 /* Expand a movmemsi instruction. */
5874 mips_expand_block_move (rtx dest, rtx src, rtx length)
5876 if (GET_CODE (length) == CONST_INT)
5878 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
5880 mips_block_move_straight (dest, src, INTVAL (length));
5885 mips_block_move_loop (dest, src, INTVAL (length));
5893 /* Expand a loop of synci insns for the address range [BEGIN, END). */
5896 mips_expand_synci_loop (rtx begin, rtx end)
5898 rtx inc, label, cmp, cmp_result;
5900 /* Load INC with the cache line size (rdhwr INC,$1). */
5901 inc = gen_reg_rtx (SImode);
5902 emit_insn (gen_rdhwr (inc, const1_rtx));
5904 /* Loop back to here. */
5905 label = gen_label_rtx ();
5908 emit_insn (gen_synci (begin));
5910 cmp = gen_reg_rtx (Pmode);
5911 mips_emit_binary (GTU, cmp, begin, end);
5913 mips_emit_binary (PLUS, begin, begin, inc);
5915 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
5916 emit_jump_insn (gen_condjump (cmp_result, label));
5919 /* Return true if it is possible to use left/right accesses for a
5920 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5921 returning true, update *OP, *LEFT and *RIGHT as follows:
5923 *OP is a BLKmode reference to the whole field.
5925 *LEFT is a QImode reference to the first byte if big endian or
5926 the last byte if little endian. This address can be used in the
5927 left-side instructions (lwl, swl, ldl, sdl).
5929 *RIGHT is a QImode reference to the opposite end of the field and
5930 can be used in the patterning right-side instruction. */
5933 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5934 rtx *left, rtx *right)
5938 /* Check that the operand really is a MEM. Not all the extv and
5939 extzv predicates are checked. */
5943 /* Check that the size is valid. */
5944 if (width != 32 && (!TARGET_64BIT || width != 64))
5947 /* We can only access byte-aligned values. Since we are always passed
5948 a reference to the first byte of the field, it is not necessary to
5949 do anything with BITPOS after this check. */
5950 if (bitpos % BITS_PER_UNIT != 0)
5953 /* Reject aligned bitfields: we want to use a normal load or store
5954 instead of a left/right pair. */
5955 if (MEM_ALIGN (*op) >= width)
5958 /* Adjust *OP to refer to the whole field. This also has the effect
5959 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5960 *op = adjust_address (*op, BLKmode, 0);
5961 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5963 /* Get references to both ends of the field. We deliberately don't
5964 use the original QImode *OP for FIRST since the new BLKmode one
5965 might have a simpler address. */
5966 first = adjust_address (*op, QImode, 0);
5967 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5969 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5970 be the upper word and RIGHT the lower word. */
5971 if (TARGET_BIG_ENDIAN)
5972 *left = first, *right = last;
5974 *left = last, *right = first;
5980 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5981 Return true on success. We only handle cases where zero_extract is
5982 equivalent to sign_extract. */
5985 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
5987 rtx left, right, temp;
5989 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5990 paradoxical word_mode subreg. This is the only case in which
5991 we allow the destination to be larger than the source. */
5992 if (GET_CODE (dest) == SUBREG
5993 && GET_MODE (dest) == DImode
5994 && SUBREG_BYTE (dest) == 0
5995 && GET_MODE (SUBREG_REG (dest)) == SImode)
5996 dest = SUBREG_REG (dest);
5998 /* After the above adjustment, the destination must be the same
5999 width as the source. */
6000 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
6003 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
6006 temp = gen_reg_rtx (GET_MODE (dest));
6007 if (GET_MODE (dest) == DImode)
6009 emit_insn (gen_mov_ldl (temp, src, left));
6010 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
6014 emit_insn (gen_mov_lwl (temp, src, left));
6015 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
6021 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
6025 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
6028 enum machine_mode mode;
6030 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6033 mode = mode_for_size (width, MODE_INT, 0);
6034 src = gen_lowpart (mode, src);
6038 emit_insn (gen_mov_sdl (dest, src, left));
6039 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6043 emit_insn (gen_mov_swl (dest, src, left));
6044 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6049 /* Return true if X is a MEM with the same size as MODE. */
6052 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6059 size = MEM_SIZE (x);
6060 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6063 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
6064 source of an "ext" instruction or the destination of an "ins"
6065 instruction. OP must be a register operand and the following
6066 conditions must hold:
6068 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
6069 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
6070 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
6072 Also reject lengths equal to a word as they are better handled
6073 by the move patterns. */
6076 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
6078 HOST_WIDE_INT len, pos;
6080 if (!ISA_HAS_EXT_INS
6081 || !register_operand (op, VOIDmode)
6082 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6085 len = INTVAL (size);
6086 pos = INTVAL (position);
6088 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
6089 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
6095 /* Initialize mips_split_addresses from the associated command-line
6098 mips_split_addresses is a half-way house between explicit
6099 relocations and the traditional assembler macros. It can
6100 split absolute 32-bit symbolic constants into a high/lo_sum
6101 pair but uses macros for other sorts of access.
6103 Like explicit relocation support for REL targets, it relies
6104 on GNU extensions in the assembler and the linker.
6106 Although this code should work for -O0, it has traditionally
6107 been treated as an optimization. */
6110 mips_init_split_addresses (void)
6112 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
6113 && optimize && !flag_pic
6114 && !ABI_HAS_64BIT_SYMBOLS)
6115 mips_split_addresses = 1;
6117 mips_split_addresses = 0;
6120 /* (Re-)Initialize information about relocs. */
6123 mips_init_relocs (void)
6125 memset (mips_split_p, '\0', sizeof (mips_split_p));
6126 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6127 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6129 if (ABI_HAS_64BIT_SYMBOLS)
6131 if (TARGET_EXPLICIT_RELOCS)
6133 mips_split_p[SYMBOL_64_HIGH] = true;
6134 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6135 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6137 mips_split_p[SYMBOL_64_MID] = true;
6138 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6139 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6141 mips_split_p[SYMBOL_64_LOW] = true;
6142 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6143 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6145 mips_split_p[SYMBOL_ABSOLUTE] = true;
6146 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6151 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
6153 mips_split_p[SYMBOL_ABSOLUTE] = true;
6154 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6155 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6157 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6163 /* The high part is provided by a pseudo copy of $gp. */
6164 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6165 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6168 if (TARGET_EXPLICIT_RELOCS)
6170 /* Small data constants are kept whole until after reload,
6171 then lowered by mips_rewrite_small_data. */
6172 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6174 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6177 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6178 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6182 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6183 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6188 /* The HIGH and LO_SUM are matched by special .md patterns. */
6189 mips_split_p[SYMBOL_GOT_DISP] = true;
6191 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6192 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6193 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6195 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6196 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6197 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6202 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6204 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6205 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6211 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6212 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6213 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6216 /* Thread-local relocation operators. */
6217 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6218 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6219 mips_split_p[SYMBOL_DTPREL] = 1;
6220 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6221 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6222 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6223 mips_split_p[SYMBOL_TPREL] = 1;
6224 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6225 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6227 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6230 /* If OP is an UNSPEC address, return the address to which it refers,
6231 otherwise return OP itself. */
6234 mips_strip_unspec_address (rtx op)
6238 split_const (op, &base, &offset);
6239 if (UNSPEC_ADDRESS_P (base))
6240 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6244 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6245 in context CONTEXT. RELOCS is the array of relocations to use. */
6248 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6249 const char **relocs)
6251 enum mips_symbol_type symbol_type;
6254 symbol_type = mips_classify_symbolic_expression (op, context);
6255 if (relocs[symbol_type] == 0)
6256 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6258 fputs (relocs[symbol_type], file);
6259 output_addr_const (file, mips_strip_unspec_address (op));
6260 for (p = relocs[symbol_type]; *p != 0; p++)
6265 /* Print the text for PRINT_OPERAND punctation character CH to FILE.
6266 The punctuation characters are:
6268 '(' Start a nested ".set noreorder" block.
6269 ')' End a nested ".set noreorder" block.
6270 '[' Start a nested ".set noat" block.
6271 ']' End a nested ".set noat" block.
6272 '<' Start a nested ".set nomacro" block.
6273 '>' End a nested ".set nomacro" block.
6274 '*' Behave like %(%< if generating a delayed-branch sequence.
6275 '#' Print a nop if in a ".set noreorder" block.
6276 '/' Like '#', but do nothing within a delayed-branch sequence.
6277 '?' Print "l" if mips_branch_likely is true
6278 '.' Print the name of the register with a hard-wired zero (zero or $0).
6279 '@' Print the name of the assembler temporary register (at or $1).
6280 '^' Print the name of the pic call-through register (t9 or $25).
6281 '+' Print the name of the gp register (usually gp or $28).
6282 '$' Print the name of the stack pointer register (sp or $29).
6283 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6284 '-' Print ".set pop" under the same conditions for '|'.
6286 See also mips_init_print_operand_pucnt. */
6289 mips_print_operand_punctuation (FILE *file, int ch)
6294 if (set_noreorder++ == 0)
6295 fputs (".set\tnoreorder\n\t", file);
6299 gcc_assert (set_noreorder > 0);
6300 if (--set_noreorder == 0)
6301 fputs ("\n\t.set\treorder", file);
6305 if (set_noat++ == 0)
6306 fputs (".set\tnoat\n\t", file);
6310 gcc_assert (set_noat > 0);
6311 if (--set_noat == 0)
6312 fputs ("\n\t.set\tat", file);
6316 if (set_nomacro++ == 0)
6317 fputs (".set\tnomacro\n\t", file);
6321 gcc_assert (set_nomacro > 0);
6322 if (--set_nomacro == 0)
6323 fputs ("\n\t.set\tmacro", file);
6327 if (final_sequence != 0)
6329 mips_print_operand_punctuation (file, '(');
6330 mips_print_operand_punctuation (file, '<');
6335 if (set_noreorder != 0)
6336 fputs ("\n\tnop", file);
6340 /* Print an extra newline so that the delayed insn is separated
6341 from the following ones. This looks neater and is consistent
6342 with non-nop delayed sequences. */
6343 if (set_noreorder != 0 && final_sequence == 0)
6344 fputs ("\n\tnop\n", file);
6348 if (mips_branch_likely)
6353 fputs (reg_names[GP_REG_FIRST + 0], file);
6357 fputs (reg_names[GP_REG_FIRST + 1], file);
6361 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6365 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6369 fputs (reg_names[STACK_POINTER_REGNUM], file);
6374 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6379 fputs ("\n\t.set\tpop", file);
6388 /* Initialize mips_print_operand_punct. */
6391 mips_init_print_operand_punct (void)
6395 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6396 mips_print_operand_punct[(unsigned char) *p] = true;
6399 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
6400 associated with condition CODE. Print the condition part of the
6404 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
6418 /* Conveniently, the MIPS names for these conditions are the same
6419 as their RTL equivalents. */
6420 fputs (GET_RTX_NAME (code), file);
6424 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6429 /* Likewise floating-point branches. */
6432 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
6437 fputs ("c1f", file);
6441 fputs ("c1t", file);
6445 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6450 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6452 'X' Print CONST_INT OP in hexadecimal format.
6453 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
6454 'd' Print CONST_INT OP in decimal.
6455 'h' Print the high-part relocation associated with OP, after stripping
6457 'R' Print the low-part relocation associated with OP.
6458 'C' Print the integer branch condition for comparison OP.
6459 'N' Print the inverse of the integer branch condition for comparison OP.
6460 'F' Print the FPU branch condition for comparison OP.
6461 'W' Print the inverse of the FPU branch condition for comparison OP.
6462 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6463 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6464 't' Like 'T', but with the EQ/NE cases reversed
6465 'Y' Print mips_fp_conditions[INTVAL (OP)]
6466 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
6467 'q' Print a DSP accumulator register.
6468 'D' Print the second part of a double-word register or memory operand.
6469 'L' Print the low-order register in a double-word register operand.
6470 'M' Print high-order register in a double-word register operand.
6471 'z' Print $0 if OP is zero, otherwise print OP normally. */
6474 print_operand (FILE *file, rtx op, int letter)
6478 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6480 mips_print_operand_punctuation (file, letter);
6485 code = GET_CODE (op);
6490 if (GET_CODE (op) == CONST_INT)
6491 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6493 output_operand_lossage ("invalid use of '%%%c'", letter);
6497 if (GET_CODE (op) == CONST_INT)
6498 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
6500 output_operand_lossage ("invalid use of '%%%c'", letter);
6504 if (GET_CODE (op) == CONST_INT)
6505 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
6507 output_operand_lossage ("invalid use of '%%%c'", letter);
6513 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6517 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6521 mips_print_int_branch_condition (file, code, letter);
6525 mips_print_int_branch_condition (file, reverse_condition (code), letter);
6529 mips_print_float_branch_condition (file, code, letter);
6533 mips_print_float_branch_condition (file, reverse_condition (code),
6540 int truth = (code == NE) == (letter == 'T');
6541 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6546 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
6547 fputs (mips_fp_conditions[UINTVAL (op)], file);
6549 output_operand_lossage ("'%%%c' is not a valid operand prefix",
6556 print_operand (file, op, 0);
6562 if (code == REG && MD_REG_P (REGNO (op)))
6563 fprintf (file, "$ac0");
6564 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
6565 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
6567 output_operand_lossage ("invalid use of '%%%c'", letter);
6575 unsigned int regno = REGNO (op);
6576 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
6577 || (letter == 'L' && TARGET_BIG_ENDIAN)
6580 fprintf (file, "%s", reg_names[regno]);
6586 output_address (plus_constant (XEXP (op, 0), 4));
6588 output_address (XEXP (op, 0));
6592 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6593 fputs (reg_names[GP_REG_FIRST], file);
6594 else if (CONST_GP_P (op))
6595 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6597 output_addr_const (file, mips_strip_unspec_address (op));
6603 /* Output address operand X to FILE. */
6606 print_operand_address (FILE *file, rtx x)
6608 struct mips_address_info addr;
6610 if (mips_classify_address (&addr, x, word_mode, true))
6614 print_operand (file, addr.offset, 0);
6615 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6618 case ADDRESS_LO_SUM:
6619 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6621 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6624 case ADDRESS_CONST_INT:
6625 output_addr_const (file, x);
6626 fprintf (file, "(%s)", reg_names[0]);
6629 case ADDRESS_SYMBOLIC:
6630 output_addr_const (file, mips_strip_unspec_address (x));
6636 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
6637 FIRST is true if this is the first time handling this decl. */
6640 mips_encode_section_info (tree decl, rtx rtl, int first)
6642 default_encode_section_info (decl, rtl, first);
6644 if (TREE_CODE (decl) == FUNCTION_DECL)
6646 rtx symbol = XEXP (rtl, 0);
6647 tree type = TREE_TYPE (decl);
6649 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
6650 || mips_far_type_p (type))
6651 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
6655 /* Implement TARGET_SELECT_RTX_SECTION. */
6658 mips_select_rtx_section (enum machine_mode mode, rtx x,
6659 unsigned HOST_WIDE_INT align)
6661 /* ??? Consider using mergeable small data sections. */
6662 if (mips_rtx_constant_in_small_data_p (mode))
6663 return get_named_section (NULL, ".sdata", 0);
6665 return default_elf_select_rtx_section (mode, x, align);
6668 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6670 The complication here is that, with the combination TARGET_ABICALLS
6671 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6672 therefore not be included in the read-only part of a DSO. Handle such
6673 cases by selecting a normal data section instead of a read-only one.
6674 The logic apes that in default_function_rodata_section. */
6677 mips_function_rodata_section (tree decl)
6679 if (!TARGET_ABICALLS || TARGET_GPWORD)
6680 return default_function_rodata_section (decl);
6682 if (decl && DECL_SECTION_NAME (decl))
6684 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6685 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6687 char *rname = ASTRDUP (name);
6689 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6691 else if (flag_function_sections && flag_data_sections
6692 && strncmp (name, ".text.", 6) == 0)
6694 char *rname = ASTRDUP (name);
6695 memcpy (rname + 1, "data", 4);
6696 return get_section (rname, SECTION_WRITE, decl);
6699 return data_section;
6702 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
6703 locally-defined objects go in a small data section. It also controls
6704 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
6705 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
6708 mips_in_small_data_p (const_tree decl)
6712 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6715 /* We don't yet generate small-data references for -mabicalls or
6716 VxWorks RTP code. See the related -G handling in override_options. */
6717 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
6720 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6724 /* Reject anything that isn't in a known small-data section. */
6725 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6726 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6729 /* If a symbol is defined externally, the assembler will use the
6730 usual -G rules when deciding how to implement macros. */
6731 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
6734 else if (TARGET_EMBEDDED_DATA)
6736 /* Don't put constants into the small data section: we want them
6737 to be in ROM rather than RAM. */
6738 if (TREE_CODE (decl) != VAR_DECL)
6741 if (TREE_READONLY (decl)
6742 && !TREE_SIDE_EFFECTS (decl)
6743 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6747 /* Enforce -mlocal-sdata. */
6748 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
6751 /* Enforce -mextern-sdata. */
6752 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
6754 if (DECL_EXTERNAL (decl))
6756 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
6760 size = int_size_in_bytes (TREE_TYPE (decl));
6761 return (size > 0 && size <= mips_section_threshold);
6764 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
6765 anchors for small data: the GP register acts as an anchor in that
6766 case. We also don't want to use them for PC-relative accesses,
6767 where the PC acts as an anchor. */
6770 mips_use_anchors_for_symbol_p (const_rtx symbol)
6772 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
6774 case SYMBOL_PC_RELATIVE:
6775 case SYMBOL_GP_RELATIVE:
6779 return default_use_anchors_for_symbol_p (symbol);
6783 /* The MIPS debug format wants all automatic variables and arguments
6784 to be in terms of the virtual frame pointer (stack pointer before
6785 any adjustment in the function), while the MIPS 3.0 linker wants
6786 the frame pointer to be the stack pointer after the initial
6787 adjustment. So, we do the adjustment here. The arg pointer (which
6788 is eliminated) points to the virtual frame pointer, while the frame
6789 pointer (which may be eliminated) points to the stack pointer after
6790 the initial adjustments. */
6793 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6795 rtx offset2 = const0_rtx;
6796 rtx reg = eliminate_constant_term (addr, &offset2);
6799 offset = INTVAL (offset2);
6801 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6802 || reg == hard_frame_pointer_rtx)
6804 offset -= cfun->machine->frame.total_size;
6805 if (reg == hard_frame_pointer_rtx)
6806 offset += cfun->machine->frame.hard_frame_pointer_offset;
6809 /* sdbout_parms does not want this to crash for unrecognized cases. */
6811 else if (reg != arg_pointer_rtx)
6812 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6819 /* When using assembler macros, keep track of all of small-data externs
6820 so that mips_file_end can emit the appropriate declarations for them.
6822 In most cases it would be safe (though pointless) to emit .externs
6823 for other symbols too. One exception is when an object is within
6824 the -G limit but declared by the user to be in a section other
6825 than .sbss or .sdata. */
6828 mips_output_external (FILE *file, tree decl, const char *name)
6830 default_elf_asm_output_external (file, decl, name);
6832 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6833 set in order to avoid putting out names that are never really
6835 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6837 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6839 fputs ("\t.extern\t", file);
6840 assemble_name (file, name);
6841 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6842 int_size_in_bytes (TREE_TYPE (decl)));
6844 else if (TARGET_IRIX
6845 && mips_abi == ABI_32
6846 && TREE_CODE (decl) == FUNCTION_DECL)
6848 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6849 `.global name .text' directive for every used but
6850 undefined function. If we don't, the linker may perform
6851 an optimization (skipping over the insns that set $gp)
6852 when it is unsafe. */
6853 fputs ("\t.globl ", file);
6854 assemble_name (file, name);
6855 fputs (" .text\n", file);
6860 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6861 put out a MIPS ECOFF file and a stab. */
6864 mips_output_filename (FILE *stream, const char *name)
6867 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6869 if (write_symbols == DWARF2_DEBUG)
6871 else if (mips_output_filename_first_time)
6873 mips_output_filename_first_time = 0;
6874 num_source_filenames += 1;
6875 current_function_file = name;
6876 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6877 output_quoted_string (stream, name);
6878 putc ('\n', stream);
6881 /* If we are emitting stabs, let dbxout.c handle this (except for
6882 the mips_output_filename_first_time case). */
6883 else if (write_symbols == DBX_DEBUG)
6886 else if (name != current_function_file
6887 && strcmp (name, current_function_file) != 0)
6889 num_source_filenames += 1;
6890 current_function_file = name;
6891 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6892 output_quoted_string (stream, name);
6893 putc ('\n', stream);
6897 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
6900 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
6905 fputs ("\t.dtprelword\t", file);
6909 fputs ("\t.dtpreldword\t", file);
6915 output_addr_const (file, x);
6916 fputs ("+0x8000", file);
6919 /* Implement TARGET_DWARF_REGISTER_SPAN. */
6922 mips_dwarf_register_span (rtx reg)
6925 enum machine_mode mode;
6927 /* By default, GCC maps increasing register numbers to increasing
6928 memory locations, but paired FPRs are always little-endian,
6929 regardless of the prevailing endianness. */
6930 mode = GET_MODE (reg);
6931 if (FP_REG_P (REGNO (reg))
6932 && TARGET_BIG_ENDIAN
6933 && MAX_FPRS_PER_FMT > 1
6934 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
6936 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
6937 high = mips_subword (reg, true);
6938 low = mips_subword (reg, false);
6939 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
6945 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6946 that should be written before the opening quote, such as "\t.ascii\t"
6947 for real string data or "\t# " for a comment. */
6950 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6955 register const unsigned char *string =
6956 (const unsigned char *)string_param;
6958 fprintf (stream, "%s\"", prefix);
6959 for (i = 0; i < len; i++)
6961 register int c = string[i];
6965 if (c == '\\' || c == '\"')
6967 putc ('\\', stream);
6975 fprintf (stream, "\\%03o", c);
6979 if (cur_pos > 72 && i+1 < len)
6982 fprintf (stream, "\"\n%s\"", prefix);
6985 fprintf (stream, "\"\n");
6988 #ifdef BSS_SECTION_ASM_OP
6989 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6990 in the use of sbss. */
6993 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6994 unsigned HOST_WIDE_INT size, int align)
6996 extern tree last_assemble_variable_decl;
6998 if (mips_in_small_data_p (decl))
6999 switch_to_section (get_named_section (NULL, ".sbss", 0));
7001 switch_to_section (bss_section);
7002 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7003 last_assemble_variable_decl = decl;
7004 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
7005 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
7009 /* Emit either a label, .comm, or .lcomm directive. When using assembler
7010 macros, mark the symbol as written so that mips_file_end won't emit an
7011 .extern for it. STREAM is the output file, NAME is the name of the
7012 symbol, INIT_STRING is the string that should be written before the
7013 symbol and FINAL_STRING is the string that should be written after it.
7014 FINAL_STRING is a printf() format that consumes the remaining arguments. */
7017 mips_declare_object (FILE *stream, const char *name, const char *init_string,
7018 const char *final_string, ...)
7022 fputs (init_string, stream);
7023 assemble_name (stream, name);
7024 va_start (ap, final_string);
7025 vfprintf (stream, final_string, ap);
7028 if (!TARGET_EXPLICIT_RELOCS)
7030 tree name_tree = get_identifier (name);
7031 TREE_ASM_WRITTEN (name_tree) = 1;
7035 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7036 NAME is the name of the object and ALIGN is the required alignment
7037 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7038 alignment argument. */
7041 mips_declare_common_object (FILE *stream, const char *name,
7042 const char *init_string,
7043 unsigned HOST_WIDE_INT size,
7044 unsigned int align, bool takes_alignment_p)
7046 if (!takes_alignment_p)
7048 size += (align / BITS_PER_UNIT) - 1;
7049 size -= size % (align / BITS_PER_UNIT);
7050 mips_declare_object (stream, name, init_string,
7051 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7054 mips_declare_object (stream, name, init_string,
7055 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7056 size, align / BITS_PER_UNIT);
7059 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7060 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7063 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7064 unsigned HOST_WIDE_INT size,
7067 /* If the target wants uninitialized const declarations in
7068 .rdata then don't put them in .comm. */
7069 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
7070 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
7071 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7073 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7074 targetm.asm_out.globalize_label (stream, name);
7076 switch_to_section (readonly_data_section);
7077 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7078 mips_declare_object (stream, name, "",
7079 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7083 mips_declare_common_object (stream, name, "\n\t.comm\t",
7087 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7088 extern int size_directive_output;
7090 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7091 definitions except that it uses mips_declare_object() to emit the label. */
7094 mips_declare_object_name (FILE *stream, const char *name,
7095 tree decl ATTRIBUTE_UNUSED)
7097 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7098 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7101 size_directive_output = 0;
7102 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7106 size_directive_output = 1;
7107 size = int_size_in_bytes (TREE_TYPE (decl));
7108 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7111 mips_declare_object (stream, name, "", ":\n");
7114 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7117 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7121 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7122 if (!flag_inhibit_size_directive
7123 && DECL_SIZE (decl) != 0
7124 && !at_end && top_level
7125 && DECL_INITIAL (decl) == error_mark_node
7126 && !size_directive_output)
7130 size_directive_output = 1;
7131 size = int_size_in_bytes (TREE_TYPE (decl));
7132 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7137 /* Implement TARGET_ASM_FILE_START. */
7140 mips_file_start (void)
7142 default_file_start ();
7146 /* Generate a special section to describe the ABI switches used to
7147 produce the resultant binary. This used to be done by the assembler
7148 setting bits in the ELF header's flags field, but we have run out of
7149 bits. GDB needs this information in order to be able to correctly
7150 debug these binaries. See the function mips_gdbarch_init() in
7151 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
7152 causes unnecessary IRIX 6 ld warnings. */
7153 const char * abi_string = NULL;
7157 case ABI_32: abi_string = "abi32"; break;
7158 case ABI_N32: abi_string = "abiN32"; break;
7159 case ABI_64: abi_string = "abi64"; break;
7160 case ABI_O64: abi_string = "abiO64"; break;
7161 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
7165 /* Note - we use fprintf directly rather than calling switch_to_section
7166 because in this way we can avoid creating an allocated section. We
7167 do not want this section to take up any space in the running
7169 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7172 /* There is no ELF header flag to distinguish long32 forms of the
7173 EABI from long64 forms. Emit a special section to help tools
7174 such as GDB. Do the same for o64, which is sometimes used with
7176 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7177 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7178 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7180 #ifdef HAVE_AS_GNU_ATTRIBUTE
7181 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7182 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
7186 /* Generate the pseudo ops that System V.4 wants. */
7187 if (TARGET_ABICALLS)
7188 fprintf (asm_out_file, "\t.abicalls\n");
7190 if (flag_verbose_asm)
7191 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7193 mips_section_threshold, mips_arch_info->name, mips_isa);
7197 /* Make the last instruction frame related and note that it performs
7198 the operation described by FRAME_PATTERN. */
7201 mips_set_frame_expr (rtx frame_pattern)
7205 insn = get_last_insn ();
7206 RTX_FRAME_RELATED_P (insn) = 1;
7207 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7213 /* Return a frame-related rtx that stores REG at MEM.
7214 REG must be a single register. */
7217 mips_frame_set (rtx mem, rtx reg)
7221 /* If we're saving the return address register and the dwarf return
7222 address column differs from the hard register number, adjust the
7223 note reg to refer to the former. */
7224 if (REGNO (reg) == GP_REG_FIRST + 31
7225 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7226 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7228 set = gen_rtx_SET (VOIDmode, mem, reg);
7229 RTX_FRAME_RELATED_P (set) = 1;
7234 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7235 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7236 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7237 static const unsigned char mips16e_s2_s8_regs[] = {
7238 30, 23, 22, 21, 20, 19, 18
7240 static const unsigned char mips16e_a0_a3_regs[] = {
7244 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7245 ordered from the uppermost in memory to the lowest in memory. */
7246 static const unsigned char mips16e_save_restore_regs[] = {
7247 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7250 /* Return the index of the lowest X in the range [0, SIZE) for which
7251 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7254 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7259 for (i = 0; i < size; i++)
7260 if (BITSET_P (mask, regs[i]))
7266 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7267 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7268 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7269 is true for all indexes (X, SIZE). */
7272 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7273 unsigned int size, unsigned int *num_regs_ptr)
7277 i = mips16e_find_first_register (*mask_ptr, regs, size);
7278 for (i++; i < size; i++)
7279 if (!BITSET_P (*mask_ptr, regs[i]))
7282 *mask_ptr |= 1 << regs[i];
7286 /* Return a simplified form of X using the register values in REG_VALUES.
7287 REG_VALUES[R] is the last value assigned to hard register R, or null
7288 if R has not been modified.
7290 This function is rather limited, but is good enough for our purposes. */
7293 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7297 x = avoid_constant_pool_reference (x);
7301 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7302 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7303 x0, GET_MODE (XEXP (x, 0)));
7306 if (ARITHMETIC_P (x))
7308 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7309 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7310 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7314 && reg_values[REGNO (x)]
7315 && !rtx_unstable_p (reg_values[REGNO (x)]))
7316 return reg_values[REGNO (x)];
7321 /* Return true if (set DEST SRC) stores an argument register into its
7322 caller-allocated save slot, storing the number of that argument
7323 register in *REGNO_PTR if so. REG_VALUES is as for
7324 mips16e_collect_propagate_value. */
7327 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7328 unsigned int *regno_ptr)
7330 unsigned int argno, regno;
7331 HOST_WIDE_INT offset, required_offset;
7334 /* Check that this is a word-mode store. */
7335 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7338 /* Check that the register being saved is an unmodified argument
7340 regno = REGNO (src);
7341 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7343 argno = regno - GP_ARG_FIRST;
7345 /* Check whether the address is an appropriate stack pointer or
7346 frame pointer access. */
7347 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7348 mips_split_plus (addr, &base, &offset);
7349 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7350 if (base == hard_frame_pointer_rtx)
7351 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7352 else if (base != stack_pointer_rtx)
7354 if (offset != required_offset)
7361 /* A subroutine of mips_expand_prologue, called only when generating
7362 MIPS16e SAVE instructions. Search the start of the function for any
7363 instructions that save argument registers into their caller-allocated
7364 save slots. Delete such instructions and return a value N such that
7365 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7366 instructions redundant. */
7369 mips16e_collect_argument_saves (void)
7371 rtx reg_values[FIRST_PSEUDO_REGISTER];
7372 rtx insn, next, set, dest, src;
7373 unsigned int nargs, regno;
7375 push_topmost_sequence ();
7377 memset (reg_values, 0, sizeof (reg_values));
7378 for (insn = get_insns (); insn; insn = next)
7380 next = NEXT_INSN (insn);
7387 set = PATTERN (insn);
7388 if (GET_CODE (set) != SET)
7391 dest = SET_DEST (set);
7392 src = SET_SRC (set);
7393 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
7395 if (!BITSET_P (cfun->machine->frame.mask, regno))
7398 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7401 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7402 reg_values[REGNO (dest)]
7403 = mips16e_collect_propagate_value (src, reg_values);
7407 pop_topmost_sequence ();
7412 /* Return a move between register REGNO and memory location SP + OFFSET.
7413 Make the move a load if RESTORE_P, otherwise make it a frame-related
7417 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7422 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7423 reg = gen_rtx_REG (SImode, regno);
7425 ? gen_rtx_SET (VOIDmode, reg, mem)
7426 : mips_frame_set (mem, reg));
7429 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7430 The instruction must:
7432 - Allocate or deallocate SIZE bytes in total; SIZE is known
7435 - Save or restore as many registers in *MASK_PTR as possible.
7436 The instruction saves the first registers at the top of the
7437 allocated area, with the other registers below it.
7439 - Save NARGS argument registers above the allocated area.
7441 (NARGS is always zero if RESTORE_P.)
7443 The SAVE and RESTORE instructions cannot save and restore all general
7444 registers, so there may be some registers left over for the caller to
7445 handle. Destructively modify *MASK_PTR so that it contains the registers
7446 that still need to be saved or restored. The caller can save these
7447 registers in the memory immediately below *OFFSET_PTR, which is a
7448 byte offset from the bottom of the allocated stack area. */
7451 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7452 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7456 HOST_WIDE_INT offset, top_offset;
7457 unsigned int i, regno;
7460 gcc_assert (cfun->machine->frame.num_fp == 0);
7462 /* Calculate the number of elements in the PARALLEL. We need one element
7463 for the stack adjustment, one for each argument register save, and one
7464 for each additional register move. */
7466 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7467 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7470 /* Create the final PARALLEL. */
7471 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7474 /* Add the stack pointer adjustment. */
7475 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7476 plus_constant (stack_pointer_rtx,
7477 restore_p ? size : -size));
7478 RTX_FRAME_RELATED_P (set) = 1;
7479 XVECEXP (pattern, 0, n++) = set;
7481 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7482 top_offset = restore_p ? size : 0;
7484 /* Save the arguments. */
7485 for (i = 0; i < nargs; i++)
7487 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7488 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7489 XVECEXP (pattern, 0, n++) = set;
7492 /* Then fill in the other register moves. */
7493 offset = top_offset;
7494 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7496 regno = mips16e_save_restore_regs[i];
7497 if (BITSET_P (*mask_ptr, regno))
7499 offset -= UNITS_PER_WORD;
7500 set = mips16e_save_restore_reg (restore_p, offset, regno);
7501 XVECEXP (pattern, 0, n++) = set;
7502 *mask_ptr &= ~(1 << regno);
7506 /* Tell the caller what offset it should use for the remaining registers. */
7507 *offset_ptr = size + (offset - top_offset) + size;
7509 gcc_assert (n == XVECLEN (pattern, 0));
7514 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7515 pointer. Return true if PATTERN matches the kind of instruction
7516 generated by mips16e_build_save_restore. If INFO is nonnull,
7517 initialize it when returning true. */
7520 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7521 struct mips16e_save_restore_info *info)
7523 unsigned int i, nargs, mask, extra;
7524 HOST_WIDE_INT top_offset, save_offset, offset;
7525 rtx set, reg, mem, base;
7528 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7531 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7532 top_offset = adjust > 0 ? adjust : 0;
7534 /* Interpret all other members of the PARALLEL. */
7535 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7539 for (n = 1; n < XVECLEN (pattern, 0); n++)
7541 /* Check that we have a SET. */
7542 set = XVECEXP (pattern, 0, n);
7543 if (GET_CODE (set) != SET)
7546 /* Check that the SET is a load (if restoring) or a store
7548 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7552 /* Check that the address is the sum of the stack pointer and a
7553 possibly-zero constant offset. */
7554 mips_split_plus (XEXP (mem, 0), &base, &offset);
7555 if (base != stack_pointer_rtx)
7558 /* Check that SET's other operand is a register. */
7559 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7563 /* Check for argument saves. */
7564 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7565 && REGNO (reg) == GP_ARG_FIRST + nargs)
7567 else if (offset == save_offset)
7569 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7570 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7573 mask |= 1 << REGNO (reg);
7574 save_offset -= GET_MODE_SIZE (gpr_mode);
7580 /* Check that the restrictions on register ranges are met. */
7582 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7583 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7584 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7585 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7589 /* Make sure that the topmost argument register is not saved twice.
7590 The checks above ensure that the same is then true for the other
7591 argument registers. */
7592 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7595 /* Pass back information, if requested. */
7598 info->nargs = nargs;
7600 info->size = (adjust > 0 ? adjust : -adjust);
7606 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7607 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7608 the null terminator. */
7611 mips16e_add_register_range (char *s, unsigned int min_reg,
7612 unsigned int max_reg)
7614 if (min_reg != max_reg)
7615 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7617 s += sprintf (s, ",%s", reg_names[min_reg]);
7621 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7622 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7625 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7627 static char buffer[300];
7629 struct mips16e_save_restore_info info;
7630 unsigned int i, end;
7633 /* Parse the pattern. */
7634 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7637 /* Add the mnemonic. */
7638 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7641 /* Save the arguments. */
7643 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7644 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7645 else if (info.nargs == 1)
7646 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7648 /* Emit the amount of stack space to allocate or deallocate. */
7649 s += sprintf (s, "%d", (int) info.size);
7651 /* Save or restore $16. */
7652 if (BITSET_P (info.mask, 16))
7653 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7655 /* Save or restore $17. */
7656 if (BITSET_P (info.mask, 17))
7657 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7659 /* Save or restore registers in the range $s2...$s8, which
7660 mips16e_s2_s8_regs lists in decreasing order. Note that this
7661 is a software register range; the hardware registers are not
7662 numbered consecutively. */
7663 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7664 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7666 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7667 mips16e_s2_s8_regs[i]);
7669 /* Save or restore registers in the range $a0...$a3. */
7670 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7671 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7673 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7674 mips16e_a0_a3_regs[end - 1]);
7676 /* Save or restore $31. */
7677 if (BITSET_P (info.mask, 31))
7678 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7683 /* Return true if the current function has an insn that implicitly
7687 mips_function_has_gp_insn (void)
7689 /* Don't bother rechecking if we found one last time. */
7690 if (!cfun->machine->has_gp_insn_p)
7694 push_topmost_sequence ();
7695 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7697 && GET_CODE (PATTERN (insn)) != USE
7698 && GET_CODE (PATTERN (insn)) != CLOBBER
7699 && (get_attr_got (insn) != GOT_UNSET
7700 || small_data_pattern (PATTERN (insn), VOIDmode)))
7702 pop_topmost_sequence ();
7704 cfun->machine->has_gp_insn_p = (insn != 0);
7706 return cfun->machine->has_gp_insn_p;
7710 /* Return the register that should be used as the global pointer
7711 within this function. Return 0 if the function doesn't need
7712 a global pointer. */
7715 mips_global_pointer (void)
7719 /* $gp is always available unless we're using a GOT. */
7720 if (!TARGET_USE_GOT)
7721 return GLOBAL_POINTER_REGNUM;
7723 /* We must always provide $gp when it is used implicitly. */
7724 if (!TARGET_EXPLICIT_RELOCS)
7725 return GLOBAL_POINTER_REGNUM;
7727 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7729 if (current_function_profile)
7730 return GLOBAL_POINTER_REGNUM;
7732 /* If the function has a nonlocal goto, $gp must hold the correct
7733 global pointer for the target function. */
7734 if (current_function_has_nonlocal_goto)
7735 return GLOBAL_POINTER_REGNUM;
7737 /* If the gp is never referenced, there's no need to initialize it.
7738 Note that reload can sometimes introduce constant pool references
7739 into a function that otherwise didn't need them. For example,
7740 suppose we have an instruction like:
7742 (set (reg:DF R1) (float:DF (reg:SI R2)))
7744 If R2 turns out to be constant such as 1, the instruction may have a
7745 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7746 using this constant if R2 doesn't get allocated to a register.
7748 In cases like these, reload will have added the constant to the pool
7749 but no instruction will yet refer to it. */
7750 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7751 && !current_function_uses_const_pool
7752 && !mips_function_has_gp_insn ())
7755 /* We need a global pointer, but perhaps we can use a call-clobbered
7756 register instead of $gp. */
7757 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7758 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7759 if (!df_regs_ever_live_p (regno)
7760 && call_really_used_regs[regno]
7761 && !fixed_regs[regno]
7762 && regno != PIC_FUNCTION_ADDR_REGNUM)
7765 return GLOBAL_POINTER_REGNUM;
7768 /* Return true if the current function returns its value in a floating-point
7769 register in MIPS16 mode. */
7772 mips16_cfun_returns_in_fpr_p (void)
7774 tree return_type = DECL_RESULT (current_function_decl);
7775 return (TARGET_MIPS16
7776 && TARGET_HARD_FLOAT_ABI
7777 && !aggregate_value_p (return_type, current_function_decl)
7778 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7782 /* Return true if the current function must save REGNO. */
7785 mips_save_reg_p (unsigned int regno)
7787 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7788 if we have not chosen a call-clobbered substitute. */
7789 if (regno == GLOBAL_POINTER_REGNUM)
7790 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7792 /* Check call-saved registers. */
7793 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7794 && !call_really_used_regs[regno])
7797 /* Save both registers in an FPR pair if either one is used. This is
7798 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7799 register to be used without the even register. */
7800 if (FP_REG_P (regno)
7801 && MAX_FPRS_PER_FMT == 2
7802 && df_regs_ever_live_p (regno + 1)
7803 && !call_really_used_regs[regno + 1])
7806 /* We need to save the old frame pointer before setting up a new one. */
7807 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7810 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7811 if (current_function_profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7814 /* We need to save the incoming return address if it is ever clobbered
7815 within the function, if __builtin_eh_return is being used to set a
7816 different return address, or if a stub is being used to return a
7818 if (regno == GP_REG_FIRST + 31
7819 && (df_regs_ever_live_p (regno)
7820 || current_function_calls_eh_return
7821 || mips16_cfun_returns_in_fpr_p ()))
7827 /* Populate the current function's mips_frame_info structure.
7829 MIPS stack frames look like:
7831 +-------------------------------+
7833 | incoming stack arguments |
7835 +-------------------------------+
7837 | caller-allocated save area |
7838 A | for register arguments |
7840 +-------------------------------+ <-- incoming stack pointer
7842 | callee-allocated save area |
7843 B | for arguments that are |
7844 | split between registers and |
7847 +-------------------------------+ <-- arg_pointer_rtx
7849 C | callee-allocated save area |
7850 | for register varargs |
7852 +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
7853 | | + UNITS_PER_HWFPVALUE
7856 +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
7857 | | + UNITS_PER_WORD
7860 +-------------------------------+
7862 | local variables | | var_size
7864 +-------------------------------+
7866 | $gp save area | | cprestore_size
7868 P +-------------------------------+ <-- hard_frame_pointer_rtx for
7870 | outgoing stack arguments |
7872 +-------------------------------+
7874 | caller-allocated save area |
7875 | for register arguments |
7877 +-------------------------------+ <-- stack_pointer_rtx
7879 hard_frame_pointer_rtx for
7882 At least two of A, B and C will be empty.
7884 Dynamic stack allocations such as alloca insert data at point P.
7885 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
7886 hard_frame_pointer_rtx unchanged. */
7889 mips_compute_frame_info (void)
7891 struct mips_frame_info *frame;
7892 HOST_WIDE_INT offset, size;
7893 unsigned int regno, i;
7895 frame = &cfun->machine->frame;
7896 memset (frame, 0, sizeof (*frame));
7897 size = get_frame_size ();
7899 cfun->machine->global_pointer = mips_global_pointer ();
7901 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
7902 area and the $gp save slot. This area isn't needed in leaf functions,
7903 but if the target-independent frame size is nonzero, we're committed
7904 to allocating it anyway. */
7905 if (size == 0 && current_function_is_leaf)
7907 /* The MIPS 3.0 linker does not like functions that dynamically
7908 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7909 looks like we are trying to create a second frame pointer to the
7910 function, so allocate some stack space to make it happy. */
7911 if (current_function_calls_alloca)
7912 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
7914 frame->args_size = 0;
7915 frame->cprestore_size = 0;
7919 frame->args_size = current_function_outgoing_args_size;
7920 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
7922 offset = frame->args_size + frame->cprestore_size;
7924 /* Move above the local variables. */
7925 frame->var_size = MIPS_STACK_ALIGN (size);
7926 offset += frame->var_size;
7928 /* Find out which GPRs we need to save. */
7929 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7930 if (mips_save_reg_p (regno))
7933 frame->mask |= 1 << (regno - GP_REG_FIRST);
7936 /* If this function calls eh_return, we must also save and restore the
7937 EH data registers. */
7938 if (current_function_calls_eh_return)
7939 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
7942 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
7945 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7946 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7947 save all later registers too. */
7948 if (GENERATE_MIPS16E_SAVE_RESTORE)
7950 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
7951 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
7952 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
7953 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
7956 /* Move above the GPR save area. */
7957 if (frame->num_gp > 0)
7959 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
7960 frame->gp_sp_offset = offset - UNITS_PER_WORD;
7963 /* Find out which FPRs we need to save. This loop must iterate over
7964 the same space as its companion in mips_for_each_saved_reg. */
7965 if (TARGET_HARD_FLOAT)
7966 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
7967 if (mips_save_reg_p (regno))
7969 frame->num_fp += MAX_FPRS_PER_FMT;
7970 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
7973 /* Move above the FPR save area. */
7974 if (frame->num_fp > 0)
7976 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
7977 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
7980 /* Move above the callee-allocated varargs save area. */
7981 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7982 frame->arg_pointer_offset = offset;
7984 /* Move above the callee-allocated area for pretend stack arguments. */
7985 offset += current_function_pretend_args_size;
7986 frame->total_size = offset;
7988 /* Work out the offsets of the save areas from the top of the frame. */
7989 if (frame->gp_sp_offset > 0)
7990 frame->gp_save_offset = frame->gp_sp_offset - offset;
7991 if (frame->fp_sp_offset > 0)
7992 frame->fp_save_offset = frame->fp_sp_offset - offset;
7994 /* MIPS16 code offsets the frame pointer by the size of the outgoing
7995 arguments. This tends to increase the chances of using unextended
7996 instructions for local variables and incoming arguments. */
7998 frame->hard_frame_pointer_offset = frame->args_size;
8001 /* Return the style of GP load sequence that is being used for the
8002 current function. */
8004 enum mips_loadgp_style
8005 mips_current_loadgp_style (void)
8007 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
8013 if (TARGET_ABSOLUTE_ABICALLS)
8014 return LOADGP_ABSOLUTE;
8016 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8019 /* Implement FRAME_POINTER_REQUIRED. */
8022 mips_frame_pointer_required (void)
8024 /* If the function contains dynamic stack allocations, we need to
8025 use the frame pointer to access the static parts of the frame. */
8026 if (current_function_calls_alloca)
8029 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8030 reload may be unable to compute the address of a local variable,
8031 since there is no way to add a large constant to the stack pointer
8032 without using a second temporary register. */
8035 mips_compute_frame_info ();
8036 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8043 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
8044 pointer or argument pointer. TO is either the stack pointer or
8045 hard frame pointer. */
8048 mips_initial_elimination_offset (int from, int to)
8050 HOST_WIDE_INT offset;
8052 mips_compute_frame_info ();
8054 /* Set OFFSET to the offset from the soft frame pointer, which is also
8055 the offset from the end-of-prologue stack pointer. */
8058 case FRAME_POINTER_REGNUM:
8062 case ARG_POINTER_REGNUM:
8063 offset = cfun->machine->frame.arg_pointer_offset;
8070 if (to == HARD_FRAME_POINTER_REGNUM)
8071 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8076 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
8077 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
8080 mips_extra_live_on_entry (bitmap regs)
8082 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
8083 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8086 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
8087 back to a previous frame. */
8090 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8095 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8098 /* Emit code to change the current function's return address to
8099 ADDRESS. SCRATCH is available as a scratch register, if needed.
8100 ADDRESS and SCRATCH are both word-mode GPRs. */
8103 mips_set_return_address (rtx address, rtx scratch)
8107 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
8108 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8109 cfun->machine->frame.gp_sp_offset);
8111 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
8114 /* Restore $gp from its save slot. Valid only when using o32 or
8118 mips_restore_gp (void)
8122 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8124 address = mips_add_offset (pic_offset_table_rtx,
8125 frame_pointer_needed
8126 ? hard_frame_pointer_rtx
8127 : stack_pointer_rtx,
8128 current_function_outgoing_args_size);
8129 slot = gen_rtx_MEM (Pmode, address);
8131 mips_emit_move (pic_offset_table_rtx, slot);
8132 if (!TARGET_EXPLICIT_RELOCS)
8133 emit_insn (gen_blockage ());
8136 /* A function to save or store a register. The first argument is the
8137 register and the second is the stack slot. */
8138 typedef void (*mips_save_restore_fn) (rtx, rtx);
8140 /* Use FN to save or restore register REGNO. MODE is the register's
8141 mode and OFFSET is the offset of its save slot from the current
8145 mips_save_restore_reg (enum machine_mode mode, int regno,
8146 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8150 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8152 fn (gen_rtx_REG (mode, regno), mem);
8156 /* Call FN for each register that is saved by the current function.
8157 SP_OFFSET is the offset of the current stack pointer from the start
8161 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8163 enum machine_mode fpr_mode;
8164 HOST_WIDE_INT offset;
8167 /* Save registers starting from high to low. The debuggers prefer at least
8168 the return register be stored at func+4, and also it allows us not to
8169 need a nop in the epilogue if at least one register is reloaded in
8170 addition to return address. */
8171 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8172 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8173 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8175 mips_save_restore_reg (gpr_mode, regno, offset, fn);
8176 offset -= GET_MODE_SIZE (gpr_mode);
8179 /* This loop must iterate over the same space as its companion in
8180 mips_compute_frame_info. */
8181 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8182 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8183 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
8184 regno >= FP_REG_FIRST;
8185 regno -= MAX_FPRS_PER_FMT)
8186 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8188 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8189 offset -= GET_MODE_SIZE (fpr_mode);
8193 /* If we're generating n32 or n64 abicalls, and the current function
8194 does not use $28 as its global pointer, emit a cplocal directive.
8195 Use pic_offset_table_rtx as the argument to the directive. */
8198 mips_output_cplocal (void)
8200 if (!TARGET_EXPLICIT_RELOCS
8201 && cfun->machine->global_pointer > 0
8202 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8203 output_asm_insn (".cplocal %+", 0);
8206 /* Set up the stack and frame (if desired) for the function. */
8209 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8212 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
8214 #ifdef SDB_DEBUGGING_INFO
8215 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8216 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8219 /* In mips16 mode, we may need to generate a 32 bit to handle
8220 floating point arguments. The linker will arrange for any 32-bit
8221 functions to call this stub, which will then jump to the 16-bit
8224 && TARGET_HARD_FLOAT_ABI
8225 && current_function_args_info.fp_code != 0)
8226 build_mips16_function_stub (file);
8228 /* Select the mips16 mode for this function. */
8230 fprintf (file, "\t.set\tmips16\n");
8232 fprintf (file, "\t.set\tnomips16\n");
8234 if (!FUNCTION_NAME_ALREADY_DECLARED)
8236 /* Get the function name the same way that toplev.c does before calling
8237 assemble_start_function. This is needed so that the name used here
8238 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8239 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8241 if (!flag_inhibit_size_directive)
8243 fputs ("\t.ent\t", file);
8244 assemble_name (file, fnname);
8248 assemble_name (file, fnname);
8249 fputs (":\n", file);
8252 /* Stop mips_file_end from treating this function as external. */
8253 if (TARGET_IRIX && mips_abi == ABI_32)
8254 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8256 if (!flag_inhibit_size_directive)
8258 /* .frame FRAMEREG, FRAMESIZE, RETREG */
8260 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8261 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
8262 ", args= " HOST_WIDE_INT_PRINT_DEC
8263 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8264 (reg_names[(frame_pointer_needed)
8265 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
8266 (frame_pointer_needed
8267 ? tsize - cfun->machine->frame.hard_frame_pointer_offset
8269 reg_names[GP_REG_FIRST + 31],
8270 cfun->machine->frame.var_size,
8271 cfun->machine->frame.num_gp,
8272 cfun->machine->frame.num_fp,
8273 cfun->machine->frame.args_size,
8274 cfun->machine->frame.cprestore_size);
8276 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
8277 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8278 cfun->machine->frame.mask,
8279 cfun->machine->frame.gp_save_offset);
8280 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8281 cfun->machine->frame.fmask,
8282 cfun->machine->frame.fp_save_offset);
8285 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
8286 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
8289 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8291 /* Handle the initialization of $gp for SVR4 PIC. */
8292 if (!cfun->machine->all_noreorder_p)
8293 output_asm_insn ("%(.cpload\t%^%)", 0);
8295 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8297 else if (cfun->machine->all_noreorder_p)
8298 output_asm_insn ("%(%<", 0);
8300 /* Tell the assembler which register we're using as the global
8301 pointer. This is needed for thunks, since they can use either
8302 explicit relocs or assembler macros. */
8303 mips_output_cplocal ();
8306 /* Do any necessary cleanup after a function to restore stack, frame,
8309 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8312 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8313 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8315 /* Reinstate the normal $gp. */
8316 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8317 mips_output_cplocal ();
8319 if (cfun->machine->all_noreorder_p)
8321 /* Avoid using %>%) since it adds excess whitespace. */
8322 output_asm_insn (".set\tmacro", 0);
8323 output_asm_insn (".set\treorder", 0);
8324 set_noreorder = set_nomacro = 0;
8327 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8331 /* Get the function name the same way that toplev.c does before calling
8332 assemble_start_function. This is needed so that the name used here
8333 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8334 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8335 fputs ("\t.end\t", file);
8336 assemble_name (file, fnname);
8341 /* Save register REG to MEM. Make the instruction frame-related. */
8344 mips_save_reg (rtx reg, rtx mem)
8346 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8350 if (mips_split_64bit_move_p (mem, reg))
8351 mips_split_doubleword_move (mem, reg);
8353 mips_emit_move (mem, reg);
8355 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
8356 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
8357 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8362 && REGNO (reg) != GP_REG_FIRST + 31
8363 && !M16_REG_P (REGNO (reg)))
8365 /* Save a non-mips16 register by moving it through a temporary.
8366 We don't need to do this for $31 since there's a special
8367 instruction for it. */
8368 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8369 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8372 mips_emit_move (mem, reg);
8374 mips_set_frame_expr (mips_frame_set (mem, reg));
8378 /* The __gnu_local_gp symbol. */
8380 static GTY(()) rtx mips_gnu_local_gp;
8382 /* If we're generating n32 or n64 abicalls, emit instructions
8383 to set up the global pointer. */
8386 mips_emit_loadgp (void)
8388 rtx addr, offset, incoming_address, base, index;
8390 switch (mips_current_loadgp_style ())
8392 case LOADGP_ABSOLUTE:
8393 if (mips_gnu_local_gp == NULL)
8395 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8396 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8398 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
8402 addr = XEXP (DECL_RTL (current_function_decl), 0);
8403 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8404 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8405 emit_insn (gen_loadgp_newabi (offset, incoming_address));
8406 if (!TARGET_EXPLICIT_RELOCS)
8407 emit_insn (gen_loadgp_blockage ());
8411 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
8412 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
8413 emit_insn (gen_loadgp_rtp (base, index));
8414 if (!TARGET_EXPLICIT_RELOCS)
8415 emit_insn (gen_loadgp_blockage ());
8423 /* Expand the prologue into a bunch of separate insns. */
8426 mips_expand_prologue (void)
8432 if (cfun->machine->global_pointer > 0)
8433 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8435 size = cfun->machine->frame.total_size;
8437 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8438 bytes beforehand; this is enough to cover the register save area
8439 without going out of range. */
8440 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8442 HOST_WIDE_INT step1;
8444 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8446 if (GENERATE_MIPS16E_SAVE_RESTORE)
8448 HOST_WIDE_INT offset;
8449 unsigned int mask, regno;
8451 /* Try to merge argument stores into the save instruction. */
8452 nargs = mips16e_collect_argument_saves ();
8454 /* Build the save instruction. */
8455 mask = cfun->machine->frame.mask;
8456 insn = mips16e_build_save_restore (false, &mask, &offset,
8458 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8461 /* Check if we need to save other registers. */
8462 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8463 if (BITSET_P (mask, regno - GP_REG_FIRST))
8465 offset -= GET_MODE_SIZE (gpr_mode);
8466 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8471 insn = gen_add3_insn (stack_pointer_rtx,
8474 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8476 mips_for_each_saved_reg (size, mips_save_reg);
8480 /* Allocate the rest of the frame. */
8483 if (SMALL_OPERAND (-size))
8484 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8486 GEN_INT (-size)))) = 1;
8489 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8492 /* There are no instructions to add or subtract registers
8493 from the stack pointer, so use the frame pointer as a
8494 temporary. We should always be using a frame pointer
8495 in this case anyway. */
8496 gcc_assert (frame_pointer_needed);
8497 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8498 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8499 hard_frame_pointer_rtx,
8500 MIPS_PROLOGUE_TEMP (Pmode)));
8501 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8504 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8506 MIPS_PROLOGUE_TEMP (Pmode)));
8508 /* Describe the combined effect of the previous instructions. */
8510 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8511 plus_constant (stack_pointer_rtx, -size)));
8515 /* Set up the frame pointer, if we're using one. */
8516 if (frame_pointer_needed)
8518 HOST_WIDE_INT offset;
8520 offset = cfun->machine->frame.hard_frame_pointer_offset;
8523 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8524 RTX_FRAME_RELATED_P (insn) = 1;
8526 else if (SMALL_OPERAND (offset))
8528 insn = gen_add3_insn (hard_frame_pointer_rtx,
8529 stack_pointer_rtx, GEN_INT (offset));
8530 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8534 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
8535 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8536 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8537 hard_frame_pointer_rtx,
8538 MIPS_PROLOGUE_TEMP (Pmode)));
8540 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8541 plus_constant (stack_pointer_rtx, offset)));
8545 mips_emit_loadgp ();
8547 /* If generating o32/o64 abicalls, save $gp on the stack. */
8548 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8549 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8551 /* If we are profiling, make sure no instructions are scheduled before
8552 the call to mcount. */
8554 if (current_function_profile)
8555 emit_insn (gen_blockage ());
8558 /* Emit instructions to restore register REG from slot MEM. */
8561 mips_restore_reg (rtx reg, rtx mem)
8563 /* There's no mips16 instruction to load $31 directly. Load into
8564 $7 instead and adjust the return insn appropriately. */
8565 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8566 reg = gen_rtx_REG (GET_MODE (reg), 7);
8568 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8570 /* Can't restore directly; move through a temporary. */
8571 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8572 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8575 mips_emit_move (reg, mem);
8579 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8580 if this epilogue precedes a sibling call, false if it is for a normal
8581 "epilogue" pattern. */
8584 mips_expand_epilogue (int sibcall_p)
8586 HOST_WIDE_INT step1, step2;
8589 if (!sibcall_p && mips_can_use_return_insn ())
8591 emit_jump_insn (gen_return ());
8595 /* In mips16 mode, if the return value should go into a floating-point
8596 register, we need to call a helper routine to copy it over. */
8597 if (mips16_cfun_returns_in_fpr_p ())
8606 enum machine_mode return_mode;
8608 return_type = DECL_RESULT (current_function_decl);
8609 return_mode = DECL_MODE (return_type);
8611 name = ACONCAT (("__mips16_ret_",
8612 mips16_call_stub_mode_suffix (return_mode),
8614 id = get_identifier (name);
8615 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8616 retval = gen_rtx_REG (return_mode, GP_RETURN);
8617 call = gen_call_value_internal (retval, func, const0_rtx);
8618 insn = emit_call_insn (call);
8619 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
8622 /* Split the frame into two. STEP1 is the amount of stack we should
8623 deallocate before restoring the registers. STEP2 is the amount we
8624 should deallocate afterwards.
8626 Start off by assuming that no registers need to be restored. */
8627 step1 = cfun->machine->frame.total_size;
8630 /* Work out which register holds the frame address. */
8631 if (!frame_pointer_needed)
8632 base = stack_pointer_rtx;
8635 base = hard_frame_pointer_rtx;
8636 step1 -= cfun->machine->frame.hard_frame_pointer_offset;
8639 /* If we need to restore registers, deallocate as much stack as
8640 possible in the second step without going out of range. */
8641 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8643 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8647 /* Set TARGET to BASE + STEP1. */
8653 /* Get an rtx for STEP1 that we can add to BASE. */
8654 adjust = GEN_INT (step1);
8655 if (!SMALL_OPERAND (step1))
8657 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8658 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8661 /* Normal mode code can copy the result straight into $sp. */
8663 target = stack_pointer_rtx;
8665 emit_insn (gen_add3_insn (target, base, adjust));
8668 /* Copy TARGET into the stack pointer. */
8669 if (target != stack_pointer_rtx)
8670 mips_emit_move (stack_pointer_rtx, target);
8672 /* If we're using addressing macros, $gp is implicitly used by all
8673 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8675 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8676 emit_insn (gen_blockage ());
8678 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8680 unsigned int regno, mask;
8681 HOST_WIDE_INT offset;
8684 /* Generate the restore instruction. */
8685 mask = cfun->machine->frame.mask;
8686 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8688 /* Restore any other registers manually. */
8689 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8690 if (BITSET_P (mask, regno - GP_REG_FIRST))
8692 offset -= GET_MODE_SIZE (gpr_mode);
8693 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8696 /* Restore the remaining registers and deallocate the final bit
8698 emit_insn (restore);
8702 /* Restore the registers. */
8703 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8706 /* Deallocate the final bit of the frame. */
8708 emit_insn (gen_add3_insn (stack_pointer_rtx,
8713 /* Add in the __builtin_eh_return stack adjustment. We need to
8714 use a temporary in mips16 code. */
8715 if (current_function_calls_eh_return)
8719 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8720 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8721 MIPS_EPILOGUE_TEMP (Pmode),
8722 EH_RETURN_STACKADJ_RTX));
8723 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8726 emit_insn (gen_add3_insn (stack_pointer_rtx,
8728 EH_RETURN_STACKADJ_RTX));
8733 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8734 path will restore the return address into $7 rather than $31. */
8736 && !GENERATE_MIPS16E_SAVE_RESTORE
8737 && (cfun->machine->frame.mask & RA_MASK) != 0)
8738 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8739 GP_REG_FIRST + 7)));
8741 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8742 GP_REG_FIRST + 31)));
8746 /* Return nonzero if this function is known to have a null epilogue.
8747 This allows the optimizer to omit jumps to jumps if no stack
8751 mips_can_use_return_insn (void)
8753 if (! reload_completed)
8756 if (current_function_profile)
8759 /* In mips16 mode, a function that returns a floating point value
8760 needs to arrange to copy the return value into the floating point
8762 if (mips16_cfun_returns_in_fpr_p ())
8765 return cfun->machine->frame.total_size == 0;
8768 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
8769 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
8770 they only hold condition code modes, and CCmode is always considered to
8771 be 4 bytes wide. All other registers are word sized. */
8774 mips_hard_regno_nregs (int regno, enum machine_mode mode)
8776 if (ST_REG_P (regno))
8777 return ((GET_MODE_SIZE (mode) + 3) / 4);
8778 else if (! FP_REG_P (regno))
8779 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
8781 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
8784 /* Implement CLASS_MAX_NREGS.
8786 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8788 - ST_REGS are always hold CCmode values, and CCmode values are
8789 considered to be 4 bytes wide.
8791 All other register classes are covered by UNITS_PER_WORD. Note that
8792 this is true even for unions of integer and float registers when the
8793 latter are smaller than the former. The only supported combination
8794 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8795 words but 32-bit float registers. A word-based calculation is correct
8796 in that case since -msingle-float disallows multi-FPR values. */
8799 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8800 enum machine_mode mode)
8802 if (class == ST_REGS)
8803 return (GET_MODE_SIZE (mode) + 3) / 4;
8804 else if (class == FP_REGS)
8805 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8807 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8810 /* Return true if registers of class CLASS cannot change from mode FROM
8814 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
8815 enum machine_mode to ATTRIBUTE_UNUSED,
8816 enum reg_class class)
8818 /* There are several problems with changing the modes of values
8819 in floating-point registers:
8821 - When a multi-word value is stored in paired floating-point
8822 registers, the first register always holds the low word.
8823 We therefore can't allow FPRs to change between single-word
8824 and multi-word modes on big-endian targets.
8826 - GCC assumes that each word of a multiword register can be accessed
8827 individually using SUBREGs. This is not true for floating-point
8828 registers if they are bigger than a word.
8830 - Loading a 32-bit value into a 64-bit floating-point register
8831 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8832 We can't allow FPRs to change from SImode to to a wider mode on
8835 - If the FPU has already interpreted a value in one format, we must
8836 not ask it to treat the value as having a different format.
8838 We therefore only allow changes between 4-byte and smaller integer
8839 values, all of which have the "W" format as far as the FPU is
8841 return (reg_classes_intersect_p (FP_REGS, class)
8842 && (GET_MODE_CLASS (from) != MODE_INT
8843 || GET_MODE_CLASS (to) != MODE_INT
8844 || GET_MODE_SIZE (from) > 4
8845 || GET_MODE_SIZE (to) > 4));
8848 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
8851 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
8856 return TARGET_HARD_FLOAT;
8859 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
8862 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
8869 /* Implement PREFERRED_RELOAD_CLASS. */
8872 mips_preferred_reload_class (rtx x, enum reg_class class)
8874 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8877 if (reg_class_subset_p (FP_REGS, class)
8878 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
8881 if (reg_class_subset_p (GR_REGS, class))
8884 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8890 /* Return a number assessing the cost of moving a register in class
8891 FROM to class TO. The classes are expressed using the enumeration
8892 values such as `GENERAL_REGS'. A value of 2 is the default; other
8893 values are interpreted relative to that.
8895 It is not required that the cost always equal 2 when FROM is the
8896 same as TO; on some machines it is expensive to move between
8897 registers if they are not general registers.
8899 If reload sees an insn consisting of a single `set' between two
8900 hard registers, and if `REGISTER_MOVE_COST' applied to their
8901 classes returns a value of 2, reload does not check to ensure that
8902 the constraints of the insn are met. Setting a cost of other than
8903 2 will allow reload to verify that the constraints are met. You
8904 should do this if the `movM' pattern's constraints do not allow
8907 ??? We make the cost of moving from HI/LO into general
8908 registers the same as for one of moving general registers to
8909 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8910 pseudo to HI/LO. This might hurt optimizations though, it
8911 isn't clear if it is wise. And it might not work in all cases. We
8912 could solve the DImode LO reg problem by using a multiply, just
8913 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8914 problem by using divide instructions. divu puts the remainder in
8915 the HI reg, so doing a divide by -1 will move the value in the HI
8916 reg for all values except -1. We could handle that case by using a
8917 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8918 a compare/branch to test the input value to see which instruction
8919 we need to use. This gets pretty messy, but it is feasible. */
8922 mips_register_move_cost (enum machine_mode mode,
8923 enum reg_class to, enum reg_class from)
8927 if (reg_class_subset_p (from, GENERAL_REGS)
8928 && reg_class_subset_p (to, GENERAL_REGS))
8930 if (reg_class_subset_p (from, M16_REGS)
8931 || reg_class_subset_p (to, M16_REGS))
8937 else if (reg_class_subset_p (from, GENERAL_REGS))
8939 if (reg_class_subset_p (to, GENERAL_REGS))
8941 if (reg_class_subset_p (to, FP_REGS))
8943 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
8945 if (reg_class_subset_p (to, ACC_REGS))
8948 else if (reg_class_subset_p (to, GENERAL_REGS))
8950 if (reg_class_subset_p (from, FP_REGS))
8952 if (reg_class_subset_p (from, ST_REGS))
8953 /* LUI followed by MOVF. */
8955 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
8957 if (reg_class_subset_p (from, ACC_REGS))
8960 else if (reg_class_subset_p (from, FP_REGS))
8962 if (reg_class_subset_p (to, FP_REGS)
8963 && mips_mode_ok_for_mov_fmt_p (mode))
8965 if (reg_class_subset_p (to, ST_REGS))
8966 /* An expensive sequence. */
8973 /* This function returns the register class required for a secondary
8974 register when copying between one of the registers in CLASS, and X,
8975 using MODE. If IN_P is nonzero, the copy is going from X to the
8976 register, otherwise the register is the source. A return value of
8977 NO_REGS means that no secondary register is required. */
8980 mips_secondary_reload_class (enum reg_class class,
8981 enum machine_mode mode, rtx x, int in_p)
8985 /* If X is a constant that cannot be loaded into $25, it must be loaded
8986 into some other GPR. No other register class allows a direct move. */
8987 if (mips_dangerous_for_la25_p (x))
8988 return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
8990 regno = true_regnum (x);
8993 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
8994 if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
8997 /* We can't really copy to HI or LO at all in MIPS16 mode. */
8998 if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
9004 /* Copying from accumulator registers to anywhere other than a general
9005 register requires a temporary general register. */
9006 if (reg_class_subset_p (class, ACC_REGS))
9007 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9008 if (ACC_REG_P (regno))
9009 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9011 /* We can only copy a value to a condition code register from a
9012 floating point register, and even then we require a scratch
9013 floating point register. We can only copy a value out of a
9014 condition code register into a general register. */
9015 if (reg_class_subset_p (class, ST_REGS))
9019 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9021 if (ST_REG_P (regno))
9025 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9028 if (reg_class_subset_p (class, FP_REGS))
9031 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9032 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9033 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9036 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9037 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9040 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9041 /* We can force the constant to memory and use lwc1
9042 and ldc1. As above, we will use pairs of lwc1s if
9043 ldc1 is not supported. */
9046 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9047 /* In this case we can use mov.fmt. */
9050 /* Otherwise, we need to reload through an integer register. */
9053 if (FP_REG_P (regno))
9054 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9059 /* SImode values are represented as sign-extended to DImode. */
9062 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9064 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9071 mips_valid_pointer_mode (enum machine_mode mode)
9073 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9076 /* Target hook for vector_mode_supported_p. */
9079 mips_vector_mode_supported_p (enum machine_mode mode)
9084 return TARGET_PAIRED_SINGLE_FLOAT;
9101 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9104 mips_scalar_mode_supported_p (enum machine_mode mode)
9106 if (ALL_FIXED_POINT_MODE_P (mode)
9107 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9110 return default_scalar_mode_supported_p (mode);
9112 /* This function does three things:
9114 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9115 - Register the mips16 hardware floating point stubs.
9116 - Register the gofast functions if selected using --enable-gofast. */
9118 #include "config/gofast.h"
9121 mips_init_libfuncs (void)
9123 if (TARGET_FIX_VR4120)
9125 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9126 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9129 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9131 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9132 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9133 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9134 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9136 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9137 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9138 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9139 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9140 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9141 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9142 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9144 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9145 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9146 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9148 if (TARGET_DOUBLE_FLOAT)
9150 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9151 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9152 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9153 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9155 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9156 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9157 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9158 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9159 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9160 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9161 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9163 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9164 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9166 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9167 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9168 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
9172 gofast_maybe_init_libfuncs ();
9175 /* Return the length of INSN. LENGTH is the initial length computed by
9176 attributes in the machine-description file. */
9179 mips_adjust_insn_length (rtx insn, int length)
9181 /* A unconditional jump has an unfilled delay slot if it is not part
9182 of a sequence. A conditional jump normally has a delay slot, but
9183 does not on MIPS16. */
9184 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9187 /* See how many nops might be needed to avoid hardware hazards. */
9188 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9189 switch (get_attr_hazard (insn))
9203 /* All MIPS16 instructions are a measly two bytes. */
9211 /* Return an asm sequence to start a noat block and load the address
9212 of a label into $1. */
9215 mips_output_load_label (void)
9217 if (TARGET_EXPLICIT_RELOCS)
9221 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9224 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9227 if (ISA_HAS_LOAD_DELAY)
9228 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9229 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9233 if (Pmode == DImode)
9234 return "%[dla\t%@,%0";
9236 return "%[la\t%@,%0";
9240 /* Return the assembly code for INSN, which has the operands given by
9241 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9242 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9243 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9244 version of BRANCH_IF_TRUE. */
9247 mips_output_conditional_branch (rtx insn, rtx *operands,
9248 const char *branch_if_true,
9249 const char *branch_if_false)
9251 unsigned int length;
9252 rtx taken, not_taken;
9254 length = get_attr_length (insn);
9257 /* Just a simple conditional branch. */
9258 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9259 return branch_if_true;
9262 /* Generate a reversed branch around a direct jump. This fallback does
9263 not use branch-likely instructions. */
9264 mips_branch_likely = false;
9265 not_taken = gen_label_rtx ();
9266 taken = operands[1];
9268 /* Generate the reversed branch to NOT_TAKEN. */
9269 operands[1] = not_taken;
9270 output_asm_insn (branch_if_false, operands);
9272 /* If INSN has a delay slot, we must provide delay slots for both the
9273 branch to NOT_TAKEN and the conditional jump. We must also ensure
9274 that INSN's delay slot is executed in the appropriate cases. */
9277 /* This first delay slot will always be executed, so use INSN's
9278 delay slot if is not annulled. */
9279 if (!INSN_ANNULLED_BRANCH_P (insn))
9281 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9282 asm_out_file, optimize, 1, NULL);
9283 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9286 output_asm_insn ("nop", 0);
9287 fprintf (asm_out_file, "\n");
9290 /* Output the unconditional branch to TAKEN. */
9292 output_asm_insn ("j\t%0%/", &taken);
9295 output_asm_insn (mips_output_load_label (), &taken);
9296 output_asm_insn ("jr\t%@%]%/", 0);
9299 /* Now deal with its delay slot; see above. */
9302 /* This delay slot will only be executed if the branch is taken.
9303 Use INSN's delay slot if is annulled. */
9304 if (INSN_ANNULLED_BRANCH_P (insn))
9306 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9307 asm_out_file, optimize, 1, NULL);
9308 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9311 output_asm_insn ("nop", 0);
9312 fprintf (asm_out_file, "\n");
9315 /* Output NOT_TAKEN. */
9316 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9317 CODE_LABEL_NUMBER (not_taken));
9321 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9322 if some ordered condition is true. The condition is given by
9323 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9324 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9325 its second is always zero. */
9328 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9330 const char *branch[2];
9332 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9333 Make BRANCH[0] branch on the inverse condition. */
9334 switch (GET_CODE (operands[0]))
9336 /* These cases are equivalent to comparisons against zero. */
9338 inverted_p = !inverted_p;
9341 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9342 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9345 /* These cases are always true or always false. */
9347 inverted_p = !inverted_p;
9350 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9351 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9355 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9356 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9359 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9362 /* Used to output div or ddiv instruction DIVISION, which has the operands
9363 given by OPERANDS. Add in a divide-by-zero check if needed.
9365 When working around R4000 and R4400 errata, we need to make sure that
9366 the division is not immediately followed by a shift[1][2]. We also
9367 need to stop the division from being put into a branch delay slot[3].
9368 The easiest way to avoid both problems is to add a nop after the
9369 division. When a divide-by-zero check is needed, this nop can be
9370 used to fill the branch delay slot.
9372 [1] If a double-word or a variable shift executes immediately
9373 after starting an integer division, the shift may give an
9374 incorrect result. See quotations of errata #16 and #28 from
9375 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9376 in mips.md for details.
9378 [2] A similar bug to [1] exists for all revisions of the
9379 R4000 and the R4400 when run in an MC configuration.
9380 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9382 "19. In this following sequence:
9384 ddiv (or ddivu or div or divu)
9385 dsll32 (or dsrl32, dsra32)
9387 if an MPT stall occurs, while the divide is slipping the cpu
9388 pipeline, then the following double shift would end up with an
9391 Workaround: The compiler needs to avoid generating any
9392 sequence with divide followed by extended double shift."
9394 This erratum is also present in "MIPS R4400MC Errata, Processor
9395 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9396 & 3.0" as errata #10 and #4, respectively.
9398 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9399 (also valid for MIPS R4000MC processors):
9401 "52. R4000SC: This bug does not apply for the R4000PC.
9403 There are two flavors of this bug:
9405 1) If the instruction just after divide takes an RF exception
9406 (tlb-refill, tlb-invalid) and gets an instruction cache
9407 miss (both primary and secondary) and the line which is
9408 currently in secondary cache at this index had the first
9409 data word, where the bits 5..2 are set, then R4000 would
9410 get a wrong result for the div.
9415 ------------------- # end-of page. -tlb-refill
9420 ------------------- # end-of page. -tlb-invalid
9423 2) If the divide is in the taken branch delay slot, where the
9424 target takes RF exception and gets an I-cache miss for the
9425 exception vector or where I-cache miss occurs for the
9426 target address, under the above mentioned scenarios, the
9427 div would get wrong results.
9430 j r2 # to next page mapped or unmapped
9431 div r8,r9 # this bug would be there as long
9432 # as there is an ICache miss and
9433 nop # the "data pattern" is present
9436 beq r0, r0, NextPage # to Next page
9440 This bug is present for div, divu, ddiv, and ddivu
9443 Workaround: For item 1), OS could make sure that the next page
9444 after the divide instruction is also mapped. For item 2), the
9445 compiler could make sure that the divide instruction is not in
9446 the branch delay slot."
9448 These processors have PRId values of 0x00004220 and 0x00004300 for
9449 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9452 mips_output_division (const char *division, rtx *operands)
9457 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9459 output_asm_insn (s, operands);
9462 if (TARGET_CHECK_ZERO_DIV)
9466 output_asm_insn (s, operands);
9467 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9469 else if (GENERATE_DIVIDE_TRAPS)
9471 output_asm_insn (s, operands);
9476 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9477 output_asm_insn (s, operands);
9478 s = "break\t7%)\n1:";
9484 /* Return true if INSN is a multiply-add or multiply-subtract
9485 instruction and PREV assigns to the accumulator operand. */
9488 mips_linked_madd_p (rtx prev, rtx insn)
9492 x = single_set (insn);
9498 if (GET_CODE (x) == PLUS
9499 && GET_CODE (XEXP (x, 0)) == MULT
9500 && reg_set_p (XEXP (x, 1), prev))
9503 if (GET_CODE (x) == MINUS
9504 && GET_CODE (XEXP (x, 1)) == MULT
9505 && reg_set_p (XEXP (x, 0), prev))
9511 /* Implements a store data bypass check. We need this because the cprestore
9512 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
9513 default routine to abort. We just return false for that case. */
9514 /* ??? Should try to give a better result here than assuming false. */
9517 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9519 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9522 return ! store_data_bypass_p (out_insn, in_insn);
9525 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9526 dependencies have no cost, except on the 20Kc where output-dependence
9527 is treated like input-dependence. */
9530 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9531 rtx dep ATTRIBUTE_UNUSED, int cost)
9533 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
9536 if (REG_NOTE_KIND (link) != 0)
9541 /* Return the number of instructions that can be issued per cycle. */
9544 mips_issue_rate (void)
9548 case PROCESSOR_74KC:
9549 case PROCESSOR_74KF2_1:
9550 case PROCESSOR_74KF1_1:
9551 case PROCESSOR_74KF3_2:
9552 /* The 74k is not strictly quad-issue cpu, but can be seen as one
9553 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
9554 but in reality only a maximum of 3 insns can be issued as the
9555 floating point load/stores also require a slot in the AGEN pipe. */
9558 case PROCESSOR_20KC:
9559 case PROCESSOR_R4130:
9560 case PROCESSOR_R5400:
9561 case PROCESSOR_R5500:
9562 case PROCESSOR_R7000:
9563 case PROCESSOR_R9000:
9567 case PROCESSOR_SB1A:
9568 /* This is actually 4, but we get better performance if we claim 3.
9569 This is partly because of unwanted speculative code motion with the
9570 larger number, and partly because in most common cases we can't
9571 reach the theoretical max of 4. */
9579 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9580 be as wide as the scheduling freedom in the DFA. */
9583 mips_multipass_dfa_lookahead (void)
9585 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9592 /* Remove the instruction at index LOWER from ready queue READY and
9593 reinsert it in front of the instruction at index HIGHER. LOWER must
9597 mips_promote_ready (rtx *ready, int lower, int higher)
9602 new_head = ready[lower];
9603 for (i = lower; i < higher; i++)
9604 ready[i] = ready[i + 1];
9605 ready[i] = new_head;
9608 /* If the priority of the instruction at POS2 in the ready queue READY
9609 is within LIMIT units of that of the instruction at POS1, swap the
9610 instructions if POS2 is not already less than POS1. */
9613 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
9616 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
9620 ready[pos1] = ready[pos2];
9625 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9626 that may clobber hi or lo. */
9628 static rtx mips_macc_chains_last_hilo;
9630 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9631 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9634 mips_macc_chains_record (rtx insn)
9636 if (get_attr_may_clobber_hilo (insn))
9637 mips_macc_chains_last_hilo = insn;
9640 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9641 has NREADY elements, looking for a multiply-add or multiply-subtract
9642 instruction that is cumulative with mips_macc_chains_last_hilo.
9643 If there is one, promote it ahead of anything else that might
9644 clobber hi or lo. */
9647 mips_macc_chains_reorder (rtx *ready, int nready)
9651 if (mips_macc_chains_last_hilo != 0)
9652 for (i = nready - 1; i >= 0; i--)
9653 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9655 for (j = nready - 1; j > i; j--)
9656 if (recog_memoized (ready[j]) >= 0
9657 && get_attr_may_clobber_hilo (ready[j]))
9659 mips_promote_ready (ready, i, j);
9666 /* The last instruction to be scheduled. */
9668 static rtx vr4130_last_insn;
9670 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9671 points to an rtx that is initially an instruction. Nullify the rtx
9672 if the instruction uses the value of register X. */
9675 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9677 rtx *insn_ptr = data;
9680 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9684 /* Return true if there is true register dependence between vr4130_last_insn
9688 vr4130_true_reg_dependence_p (rtx insn)
9690 note_stores (PATTERN (vr4130_last_insn),
9691 vr4130_true_reg_dependence_p_1, &insn);
9695 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9696 the ready queue and that INSN2 is the instruction after it, return
9697 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9698 in which INSN1 and INSN2 can probably issue in parallel, but for
9699 which (INSN2, INSN1) should be less sensitive to instruction
9700 alignment than (INSN1, INSN2). See 4130.md for more details. */
9703 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9705 sd_iterator_def sd_it;
9708 /* Check for the following case:
9710 1) there is some other instruction X with an anti dependence on INSN1;
9711 2) X has a higher priority than INSN2; and
9712 3) X is an arithmetic instruction (and thus has no unit restrictions).
9714 If INSN1 is the last instruction blocking X, it would better to
9715 choose (INSN1, X) over (INSN2, INSN1). */
9716 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
9717 if (DEP_TYPE (dep) == REG_DEP_ANTI
9718 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
9719 && recog_memoized (DEP_CON (dep)) >= 0
9720 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
9723 if (vr4130_last_insn != 0
9724 && recog_memoized (insn1) >= 0
9725 && recog_memoized (insn2) >= 0)
9727 /* See whether INSN1 and INSN2 use different execution units,
9728 or if they are both ALU-type instructions. If so, they can
9729 probably execute in parallel. */
9730 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9731 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9732 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9734 /* If only one of the instructions has a dependence on
9735 vr4130_last_insn, prefer to schedule the other one first. */
9736 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9737 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9741 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9742 is not an ALU-type instruction and if INSN1 uses the same
9743 execution unit. (Note that if this condition holds, we already
9744 know that INSN2 uses a different execution unit.) */
9745 if (class1 != VR4130_CLASS_ALU
9746 && recog_memoized (vr4130_last_insn) >= 0
9747 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9754 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9755 queue with at least two instructions. Swap the first two if
9756 vr4130_swap_insns_p says that it could be worthwhile. */
9759 vr4130_reorder (rtx *ready, int nready)
9761 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9762 mips_promote_ready (ready, nready - 2, nready - 1);
9765 /* Record whether last 74k AGEN instruction was a load or store. */
9767 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
9769 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
9770 resets to TYPE_UNKNOWN state. */
9773 mips_74k_agen_init (rtx insn)
9775 if (!insn || !NONJUMP_INSN_P (insn))
9776 mips_last_74k_agen_insn = TYPE_UNKNOWN;
9777 else if (USEFUL_INSN_P (insn))
9779 enum attr_type type = get_attr_type (insn);
9780 if (type == TYPE_LOAD || type == TYPE_STORE)
9781 mips_last_74k_agen_insn = type;
9785 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
9786 loads to be grouped together, and multiple stores to be grouped
9787 together. Swap things around in the ready queue to make this happen. */
9790 mips_74k_agen_reorder (rtx *ready, int nready)
9793 int store_pos, load_pos;
9798 for (i = nready - 1; i >= 0; i--)
9800 rtx insn = ready[i];
9801 if (USEFUL_INSN_P (insn))
9802 switch (get_attr_type (insn))
9805 if (store_pos == -1)
9819 if (load_pos == -1 || store_pos == -1)
9822 switch (mips_last_74k_agen_insn)
9825 /* Prefer to schedule loads since they have a higher latency. */
9827 /* Swap loads to the front of the queue. */
9828 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
9831 /* Swap stores to the front of the queue. */
9832 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
9839 /* Implement TARGET_SCHED_INIT. */
9842 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9843 int max_ready ATTRIBUTE_UNUSED)
9845 mips_macc_chains_last_hilo = 0;
9846 vr4130_last_insn = 0;
9847 mips_74k_agen_init (NULL_RTX);
9850 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
9853 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9854 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
9856 if (!reload_completed
9859 mips_macc_chains_reorder (ready, *nreadyp);
9860 if (reload_completed
9862 && !TARGET_VR4130_ALIGN
9864 vr4130_reorder (ready, *nreadyp);
9866 mips_74k_agen_reorder (ready, *nreadyp);
9867 return mips_issue_rate ();
9870 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9873 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9877 mips_74k_agen_init (insn);
9878 switch (GET_CODE (PATTERN (insn)))
9882 /* Don't count USEs and CLOBBERs against the issue rate. */
9887 if (!reload_completed && TUNE_MACC_CHAINS)
9888 mips_macc_chains_record (insn);
9889 vr4130_last_insn = insn;
9895 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9896 return the first operand of the associated "pref" or "prefx" insn. */
9899 mips_prefetch_cookie (rtx write, rtx locality)
9901 /* store_streamed / load_streamed. */
9902 if (INTVAL (locality) <= 0)
9903 return GEN_INT (INTVAL (write) + 4);
9906 if (INTVAL (locality) <= 2)
9909 /* store_retained / load_retained. */
9910 return GEN_INT (INTVAL (write) + 6);
9913 /* MIPS builtin function support. */
9915 struct builtin_description
9917 /* The code of the main .md file instruction. See mips_builtin_type
9918 for more information. */
9919 enum insn_code icode;
9921 /* The floating-point comparison code to use with ICODE, if any. */
9922 enum mips_fp_condition cond;
9924 /* The name of the builtin function. */
9927 /* Specifies how the function should be expanded. */
9928 enum mips_builtin_type builtin_type;
9930 /* The function's prototype. */
9931 enum mips_function_type function_type;
9933 /* The target flags required for this function. */
9937 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9938 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9939 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9940 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9941 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
9943 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9945 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9946 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9947 "__builtin_mips_" #INSN "_" #COND "_s", \
9948 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9949 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9950 "__builtin_mips_" #INSN "_" #COND "_d", \
9951 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9953 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9954 The lower and upper forms require TARGET_FLAGS while the any and all
9955 forms require MASK_MIPS3D. */
9956 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9957 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9958 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9959 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9960 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9961 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9962 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9963 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9964 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9965 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9966 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9967 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9968 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9970 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9971 require MASK_MIPS3D. */
9972 #define CMP_4S_BUILTINS(INSN, COND) \
9973 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9974 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9975 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9977 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9978 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9979 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9982 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9983 instruction requires TARGET_FLAGS. */
9984 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9985 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9986 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9987 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9989 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9990 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9991 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9994 /* Define all the builtins related to c.cond.fmt condition COND. */
9995 #define CMP_BUILTINS(COND) \
9996 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9997 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9998 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9999 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10000 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
10001 CMP_4S_BUILTINS (c, COND), \
10002 CMP_4S_BUILTINS (cabs, COND)
10004 static const struct builtin_description mips_bdesc[] =
10006 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10007 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10008 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10009 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10010 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10011 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10012 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10013 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10015 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10016 MASK_PAIRED_SINGLE_FLOAT),
10017 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10018 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10019 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10020 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10022 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10023 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10024 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10025 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10026 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10027 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10029 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10030 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10031 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10032 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10033 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10034 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10036 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10039 /* Builtin functions for the SB-1 processor. */
10041 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10043 static const struct builtin_description sb1_bdesc[] =
10045 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10048 /* Builtin functions for DSP ASE. */
10050 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10051 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10052 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10053 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10054 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10056 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10057 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10058 builtin_description fields. */
10059 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10060 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10061 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10063 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10064 branch instruction. TARGET_FLAGS is a builtin_description field. */
10065 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10066 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10067 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10069 static const struct builtin_description dsp_bdesc[] =
10071 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10072 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10073 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10074 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10075 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10076 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10077 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10078 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10079 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10080 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10081 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10082 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10083 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10084 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10085 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10086 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10087 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10088 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10089 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10090 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10091 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10092 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10093 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10094 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10095 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10096 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10097 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10098 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10099 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10100 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10101 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10102 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10103 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10104 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10105 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10106 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10107 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10108 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10109 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10110 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10111 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10112 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10113 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10114 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10115 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10116 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10117 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10118 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10119 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10120 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10121 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10122 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10123 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10124 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10125 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10126 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10127 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10128 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10129 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10130 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10131 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10132 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10133 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10134 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10135 BPOSGE_BUILTIN (32, MASK_DSP),
10137 /* The following are for the MIPS DSP ASE REV 2. */
10138 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
10139 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10140 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10141 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10142 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10143 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10144 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10145 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10146 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10147 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10148 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10149 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10150 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10151 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10152 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10153 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10154 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10155 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10156 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10157 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10158 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10159 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
10160 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10161 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10162 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10163 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10164 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10165 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10166 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10167 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10168 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10169 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10170 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10171 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
10174 static const struct builtin_description dsp_32only_bdesc[] =
10176 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10177 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10178 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10179 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10180 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10181 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10182 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10183 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10184 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10185 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10186 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10187 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10188 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10189 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10190 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10191 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10192 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10193 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10194 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10195 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10196 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10198 /* The following are for the MIPS DSP ASE REV 2. */
10199 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10200 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10201 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10202 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10203 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10204 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10205 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10206 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
10207 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
10208 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10209 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10210 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10211 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10212 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10213 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
10216 /* This helps provide a mapping from builtin function codes to bdesc
10221 /* The builtin function table that this entry describes. */
10222 const struct builtin_description *bdesc;
10224 /* The number of entries in the builtin function table. */
10227 /* The target processor that supports these builtin functions.
10228 PROCESSOR_MAX means we enable them for all processors. */
10229 enum processor_type proc;
10231 /* If the target has these flags, this builtin function table
10232 will not be supported. */
10233 int unsupported_target_flags;
10236 static const struct bdesc_map bdesc_arrays[] =
10238 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
10239 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
10240 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
10241 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
10245 /* MODE is a vector mode whose elements have type TYPE. Return the type
10246 of the vector itself. */
10249 mips_builtin_vector_type (tree type, enum machine_mode mode)
10251 static tree types[(int) MAX_MACHINE_MODE];
10253 if (types[(int) mode] == NULL_TREE)
10254 types[(int) mode] = build_vector_type_for_mode (type, mode);
10255 return types[(int) mode];
10258 /* Source-level argument types. */
10259 #define MIPS_ATYPE_VOID void_type_node
10260 #define MIPS_ATYPE_INT integer_type_node
10261 #define MIPS_ATYPE_POINTER ptr_type_node
10263 /* Standard mode-based argument types. */
10264 #define MIPS_ATYPE_SI intSI_type_node
10265 #define MIPS_ATYPE_USI unsigned_intSI_type_node
10266 #define MIPS_ATYPE_DI intDI_type_node
10267 #define MIPS_ATYPE_SF float_type_node
10268 #define MIPS_ATYPE_DF double_type_node
10270 /* Vector argument types. */
10271 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
10272 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
10273 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
10275 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
10276 their associated MIPS_ATYPEs. */
10277 #define MIPS_FTYPE_ATYPES1(A, B) \
10278 MIPS_ATYPE_##A, MIPS_ATYPE_##B
10280 #define MIPS_FTYPE_ATYPES2(A, B, C) \
10281 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
10283 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
10284 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
10286 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
10287 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
10290 /* Return the function type associated with function prototype TYPE. */
10293 mips_build_function_type (enum mips_function_type type)
10295 static tree types[(int) MIPS_MAX_FTYPE_MAX];
10297 if (types[(int) type] == NULL_TREE)
10300 #define DEF_MIPS_FTYPE(NUM, ARGS) \
10301 case MIPS_FTYPE_NAME##NUM ARGS: \
10302 types[(int) type] \
10303 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
10306 #include "config/mips/mips-ftypes.def"
10307 #undef DEF_MIPS_FTYPE
10309 gcc_unreachable ();
10312 return types[(int) type];
10315 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
10318 mips_init_builtins (void)
10320 const struct builtin_description *d;
10321 const struct bdesc_map *m;
10322 unsigned int offset;
10324 /* Iterate through all of the bdesc arrays, initializing all of the
10325 builtin functions. */
10328 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10330 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10331 && (m->unsupported_target_flags & target_flags) == 0)
10332 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10333 if ((d->target_flags & target_flags) == d->target_flags)
10334 add_builtin_function (d->name,
10335 mips_build_function_type (d->function_type),
10336 d - m->bdesc + offset,
10337 BUILT_IN_MD, NULL, NULL);
10342 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
10343 suitable for input operand OP of instruction ICODE. Return the value. */
10346 mips_prepare_builtin_arg (enum insn_code icode,
10347 unsigned int op, tree exp, unsigned int argnum)
10350 enum machine_mode mode;
10352 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
10353 mode = insn_data[icode].operand[op].mode;
10354 if (!insn_data[icode].operand[op].predicate (value, mode))
10356 value = copy_to_mode_reg (mode, value);
10357 /* Check the predicate again. */
10358 if (!insn_data[icode].operand[op].predicate (value, mode))
10360 error ("invalid argument to builtin function");
10368 /* Return an rtx suitable for output operand OP of instruction ICODE.
10369 If TARGET is non-null, try to use it where possible. */
10372 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10374 enum machine_mode mode;
10376 mode = insn_data[icode].operand[op].mode;
10377 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10378 target = gen_reg_rtx (mode);
10383 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10384 .md pattern and CALL is the function expr with arguments. TARGET,
10385 if nonnull, suggests a good place to put the result.
10386 HAS_TARGET indicates the function must return something. */
10389 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
10392 rtx ops[MAX_RECOG_OPERANDS];
10398 /* We save target to ops[0]. */
10399 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10403 /* We need to test if the arglist is not zero. Some instructions have extra
10404 clobber registers. */
10405 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
10406 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10411 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10415 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10419 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10423 gcc_unreachable ();
10428 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10429 function (TYPE says which). EXP is the tree for the function
10430 function, ICODE is the instruction that should be used to compare
10431 the first two arguments, and COND is the condition it should test.
10432 TARGET, if nonnull, suggests a good place to put the result. */
10435 mips_expand_builtin_movtf (enum mips_builtin_type type,
10436 enum insn_code icode, enum mips_fp_condition cond,
10437 rtx target, tree exp)
10439 rtx cmp_result, op0, op1;
10441 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10442 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
10443 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
10444 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10446 icode = CODE_FOR_mips_cond_move_tf_ps;
10447 target = mips_prepare_builtin_target (icode, 0, target);
10448 if (type == MIPS_BUILTIN_MOVT)
10450 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
10451 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
10455 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
10456 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
10458 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10462 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10463 into TARGET otherwise. Return TARGET. */
10466 mips_builtin_branch_and_move (rtx condition, rtx target,
10467 rtx value_if_true, rtx value_if_false)
10469 rtx true_label, done_label;
10471 true_label = gen_label_rtx ();
10472 done_label = gen_label_rtx ();
10474 /* First assume that CONDITION is false. */
10475 mips_emit_move (target, value_if_false);
10477 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10478 emit_jump_insn (gen_condjump (condition, true_label));
10479 emit_jump_insn (gen_jump (done_label));
10482 /* Fix TARGET if CONDITION is true. */
10483 emit_label (true_label);
10484 mips_emit_move (target, value_if_true);
10486 emit_label (done_label);
10490 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10491 of the comparison instruction and COND is the condition it should test.
10492 EXP is the function call and arguments and TARGET, if nonnull,
10493 suggests a good place to put the boolean result. */
10496 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10497 enum insn_code icode, enum mips_fp_condition cond,
10498 rtx target, tree exp)
10500 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10504 if (target == 0 || GET_MODE (target) != SImode)
10505 target = gen_reg_rtx (SImode);
10507 /* Prepare the operands to the comparison. */
10508 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10509 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
10510 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10512 switch (insn_data[icode].n_operands)
10515 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10519 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10520 ops[3], ops[4], GEN_INT (cond)));
10524 gcc_unreachable ();
10527 /* If the comparison sets more than one register, we define the result
10528 to be 0 if all registers are false and -1 if all registers are true.
10529 The value of the complete result is indeterminate otherwise. */
10530 switch (builtin_type)
10532 case MIPS_BUILTIN_CMP_ALL:
10533 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10534 return mips_builtin_branch_and_move (condition, target,
10535 const0_rtx, const1_rtx);
10537 case MIPS_BUILTIN_CMP_UPPER:
10538 case MIPS_BUILTIN_CMP_LOWER:
10539 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10540 condition = gen_single_cc (cmp_result, offset);
10541 return mips_builtin_branch_and_move (condition, target,
10542 const1_rtx, const0_rtx);
10545 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10546 return mips_builtin_branch_and_move (condition, target,
10547 const1_rtx, const0_rtx);
10551 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10552 suggests a good place to put the boolean result. */
10555 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10557 rtx condition, cmp_result;
10560 if (target == 0 || GET_MODE (target) != SImode)
10561 target = gen_reg_rtx (SImode);
10563 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10565 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10570 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10571 return mips_builtin_branch_and_move (condition, target,
10572 const1_rtx, const0_rtx);
10575 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10578 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10579 enum machine_mode mode ATTRIBUTE_UNUSED,
10580 int ignore ATTRIBUTE_UNUSED)
10582 enum insn_code icode;
10583 enum mips_builtin_type type;
10585 unsigned int fcode;
10586 const struct builtin_description *bdesc;
10587 const struct bdesc_map *m;
10589 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10590 fcode = DECL_FUNCTION_CODE (fndecl);
10594 error ("built-in function %qs not supported for MIPS16",
10595 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
10600 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10602 if (fcode < m->size)
10605 icode = bdesc[fcode].icode;
10606 type = bdesc[fcode].builtin_type;
10616 case MIPS_BUILTIN_DIRECT:
10617 return mips_expand_builtin_direct (icode, target, exp, true);
10619 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10620 return mips_expand_builtin_direct (icode, target, exp, false);
10622 case MIPS_BUILTIN_MOVT:
10623 case MIPS_BUILTIN_MOVF:
10624 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10627 case MIPS_BUILTIN_CMP_ANY:
10628 case MIPS_BUILTIN_CMP_ALL:
10629 case MIPS_BUILTIN_CMP_UPPER:
10630 case MIPS_BUILTIN_CMP_LOWER:
10631 case MIPS_BUILTIN_CMP_SINGLE:
10632 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10635 case MIPS_BUILTIN_BPOSGE32:
10636 return mips_expand_builtin_bposge (type, target);
10643 /* An entry in the mips16 constant pool. VALUE is the pool constant,
10644 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10646 struct mips16_constant {
10647 struct mips16_constant *next;
10650 enum machine_mode mode;
10653 /* Information about an incomplete mips16 constant pool. FIRST is the
10654 first constant, HIGHEST_ADDRESS is the highest address that the first
10655 byte of the pool can have, and INSN_ADDRESS is the current instruction
10658 struct mips16_constant_pool {
10659 struct mips16_constant *first;
10660 int highest_address;
10664 /* Add constant VALUE to POOL and return its label. MODE is the
10665 value's mode (used for CONST_INTs, etc.). */
10668 add_constant (struct mips16_constant_pool *pool,
10669 rtx value, enum machine_mode mode)
10671 struct mips16_constant **p, *c;
10672 bool first_of_size_p;
10674 /* See whether the constant is already in the pool. If so, return the
10675 existing label, otherwise leave P pointing to the place where the
10676 constant should be added.
10678 Keep the pool sorted in increasing order of mode size so that we can
10679 reduce the number of alignments needed. */
10680 first_of_size_p = true;
10681 for (p = &pool->first; *p != 0; p = &(*p)->next)
10683 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10684 return (*p)->label;
10685 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10687 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10688 first_of_size_p = false;
10691 /* In the worst case, the constant needed by the earliest instruction
10692 will end up at the end of the pool. The entire pool must then be
10693 accessible from that instruction.
10695 When adding the first constant, set the pool's highest address to
10696 the address of the first out-of-range byte. Adjust this address
10697 downwards each time a new constant is added. */
10698 if (pool->first == 0)
10699 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10700 is the address of the instruction with the lowest two bits clear.
10701 The base PC value for ld has the lowest three bits clear. Assume
10702 the worst case here. */
10703 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10704 pool->highest_address -= GET_MODE_SIZE (mode);
10705 if (first_of_size_p)
10706 /* Take into account the worst possible padding due to alignment. */
10707 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10709 /* Create a new entry. */
10710 c = (struct mips16_constant *) xmalloc (sizeof *c);
10713 c->label = gen_label_rtx ();
10720 /* Output constant VALUE after instruction INSN and return the last
10721 instruction emitted. MODE is the mode of the constant. */
10724 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10726 if (SCALAR_INT_MODE_P (mode)
10727 || ALL_SCALAR_FRACT_MODE_P (mode)
10728 || ALL_SCALAR_ACCUM_MODE_P (mode))
10730 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10731 return emit_insn_after (gen_consttable_int (value, size), insn);
10734 if (SCALAR_FLOAT_MODE_P (mode))
10735 return emit_insn_after (gen_consttable_float (value), insn);
10737 if (VECTOR_MODE_P (mode))
10741 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10742 insn = dump_constants_1 (GET_MODE_INNER (mode),
10743 CONST_VECTOR_ELT (value, i), insn);
10747 gcc_unreachable ();
10751 /* Dump out the constants in CONSTANTS after INSN. */
10754 dump_constants (struct mips16_constant *constants, rtx insn)
10756 struct mips16_constant *c, *next;
10760 for (c = constants; c != NULL; c = next)
10762 /* If necessary, increase the alignment of PC. */
10763 if (align < GET_MODE_SIZE (c->mode))
10765 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10766 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10768 align = GET_MODE_SIZE (c->mode);
10770 insn = emit_label_after (c->label, insn);
10771 insn = dump_constants_1 (c->mode, c->value, insn);
10777 emit_barrier_after (insn);
10780 /* Return the length of instruction INSN. */
10783 mips16_insn_length (rtx insn)
10787 rtx body = PATTERN (insn);
10788 if (GET_CODE (body) == ADDR_VEC)
10789 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10790 if (GET_CODE (body) == ADDR_DIFF_VEC)
10791 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10793 return get_attr_length (insn);
10796 /* If *X is a symbolic constant that refers to the constant pool, add
10797 the constant to POOL and rewrite *X to use the constant's label. */
10800 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
10802 rtx base, offset, label;
10804 split_const (*x, &base, &offset);
10805 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10807 label = add_constant (pool, get_pool_constant (base),
10808 get_pool_mode (base));
10809 base = gen_rtx_LABEL_REF (Pmode, label);
10810 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10814 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10815 INSN is the instruction we're rewriting and POOL points to the current
10817 struct mips16_rewrite_pool_refs_info {
10819 struct mips16_constant_pool *pool;
10822 /* Rewrite *X so that constant pool references refer to the constant's
10823 label instead. DATA points to a mips16_rewrite_pool_refs_info
10827 mips16_rewrite_pool_refs (rtx *x, void *data)
10829 struct mips16_rewrite_pool_refs_info *info = data;
10831 if (force_to_mem_operand (*x, Pmode))
10833 rtx mem = force_const_mem (GET_MODE (*x), *x);
10834 validate_change (info->insn, x, mem, false);
10839 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
10843 if (TARGET_MIPS16_TEXT_LOADS)
10844 mips16_rewrite_pool_constant (info->pool, x);
10846 return GET_CODE (*x) == CONST ? -1 : 0;
10849 /* Build MIPS16 constant pools. */
10852 mips16_lay_out_constants (void)
10854 struct mips16_constant_pool pool;
10855 struct mips16_rewrite_pool_refs_info info;
10858 if (!TARGET_MIPS16_PCREL_LOADS)
10862 memset (&pool, 0, sizeof (pool));
10863 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10865 /* Rewrite constant pool references in INSN. */
10870 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
10873 pool.insn_address += mips16_insn_length (insn);
10875 if (pool.first != NULL)
10877 /* If there are no natural barriers between the first user of
10878 the pool and the highest acceptable address, we'll need to
10879 create a new instruction to jump around the constant pool.
10880 In the worst case, this instruction will be 4 bytes long.
10882 If it's too late to do this transformation after INSN,
10883 do it immediately before INSN. */
10884 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10888 label = gen_label_rtx ();
10890 jump = emit_jump_insn_before (gen_jump (label), insn);
10891 JUMP_LABEL (jump) = label;
10892 LABEL_NUSES (label) = 1;
10893 barrier = emit_barrier_after (jump);
10895 emit_label_after (label, barrier);
10896 pool.insn_address += 4;
10899 /* See whether the constant pool is now out of range of the first
10900 user. If so, output the constants after the previous barrier.
10901 Note that any instructions between BARRIER and INSN (inclusive)
10902 will use negative offsets to refer to the pool. */
10903 if (pool.insn_address > pool.highest_address)
10905 dump_constants (pool.first, barrier);
10909 else if (BARRIER_P (insn))
10913 dump_constants (pool.first, get_last_insn ());
10916 /* A temporary variable used by for_each_rtx callbacks, etc. */
10917 static rtx mips_sim_insn;
10919 /* A structure representing the state of the processor pipeline.
10920 Used by the mips_sim_* family of functions. */
10922 /* The maximum number of instructions that can be issued in a cycle.
10923 (Caches mips_issue_rate.) */
10924 unsigned int issue_rate;
10926 /* The current simulation time. */
10929 /* How many more instructions can be issued in the current cycle. */
10930 unsigned int insns_left;
10932 /* LAST_SET[X].INSN is the last instruction to set register X.
10933 LAST_SET[X].TIME is the time at which that instruction was issued.
10934 INSN is null if no instruction has yet set register X. */
10938 } last_set[FIRST_PSEUDO_REGISTER];
10940 /* The pipeline's current DFA state. */
10944 /* Reset STATE to the initial simulation state. */
10947 mips_sim_reset (struct mips_sim *state)
10950 state->insns_left = state->issue_rate;
10951 memset (&state->last_set, 0, sizeof (state->last_set));
10952 state_reset (state->dfa_state);
10955 /* Initialize STATE before its first use. DFA_STATE points to an
10956 allocated but uninitialized DFA state. */
10959 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10961 state->issue_rate = mips_issue_rate ();
10962 state->dfa_state = dfa_state;
10963 mips_sim_reset (state);
10966 /* Advance STATE by one clock cycle. */
10969 mips_sim_next_cycle (struct mips_sim *state)
10972 state->insns_left = state->issue_rate;
10973 state_transition (state->dfa_state, 0);
10976 /* Advance simulation state STATE until instruction INSN can read
10980 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
10984 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
10985 if (state->last_set[REGNO (reg) + i].insn != 0)
10989 t = state->last_set[REGNO (reg) + i].time;
10990 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
10991 while (state->time < t)
10992 mips_sim_next_cycle (state);
10996 /* A for_each_rtx callback. If *X is a register, advance simulation state
10997 DATA until mips_sim_insn can read the register's value. */
11000 mips_sim_wait_regs_2 (rtx *x, void *data)
11003 mips_sim_wait_reg (data, mips_sim_insn, *x);
11007 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
11010 mips_sim_wait_regs_1 (rtx *x, void *data)
11012 for_each_rtx (x, mips_sim_wait_regs_2, data);
11015 /* Advance simulation state STATE until all of INSN's register
11016 dependencies are satisfied. */
11019 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
11021 mips_sim_insn = insn;
11022 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
11025 /* Advance simulation state STATE until the units required by
11026 instruction INSN are available. */
11029 mips_sim_wait_units (struct mips_sim *state, rtx insn)
11033 tmp_state = alloca (state_size ());
11034 while (state->insns_left == 0
11035 || (memcpy (tmp_state, state->dfa_state, state_size ()),
11036 state_transition (tmp_state, insn) >= 0))
11037 mips_sim_next_cycle (state);
11040 /* Advance simulation state STATE until INSN is ready to issue. */
11043 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
11045 mips_sim_wait_regs (state, insn);
11046 mips_sim_wait_units (state, insn);
11049 /* mips_sim_insn has just set X. Update the LAST_SET array
11050 in simulation state DATA. */
11053 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11055 struct mips_sim *state;
11060 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
11062 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
11063 state->last_set[REGNO (x) + i].time = state->time;
11067 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
11068 can issue immediately (i.e., that mips_sim_wait_insn has already
11072 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
11074 state_transition (state->dfa_state, insn);
11075 state->insns_left--;
11077 mips_sim_insn = insn;
11078 note_stores (PATTERN (insn), mips_sim_record_set, state);
11081 /* Simulate issuing a NOP in state STATE. */
11084 mips_sim_issue_nop (struct mips_sim *state)
11086 if (state->insns_left == 0)
11087 mips_sim_next_cycle (state);
11088 state->insns_left--;
11091 /* Update simulation state STATE so that it's ready to accept the instruction
11092 after INSN. INSN should be part of the main rtl chain, not a member of a
11096 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
11098 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
11100 mips_sim_issue_nop (state);
11102 switch (GET_CODE (SEQ_BEGIN (insn)))
11106 /* We can't predict the processor state after a call or label. */
11107 mips_sim_reset (state);
11111 /* The delay slots of branch likely instructions are only executed
11112 when the branch is taken. Therefore, if the caller has simulated
11113 the delay slot instruction, STATE does not really reflect the state
11114 of the pipeline for the instruction after the delay slot. Also,
11115 branch likely instructions tend to incur a penalty when not taken,
11116 so there will probably be an extra delay between the branch and
11117 the instruction after the delay slot. */
11118 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
11119 mips_sim_reset (state);
11127 /* The VR4130 pipeline issues aligned pairs of instructions together,
11128 but it stalls the second instruction if it depends on the first.
11129 In order to cut down the amount of logic required, this dependence
11130 check is not based on a full instruction decode. Instead, any non-SPECIAL
11131 instruction is assumed to modify the register specified by bits 20-16
11132 (which is usually the "rt" field).
11134 In beq, beql, bne and bnel instructions, the rt field is actually an
11135 input, so we can end up with a false dependence between the branch
11136 and its delay slot. If this situation occurs in instruction INSN,
11137 try to avoid it by swapping rs and rt. */
11140 vr4130_avoid_branch_rt_conflict (rtx insn)
11144 first = SEQ_BEGIN (insn);
11145 second = SEQ_END (insn);
11147 && NONJUMP_INSN_P (second)
11148 && GET_CODE (PATTERN (first)) == SET
11149 && GET_CODE (SET_DEST (PATTERN (first))) == PC
11150 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
11152 /* Check for the right kind of condition. */
11153 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
11154 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
11155 && REG_P (XEXP (cond, 0))
11156 && REG_P (XEXP (cond, 1))
11157 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
11158 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
11160 /* SECOND mentions the rt register but not the rs register. */
11161 rtx tmp = XEXP (cond, 0);
11162 XEXP (cond, 0) = XEXP (cond, 1);
11163 XEXP (cond, 1) = tmp;
11168 /* Implement -mvr4130-align. Go through each basic block and simulate the
11169 processor pipeline. If we find that a pair of instructions could execute
11170 in parallel, and the first of those instruction is not 8-byte aligned,
11171 insert a nop to make it aligned. */
11174 vr4130_align_insns (void)
11176 struct mips_sim state;
11177 rtx insn, subinsn, last, last2, next;
11182 /* LAST is the last instruction before INSN to have a nonzero length.
11183 LAST2 is the last such instruction before LAST. */
11187 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
11190 mips_sim_init (&state, alloca (state_size ()));
11191 for (insn = get_insns (); insn != 0; insn = next)
11193 unsigned int length;
11195 next = NEXT_INSN (insn);
11197 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
11198 This isn't really related to the alignment pass, but we do it on
11199 the fly to avoid a separate instruction walk. */
11200 vr4130_avoid_branch_rt_conflict (insn);
11202 if (USEFUL_INSN_P (insn))
11203 FOR_EACH_SUBINSN (subinsn, insn)
11205 mips_sim_wait_insn (&state, subinsn);
11207 /* If we want this instruction to issue in parallel with the
11208 previous one, make sure that the previous instruction is
11209 aligned. There are several reasons why this isn't worthwhile
11210 when the second instruction is a call:
11212 - Calls are less likely to be performance critical,
11213 - There's a good chance that the delay slot can execute
11214 in parallel with the call.
11215 - The return address would then be unaligned.
11217 In general, if we're going to insert a nop between instructions
11218 X and Y, it's better to insert it immediately after X. That
11219 way, if the nop makes Y aligned, it will also align any labels
11220 between X and Y. */
11221 if (state.insns_left != state.issue_rate
11222 && !CALL_P (subinsn))
11224 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
11226 /* SUBINSN is the first instruction in INSN and INSN is
11227 aligned. We want to align the previous instruction
11228 instead, so insert a nop between LAST2 and LAST.
11230 Note that LAST could be either a single instruction
11231 or a branch with a delay slot. In the latter case,
11232 LAST, like INSN, is already aligned, but the delay
11233 slot must have some extra delay that stops it from
11234 issuing at the same time as the branch. We therefore
11235 insert a nop before the branch in order to align its
11237 emit_insn_after (gen_nop (), last2);
11240 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
11242 /* SUBINSN is the delay slot of INSN, but INSN is
11243 currently unaligned. Insert a nop between
11244 LAST and INSN to align it. */
11245 emit_insn_after (gen_nop (), last);
11249 mips_sim_issue_insn (&state, subinsn);
11251 mips_sim_finish_insn (&state, insn);
11253 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
11254 length = get_attr_length (insn);
11257 /* If the instruction is an asm statement or multi-instruction
11258 mips.md patern, the length is only an estimate. Insert an
11259 8 byte alignment after it so that the following instructions
11260 can be handled correctly. */
11261 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
11262 && (recog_memoized (insn) < 0 || length >= 8))
11264 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
11265 next = NEXT_INSN (next);
11266 mips_sim_next_cycle (&state);
11269 else if (length & 4)
11270 aligned_p = !aligned_p;
11275 /* See whether INSN is an aligned label. */
11276 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
11282 /* Subroutine of mips_reorg. If there is a hazard between INSN
11283 and a previous instruction, avoid it by inserting nops after
11286 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
11287 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
11288 before using the value of that register. *HILO_DELAY counts the
11289 number of instructions since the last hilo hazard (that is,
11290 the number of instructions since the last mflo or mfhi).
11292 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
11293 for the next instruction.
11295 LO_REG is an rtx for the LO register, used in dependence checking. */
11298 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
11299 rtx *delayed_reg, rtx lo_reg)
11302 int nops, ninsns, hazard_set;
11304 if (!INSN_P (insn))
11307 pattern = PATTERN (insn);
11309 /* Do not put the whole function in .set noreorder if it contains
11310 an asm statement. We don't know whether there will be hazards
11311 between the asm statement and the gcc-generated code. */
11312 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
11313 cfun->machine->all_noreorder_p = false;
11315 /* Ignore zero-length instructions (barriers and the like). */
11316 ninsns = get_attr_length (insn) / 4;
11320 /* Work out how many nops are needed. Note that we only care about
11321 registers that are explicitly mentioned in the instruction's pattern.
11322 It doesn't matter that calls use the argument registers or that they
11323 clobber hi and lo. */
11324 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
11325 nops = 2 - *hilo_delay;
11326 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
11331 /* Insert the nops between this instruction and the previous one.
11332 Each new nop takes us further from the last hilo hazard. */
11333 *hilo_delay += nops;
11335 emit_insn_after (gen_hazard_nop (), after);
11337 /* Set up the state for the next instruction. */
11338 *hilo_delay += ninsns;
11340 if (INSN_CODE (insn) >= 0)
11341 switch (get_attr_hazard (insn))
11351 hazard_set = (int) get_attr_hazard_set (insn);
11352 if (hazard_set == 0)
11353 set = single_set (insn);
11356 gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
11357 set = XVECEXP (PATTERN (insn), 0, hazard_set - 1);
11359 gcc_assert (set && GET_CODE (set) == SET);
11360 *delayed_reg = SET_DEST (set);
11366 /* Go through the instruction stream and insert nops where necessary.
11367 See if the whole function can then be put into .set noreorder &
11371 mips_avoid_hazards (void)
11373 rtx insn, last_insn, lo_reg, delayed_reg;
11376 /* Force all instructions to be split into their final form. */
11377 split_all_insns_noflow ();
11379 /* Recalculate instruction lengths without taking nops into account. */
11380 cfun->machine->ignore_hazard_length_p = true;
11381 shorten_branches (get_insns ());
11383 cfun->machine->all_noreorder_p = true;
11385 /* Profiled functions can't be all noreorder because the profiler
11386 support uses assembler macros. */
11387 if (current_function_profile)
11388 cfun->machine->all_noreorder_p = false;
11390 /* Code compiled with -mfix-vr4120 can't be all noreorder because
11391 we rely on the assembler to work around some errata. */
11392 if (TARGET_FIX_VR4120)
11393 cfun->machine->all_noreorder_p = false;
11395 /* The same is true for -mfix-vr4130 if we might generate mflo or
11396 mfhi instructions. Note that we avoid using mflo and mfhi if
11397 the VR4130 macc and dmacc instructions are available instead;
11398 see the *mfhilo_{si,di}_macc patterns. */
11399 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
11400 cfun->machine->all_noreorder_p = false;
11405 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
11407 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
11410 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
11411 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
11412 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
11413 &hilo_delay, &delayed_reg, lo_reg);
11415 mips_avoid_hazard (last_insn, insn, &hilo_delay,
11416 &delayed_reg, lo_reg);
11423 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
11428 mips16_lay_out_constants ();
11429 if (TARGET_EXPLICIT_RELOCS)
11431 if (mips_flag_delayed_branch)
11432 dbr_schedule (get_insns ());
11433 mips_avoid_hazards ();
11434 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
11435 vr4130_align_insns ();
11439 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
11440 in order to avoid duplicating too much logic from elsewhere. */
11443 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11444 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11447 rtx this, temp1, temp2, insn, fnaddr;
11448 bool use_sibcall_p;
11450 /* Pretend to be a post-reload pass while generating rtl. */
11451 reload_completed = 1;
11453 /* Mark the end of the (empty) prologue. */
11454 emit_note (NOTE_INSN_PROLOGUE_END);
11456 /* Determine if we can use a sibcall to call FUNCTION directly. */
11457 fnaddr = XEXP (DECL_RTL (function), 0);
11458 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
11459 && const_call_insn_operand (fnaddr, Pmode));
11461 /* Determine if we need to load FNADDR from the GOT. */
11462 if (!use_sibcall_p)
11463 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
11465 case SYMBOL_GOT_PAGE_OFST:
11466 case SYMBOL_GOT_DISP:
11467 /* Pick a global pointer. Use a call-clobbered register if
11468 TARGET_CALL_SAVED_GP. */
11469 cfun->machine->global_pointer =
11470 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
11471 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
11473 /* Set up the global pointer for n32 or n64 abicalls. */
11474 mips_emit_loadgp ();
11481 /* We need two temporary registers in some cases. */
11482 temp1 = gen_rtx_REG (Pmode, 2);
11483 temp2 = gen_rtx_REG (Pmode, 3);
11485 /* Find out which register contains the "this" pointer. */
11486 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11487 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
11489 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
11491 /* Add DELTA to THIS. */
11494 rtx offset = GEN_INT (delta);
11495 if (!SMALL_OPERAND (delta))
11497 mips_emit_move (temp1, offset);
11500 emit_insn (gen_add3_insn (this, this, offset));
11503 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
11504 if (vcall_offset != 0)
11508 /* Set TEMP1 to *THIS. */
11509 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
11511 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
11512 addr = mips_add_offset (temp2, temp1, vcall_offset);
11514 /* Load the offset and add it to THIS. */
11515 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
11516 emit_insn (gen_add3_insn (this, this, temp1));
11519 /* Jump to the target function. Use a sibcall if direct jumps are
11520 allowed, otherwise load the address into a register first. */
11523 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
11524 SIBLING_CALL_P (insn) = 1;
11528 /* This is messy. gas treats "la $25,foo" as part of a call
11529 sequence and may allow a global "foo" to be lazily bound.
11530 The general move patterns therefore reject this combination.
11532 In this context, lazy binding would actually be OK
11533 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
11534 TARGET_CALL_SAVED_GP; see mips_load_call_address.
11535 We must therefore load the address via a temporary
11536 register if mips_dangerous_for_la25_p.
11538 If we jump to the temporary register rather than $25, the assembler
11539 can use the move insn to fill the jump's delay slot. */
11540 if (TARGET_USE_PIC_FN_ADDR_REG
11541 && !mips_dangerous_for_la25_p (fnaddr))
11542 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
11543 mips_load_call_address (temp1, fnaddr, true);
11545 if (TARGET_USE_PIC_FN_ADDR_REG
11546 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
11547 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
11548 emit_jump_insn (gen_indirect_jump (temp1));
11551 /* Run just enough of rest_of_compilation. This sequence was
11552 "borrowed" from alpha.c. */
11553 insn = get_insns ();
11554 insn_locators_alloc ();
11555 split_all_insns_noflow ();
11556 mips16_lay_out_constants ();
11557 shorten_branches (insn);
11558 final_start_function (insn, file, 1);
11559 final (insn, file, 1);
11560 final_end_function ();
11562 /* Clean up the vars set above. Note that final_end_function resets
11563 the global pointer for us. */
11564 reload_completed = 0;
11567 static GTY(()) int was_mips16_p = -1;
11569 /* Set up the target-dependent global state so that it matches the
11570 current function's ISA mode. */
11573 mips_set_mips16_mode (int mips16_p)
11575 if (mips16_p == was_mips16_p)
11578 /* Restore base settings of various flags. */
11579 target_flags = mips_base_target_flags;
11580 flag_delayed_branch = mips_flag_delayed_branch;
11581 flag_schedule_insns = mips_base_schedule_insns;
11582 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
11583 flag_move_loop_invariants = mips_base_move_loop_invariants;
11584 align_loops = mips_base_align_loops;
11585 align_jumps = mips_base_align_jumps;
11586 align_functions = mips_base_align_functions;
11590 /* Select mips16 instruction set. */
11591 target_flags |= MASK_MIPS16;
11593 /* Don't run the scheduler before reload, since it tends to
11594 increase register pressure. */
11595 flag_schedule_insns = 0;
11597 /* Don't do hot/cold partitioning. The constant layout code expects
11598 the whole function to be in a single section. */
11599 flag_reorder_blocks_and_partition = 0;
11601 /* Don't move loop invariants, because it tends to increase
11602 register pressure. It also introduces an extra move in cases
11603 where the constant is the first operand in a two-operand binary
11604 instruction, or when it forms a register argument to a functon
11606 flag_move_loop_invariants = 0;
11608 /* Silently disable -mexplicit-relocs since it doesn't apply
11609 to mips16 code. Even so, it would overly pedantic to warn
11610 about "-mips16 -mexplicit-relocs", especially given that
11611 we use a %gprel() operator. */
11612 target_flags &= ~MASK_EXPLICIT_RELOCS;
11614 /* Experiments suggest we get the best overall results from using
11615 the range of an unextended lw or sw. Code that makes heavy use
11616 of byte or short accesses can do better with ranges of 0...31
11617 and 0...63 respectively, but most code is sensitive to the range
11618 of lw and sw instead. */
11619 targetm.min_anchor_offset = 0;
11620 targetm.max_anchor_offset = 127;
11622 if (flag_pic || TARGET_ABICALLS)
11623 sorry ("MIPS16 PIC");
11627 /* Reset to select base non-mips16 ISA. */
11628 target_flags &= ~MASK_MIPS16;
11630 /* When using explicit relocs, we call dbr_schedule from within
11632 if (TARGET_EXPLICIT_RELOCS)
11633 flag_delayed_branch = 0;
11635 /* Provide default values for align_* for 64-bit targets. */
11638 if (align_loops == 0)
11640 if (align_jumps == 0)
11642 if (align_functions == 0)
11643 align_functions = 8;
11646 targetm.min_anchor_offset = -32768;
11647 targetm.max_anchor_offset = 32767;
11650 /* (Re)initialize mips target internals for new ISA. */
11651 mips_init_split_addresses ();
11652 mips_init_relocs ();
11654 if (was_mips16_p >= 0)
11655 /* Reinitialize target-dependent state. */
11658 was_mips16_p = TARGET_MIPS16;
11661 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
11662 function should use the MIPS16 ISA and switch modes accordingly. */
11665 mips_set_current_function (tree fndecl)
11667 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
11670 /* Allocate a chunk of memory for per-function machine-dependent data. */
11671 static struct machine_function *
11672 mips_init_machine_status (void)
11674 return ((struct machine_function *)
11675 ggc_alloc_cleared (sizeof (struct machine_function)));
11678 /* Return the processor associated with the given ISA level, or null
11679 if the ISA isn't valid. */
11681 static const struct mips_cpu_info *
11682 mips_cpu_info_from_isa (int isa)
11686 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
11687 if (mips_cpu_info_table[i].isa == isa)
11688 return mips_cpu_info_table + i;
11693 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11694 with a final "000" replaced by "k". Ignore case.
11696 Note: this function is shared between GCC and GAS. */
11699 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11701 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11702 given++, canonical++;
11704 return ((*given == 0 && *canonical == 0)
11705 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11709 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11710 CPU name. We've traditionally allowed a lot of variation here.
11712 Note: this function is shared between GCC and GAS. */
11715 mips_matching_cpu_name_p (const char *canonical, const char *given)
11717 /* First see if the name matches exactly, or with a final "000"
11718 turned into "k". */
11719 if (mips_strict_matching_cpu_name_p (canonical, given))
11722 /* If not, try comparing based on numerical designation alone.
11723 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11724 if (TOLOWER (*given) == 'r')
11726 if (!ISDIGIT (*given))
11729 /* Skip over some well-known prefixes in the canonical name,
11730 hoping to find a number there too. */
11731 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11733 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11735 else if (TOLOWER (canonical[0]) == 'r')
11738 return mips_strict_matching_cpu_name_p (canonical, given);
11742 /* Return the mips_cpu_info entry for the processor or ISA given
11743 by CPU_STRING. Return null if the string isn't recognized.
11745 A similar function exists in GAS. */
11747 static const struct mips_cpu_info *
11748 mips_parse_cpu (const char *cpu_string)
11753 /* In the past, we allowed upper-case CPU names, but it doesn't
11754 work well with the multilib machinery. */
11755 for (s = cpu_string; *s != 0; s++)
11758 warning (0, "the cpu name must be lower case");
11762 /* 'from-abi' selects the most compatible architecture for the given
11763 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11764 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11765 version. Look first at the -mgp options, if given, otherwise base
11766 the choice on MASK_64BIT in TARGET_DEFAULT. */
11767 if (strcasecmp (cpu_string, "from-abi") == 0)
11768 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11769 : ABI_NEEDS_64BIT_REGS ? 3
11770 : (TARGET_64BIT ? 3 : 1));
11772 /* 'default' has traditionally been a no-op. Probably not very useful. */
11773 if (strcasecmp (cpu_string, "default") == 0)
11776 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
11777 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
11778 return mips_cpu_info_table + i;
11784 /* Set up globals to generate code for the ISA or processor
11785 described by INFO. */
11788 mips_set_architecture (const struct mips_cpu_info *info)
11792 mips_arch_info = info;
11793 mips_arch = info->cpu;
11794 mips_isa = info->isa;
11799 /* Likewise for tuning. */
11802 mips_set_tune (const struct mips_cpu_info *info)
11806 mips_tune_info = info;
11807 mips_tune = info->cpu;
11811 /* Implement TARGET_HANDLE_OPTION. */
11814 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
11819 if (strcmp (arg, "32") == 0)
11821 else if (strcmp (arg, "o64") == 0)
11822 mips_abi = ABI_O64;
11823 else if (strcmp (arg, "n32") == 0)
11824 mips_abi = ABI_N32;
11825 else if (strcmp (arg, "64") == 0)
11827 else if (strcmp (arg, "eabi") == 0)
11828 mips_abi = ABI_EABI;
11835 return mips_parse_cpu (arg) != 0;
11838 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
11839 return mips_isa_info != 0;
11841 case OPT_mno_flush_func:
11842 mips_cache_flush_func = NULL;
11845 case OPT_mcode_readable_:
11846 if (strcmp (arg, "yes") == 0)
11847 mips_code_readable = CODE_READABLE_YES;
11848 else if (strcmp (arg, "pcrel") == 0)
11849 mips_code_readable = CODE_READABLE_PCREL;
11850 else if (strcmp (arg, "no") == 0)
11851 mips_code_readable = CODE_READABLE_NO;
11861 /* Set up the threshold for data to go into the small data area, instead
11862 of the normal data area, and detect any conflicts in the switches. */
11865 override_options (void)
11867 int i, start, regno;
11868 enum machine_mode mode;
11870 #ifdef SUBTARGET_OVERRIDE_OPTIONS
11871 SUBTARGET_OVERRIDE_OPTIONS;
11874 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
11876 /* The following code determines the architecture and register size.
11877 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
11878 The GAS and GCC code should be kept in sync as much as possible. */
11880 if (mips_arch_string != 0)
11881 mips_set_architecture (mips_parse_cpu (mips_arch_string));
11883 if (mips_isa_info != 0)
11885 if (mips_arch_info == 0)
11886 mips_set_architecture (mips_isa_info);
11887 else if (mips_arch_info->isa != mips_isa_info->isa)
11888 error ("-%s conflicts with the other architecture options, "
11889 "which specify a %s processor",
11890 mips_isa_info->name,
11891 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
11894 if (mips_arch_info == 0)
11896 #ifdef MIPS_CPU_STRING_DEFAULT
11897 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
11899 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
11903 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
11904 error ("-march=%s is not compatible with the selected ABI",
11905 mips_arch_info->name);
11907 /* Optimize for mips_arch, unless -mtune selects a different processor. */
11908 if (mips_tune_string != 0)
11909 mips_set_tune (mips_parse_cpu (mips_tune_string));
11911 if (mips_tune_info == 0)
11912 mips_set_tune (mips_arch_info);
11914 /* Set cost structure for the processor. */
11916 mips_cost = &mips_rtx_cost_optimize_size;
11918 mips_cost = &mips_rtx_cost_data[mips_tune];
11920 /* If the user hasn't specified a branch cost, use the processor's
11922 if (mips_branch_cost == 0)
11923 mips_branch_cost = mips_cost->branch_cost;
11925 if ((target_flags_explicit & MASK_64BIT) != 0)
11927 /* The user specified the size of the integer registers. Make sure
11928 it agrees with the ABI and ISA. */
11929 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
11930 error ("-mgp64 used with a 32-bit processor");
11931 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
11932 error ("-mgp32 used with a 64-bit ABI");
11933 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
11934 error ("-mgp64 used with a 32-bit ABI");
11938 /* Infer the integer register size from the ABI and processor.
11939 Restrict ourselves to 32-bit registers if that's all the
11940 processor has, or if the ABI cannot handle 64-bit registers. */
11941 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
11942 target_flags &= ~MASK_64BIT;
11944 target_flags |= MASK_64BIT;
11947 if ((target_flags_explicit & MASK_FLOAT64) != 0)
11949 /* Really, -mfp32 and -mfp64 are ornamental options. There's
11950 only one right answer here. */
11951 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
11952 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
11953 else if (!TARGET_64BIT && TARGET_FLOAT64
11954 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
11955 error ("-mgp32 and -mfp64 can only be combined if the target"
11956 " supports the mfhc1 and mthc1 instructions");
11957 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
11958 error ("unsupported combination: %s", "-mfp64 -msingle-float");
11962 /* -msingle-float selects 32-bit float registers. Otherwise the
11963 float registers should be the same size as the integer ones. */
11964 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
11965 target_flags |= MASK_FLOAT64;
11967 target_flags &= ~MASK_FLOAT64;
11970 /* End of code shared with GAS. */
11972 if ((target_flags_explicit & MASK_LONG64) == 0)
11974 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
11975 target_flags |= MASK_LONG64;
11977 target_flags &= ~MASK_LONG64;
11980 if (!TARGET_OLDABI)
11981 flag_pcc_struct_return = 0;
11983 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
11985 /* If neither -mbranch-likely nor -mno-branch-likely was given
11986 on the command line, set MASK_BRANCHLIKELY based on the target
11987 architecture and tuning flags. Annulled delay slots are a
11988 size win, so we only consider the processor-specific tuning
11989 for !optimize_size. */
11990 if (ISA_HAS_BRANCHLIKELY
11992 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
11993 target_flags |= MASK_BRANCHLIKELY;
11995 target_flags &= ~MASK_BRANCHLIKELY;
11997 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
11998 warning (0, "the %qs architecture does not support branch-likely"
11999 " instructions", mips_arch_info->name);
12001 /* The effect of -mabicalls isn't defined for the EABI. */
12002 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
12004 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
12005 target_flags &= ~MASK_ABICALLS;
12008 /* MIPS16 cannot generate PIC yet. */
12009 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
12011 sorry ("MIPS16 PIC");
12012 target_flags &= ~MASK_ABICALLS;
12013 flag_pic = flag_pie = flag_shlib = 0;
12016 if (TARGET_ABICALLS)
12017 /* We need to set flag_pic for executables as well as DSOs
12018 because we may reference symbols that are not defined in
12019 the final executable. (MIPS does not use things like
12020 copy relocs, for example.)
12022 Also, there is a body of code that uses __PIC__ to distinguish
12023 between -mabicalls and -mno-abicalls code. */
12026 /* -mvr4130-align is a "speed over size" optimization: it usually produces
12027 faster code, but at the expense of more nops. Enable it at -O3 and
12029 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
12030 target_flags |= MASK_VR4130_ALIGN;
12032 /* Prefer a call to memcpy over inline code when optimizing for size,
12033 though see MOVE_RATIO in mips.h. */
12034 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
12035 target_flags |= MASK_MEMCPY;
12037 /* If we have a nonzero small-data limit, check that the -mgpopt
12038 setting is consistent with the other target flags. */
12039 if (mips_section_threshold > 0)
12043 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
12044 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
12046 TARGET_LOCAL_SDATA = false;
12047 TARGET_EXTERN_SDATA = false;
12051 if (TARGET_VXWORKS_RTP)
12052 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
12054 if (TARGET_ABICALLS)
12055 warning (0, "cannot use small-data accesses for %qs",
12060 #ifdef MIPS_TFMODE_FORMAT
12061 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
12064 /* Make sure that the user didn't turn off paired single support when
12065 MIPS-3D support is requested. */
12066 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
12067 && !TARGET_PAIRED_SINGLE_FLOAT)
12068 error ("-mips3d requires -mpaired-single");
12070 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
12072 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
12074 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
12075 and TARGET_HARD_FLOAT_ABI are both true. */
12076 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
12077 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
12079 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
12081 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
12082 error ("-mips3d/-mpaired-single must be used with -mips64");
12084 /* If TARGET_DSPR2, enable MASK_DSP. */
12086 target_flags |= MASK_DSP;
12088 mips_init_print_operand_punct ();
12090 /* Set up array to map GCC register number to debug register number.
12091 Ignore the special purpose register numbers. */
12093 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12095 mips_dbx_regno[i] = INVALID_REGNUM;
12096 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
12097 mips_dwarf_regno[i] = i;
12099 mips_dwarf_regno[i] = INVALID_REGNUM;
12102 start = GP_DBX_FIRST - GP_REG_FIRST;
12103 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
12104 mips_dbx_regno[i] = i + start;
12106 start = FP_DBX_FIRST - FP_REG_FIRST;
12107 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
12108 mips_dbx_regno[i] = i + start;
12110 /* HI and LO debug registers use big-endian ordering. */
12111 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
12112 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
12113 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
12114 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
12115 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
12117 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
12118 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
12121 /* Set up array giving whether a given register can hold a given mode. */
12123 for (mode = VOIDmode;
12124 mode != MAX_MACHINE_MODE;
12125 mode = (enum machine_mode) ((int)mode + 1))
12127 register int size = GET_MODE_SIZE (mode);
12128 register enum mode_class class = GET_MODE_CLASS (mode);
12130 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12134 if (mode == CCV2mode)
12135 temp = (ISA_HAS_8CC
12136 && ST_REG_P (regno)
12137 && (regno - ST_REG_FIRST) % 2 == 0);
12139 else if (mode == CCV4mode)
12140 temp = (ISA_HAS_8CC
12141 && ST_REG_P (regno)
12142 && (regno - ST_REG_FIRST) % 4 == 0);
12144 else if (mode == CCmode)
12147 temp = (regno == FPSW_REGNUM);
12149 temp = (ST_REG_P (regno) || GP_REG_P (regno)
12150 || FP_REG_P (regno));
12153 else if (GP_REG_P (regno))
12154 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
12156 else if (FP_REG_P (regno))
12157 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
12158 || (MIN_FPRS_PER_FMT == 1
12159 && size <= UNITS_PER_FPREG))
12160 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
12161 || class == MODE_VECTOR_FLOAT)
12162 && size <= UNITS_PER_FPVALUE)
12163 /* Allow integer modes that fit into a single
12164 register. We need to put integers into FPRs
12165 when using instructions like cvt and trunc.
12166 We can't allow sizes smaller than a word,
12167 the FPU has no appropriate load/store
12168 instructions for those. */
12169 || (class == MODE_INT
12170 && size >= MIN_UNITS_PER_WORD
12171 && size <= UNITS_PER_FPREG)
12172 /* Allow TFmode for CCmode reloads. */
12173 || (ISA_HAS_8CC && mode == TFmode)));
12175 else if (ACC_REG_P (regno))
12176 temp = ((INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
12177 && size <= UNITS_PER_WORD * 2
12178 && (size <= UNITS_PER_WORD
12179 || regno == MD_REG_FIRST
12180 || (DSP_ACC_REG_P (regno)
12181 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
12183 else if (ALL_COP_REG_P (regno))
12184 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
12188 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
12192 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
12193 initialized yet, so we can't use that here. */
12194 gpr_mode = TARGET_64BIT ? DImode : SImode;
12196 /* Function to allocate machine-dependent function status. */
12197 init_machine_status = &mips_init_machine_status;
12199 /* Default to working around R4000 errata only if the processor
12200 was selected explicitly. */
12201 if ((target_flags_explicit & MASK_FIX_R4000) == 0
12202 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
12203 target_flags |= MASK_FIX_R4000;
12205 /* Default to working around R4400 errata only if the processor
12206 was selected explicitly. */
12207 if ((target_flags_explicit & MASK_FIX_R4400) == 0
12208 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
12209 target_flags |= MASK_FIX_R4400;
12211 /* Save base state of options. */
12212 mips_base_mips16 = TARGET_MIPS16;
12213 mips_base_target_flags = target_flags;
12214 mips_flag_delayed_branch = flag_delayed_branch;
12215 mips_base_schedule_insns = flag_schedule_insns;
12216 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
12217 mips_base_move_loop_invariants = flag_move_loop_invariants;
12218 mips_base_align_loops = align_loops;
12219 mips_base_align_jumps = align_jumps;
12220 mips_base_align_functions = align_functions;
12222 /* Now select the mips16 or 32-bit instruction set, as requested. */
12223 mips_set_mips16_mode (mips_base_mips16);
12226 /* Swap the register information for registers I and I + 1, which
12227 currently have the wrong endianness. Note that the registers'
12228 fixedness and call-clobberedness might have been set on the
12232 mips_swap_registers (unsigned int i)
12237 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
12238 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
12240 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
12241 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
12242 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
12243 SWAP_STRING (reg_names[i], reg_names[i + 1]);
12249 /* Implement CONDITIONAL_REGISTER_USAGE. */
12252 mips_conditional_register_usage (void)
12258 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
12259 fixed_regs[regno] = call_used_regs[regno] = 1;
12261 if (!TARGET_HARD_FLOAT)
12265 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
12266 fixed_regs[regno] = call_used_regs[regno] = 1;
12267 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12268 fixed_regs[regno] = call_used_regs[regno] = 1;
12270 else if (! ISA_HAS_8CC)
12274 /* We only have a single condition code register. We
12275 implement this by hiding all the condition code registers,
12276 and generating RTL that refers directly to ST_REG_FIRST. */
12277 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12278 fixed_regs[regno] = call_used_regs[regno] = 1;
12280 /* In mips16 mode, we permit the $t temporary registers to be used
12281 for reload. We prohibit the unused $s registers, since they
12282 are caller saved, and saving them via a mips16 register would
12283 probably waste more time than just reloading the value. */
12286 fixed_regs[18] = call_used_regs[18] = 1;
12287 fixed_regs[19] = call_used_regs[19] = 1;
12288 fixed_regs[20] = call_used_regs[20] = 1;
12289 fixed_regs[21] = call_used_regs[21] = 1;
12290 fixed_regs[22] = call_used_regs[22] = 1;
12291 fixed_regs[23] = call_used_regs[23] = 1;
12292 fixed_regs[26] = call_used_regs[26] = 1;
12293 fixed_regs[27] = call_used_regs[27] = 1;
12294 fixed_regs[30] = call_used_regs[30] = 1;
12296 /* fp20-23 are now caller saved. */
12297 if (mips_abi == ABI_64)
12300 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
12301 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12303 /* Odd registers from fp21 to fp31 are now caller saved. */
12304 if (mips_abi == ABI_N32)
12307 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
12308 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12310 /* Make sure that double-register accumulator values are correctly
12311 ordered for the current endianness. */
12312 if (TARGET_LITTLE_ENDIAN)
12315 mips_swap_registers (MD_REG_FIRST);
12316 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
12317 mips_swap_registers (regno);
12321 /* On the mips16, we want to allocate $24 (T_REG) before other
12322 registers for instructions for which it is possible. This helps
12323 avoid shuffling registers around in order to set up for an xor,
12324 encouraging the compiler to use a cmp instead. */
12327 mips_order_regs_for_local_alloc (void)
12331 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12332 reg_alloc_order[i] = i;
12336 /* It really doesn't matter where we put register 0, since it is
12337 a fixed register anyhow. */
12338 reg_alloc_order[0] = 24;
12339 reg_alloc_order[24] = 0;
12343 /* Initialize the GCC target structure. */
12344 #undef TARGET_ASM_ALIGNED_HI_OP
12345 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
12346 #undef TARGET_ASM_ALIGNED_SI_OP
12347 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
12348 #undef TARGET_ASM_ALIGNED_DI_OP
12349 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
12351 #undef TARGET_ASM_FUNCTION_PROLOGUE
12352 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
12353 #undef TARGET_ASM_FUNCTION_EPILOGUE
12354 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
12355 #undef TARGET_ASM_SELECT_RTX_SECTION
12356 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
12357 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
12358 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
12360 #undef TARGET_SCHED_INIT
12361 #define TARGET_SCHED_INIT mips_sched_init
12362 #undef TARGET_SCHED_REORDER
12363 #define TARGET_SCHED_REORDER mips_sched_reorder
12364 #undef TARGET_SCHED_REORDER2
12365 #define TARGET_SCHED_REORDER2 mips_sched_reorder
12366 #undef TARGET_SCHED_VARIABLE_ISSUE
12367 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
12368 #undef TARGET_SCHED_ADJUST_COST
12369 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
12370 #undef TARGET_SCHED_ISSUE_RATE
12371 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
12372 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
12373 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
12374 mips_multipass_dfa_lookahead
12376 #undef TARGET_DEFAULT_TARGET_FLAGS
12377 #define TARGET_DEFAULT_TARGET_FLAGS \
12379 | TARGET_CPU_DEFAULT \
12380 | TARGET_ENDIAN_DEFAULT \
12381 | TARGET_FP_EXCEPTIONS_DEFAULT \
12382 | MASK_CHECK_ZERO_DIV \
12384 #undef TARGET_HANDLE_OPTION
12385 #define TARGET_HANDLE_OPTION mips_handle_option
12387 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
12388 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
12390 #undef TARGET_INSERT_ATTRIBUTES
12391 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
12392 #undef TARGET_MERGE_DECL_ATTRIBUTES
12393 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
12394 #undef TARGET_SET_CURRENT_FUNCTION
12395 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
12397 #undef TARGET_VALID_POINTER_MODE
12398 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
12399 #undef TARGET_RTX_COSTS
12400 #define TARGET_RTX_COSTS mips_rtx_costs
12401 #undef TARGET_ADDRESS_COST
12402 #define TARGET_ADDRESS_COST mips_address_cost
12404 #undef TARGET_IN_SMALL_DATA_P
12405 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
12407 #undef TARGET_MACHINE_DEPENDENT_REORG
12408 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
12410 #undef TARGET_ASM_FILE_START
12411 #define TARGET_ASM_FILE_START mips_file_start
12412 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
12413 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
12415 #undef TARGET_INIT_LIBFUNCS
12416 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
12418 #undef TARGET_BUILD_BUILTIN_VA_LIST
12419 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
12420 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
12421 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
12423 #undef TARGET_PROMOTE_FUNCTION_ARGS
12424 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
12425 #undef TARGET_PROMOTE_FUNCTION_RETURN
12426 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
12427 #undef TARGET_PROMOTE_PROTOTYPES
12428 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
12430 #undef TARGET_RETURN_IN_MEMORY
12431 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
12432 #undef TARGET_RETURN_IN_MSB
12433 #define TARGET_RETURN_IN_MSB mips_return_in_msb
12435 #undef TARGET_ASM_OUTPUT_MI_THUNK
12436 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
12437 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
12438 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
12440 #undef TARGET_SETUP_INCOMING_VARARGS
12441 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
12442 #undef TARGET_STRICT_ARGUMENT_NAMING
12443 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
12444 #undef TARGET_MUST_PASS_IN_STACK
12445 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
12446 #undef TARGET_PASS_BY_REFERENCE
12447 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
12448 #undef TARGET_CALLEE_COPIES
12449 #define TARGET_CALLEE_COPIES mips_callee_copies
12450 #undef TARGET_ARG_PARTIAL_BYTES
12451 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
12453 #undef TARGET_MODE_REP_EXTENDED
12454 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
12456 #undef TARGET_VECTOR_MODE_SUPPORTED_P
12457 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
12459 #undef TARGET_SCALAR_MODE_SUPPORTED_P
12460 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
12462 #undef TARGET_INIT_BUILTINS
12463 #define TARGET_INIT_BUILTINS mips_init_builtins
12464 #undef TARGET_EXPAND_BUILTIN
12465 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
12467 #undef TARGET_HAVE_TLS
12468 #define TARGET_HAVE_TLS HAVE_AS_TLS
12470 #undef TARGET_CANNOT_FORCE_CONST_MEM
12471 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
12473 #undef TARGET_ENCODE_SECTION_INFO
12474 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
12476 #undef TARGET_ATTRIBUTE_TABLE
12477 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
12478 /* All our function attributes are related to how out-of-line copies should
12479 be compiled or called. They don't in themselves prevent inlining. */
12480 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
12481 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
12483 #undef TARGET_EXTRA_LIVE_ON_ENTRY
12484 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
12486 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
12487 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
12488 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
12489 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
12491 #undef TARGET_COMP_TYPE_ATTRIBUTES
12492 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
12494 #ifdef HAVE_AS_DTPRELWORD
12495 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
12496 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
12498 #undef TARGET_DWARF_REGISTER_SPAN
12499 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
12501 struct gcc_target targetm = TARGET_INITIALIZER;
12503 #include "gt-mips.h"