1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 3, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING3. If not see
24 <http://www.gnu.org/licenses/>. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 #include "diagnostic.h"
62 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
63 #define UNSPEC_ADDRESS_P(X) \
64 (GET_CODE (X) == UNSPEC \
65 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
66 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
72 /* Extract the symbol type from UNSPEC wrapper X. */
73 #define UNSPEC_ADDRESS_TYPE(X) \
74 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
76 /* The maximum distance between the top of the stack frame and the
77 value $sp has when we save and restore registers.
79 The value for normal-mode code must be a SMALL_OPERAND and must
80 preserve the maximum stack alignment. We therefore use a value
81 of 0x7ff0 in this case.
83 MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
84 up to 0x7f8 bytes and can usually save or restore all the registers
85 that we need to save or restore. (Note that we can only use these
86 instructions for o32, for which the stack alignment is 8 bytes.)
88 We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
89 RESTORE are not available. We can then use unextended instructions
90 to save and restore registers, and to allocate and deallocate the top
92 #define MIPS_MAX_FIRST_STACK_STEP \
93 (!TARGET_MIPS16 ? 0x7ff0 \
94 : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8 \
95 : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
118 /* Execute the following loop body with SUBINSN set to each instruction
119 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
120 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
121 for ((SUBINSN) = SEQ_BEGIN (INSN); \
122 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
123 (SUBINSN) = NEXT_INSN (SUBINSN))
125 /* True if bit BIT is set in VALUE. */
126 #define BITSET_P(VALUE, BIT) (((VALUE) & (1 << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Macros to create an enumeration identifier for a function prototype. */
151 #define MIPS_FTYPE_NAME1(A, B) MIPS_##A##_FTYPE_##B
152 #define MIPS_FTYPE_NAME2(A, B, C) MIPS_##A##_FTYPE_##B##_##C
153 #define MIPS_FTYPE_NAME3(A, B, C, D) MIPS_##A##_FTYPE_##B##_##C##_##D
154 #define MIPS_FTYPE_NAME4(A, B, C, D, E) MIPS_##A##_FTYPE_##B##_##C##_##D##_##E
156 /* Classifies the prototype of a builtin function. */
157 enum mips_function_type
159 #define DEF_MIPS_FTYPE(NARGS, LIST) MIPS_FTYPE_NAME##NARGS LIST,
160 #include "config/mips/mips-ftypes.def"
161 #undef DEF_MIPS_FTYPE
165 /* Specifies how a builtin function should be converted into rtl. */
166 enum mips_builtin_type
168 /* The builtin corresponds directly to an .md pattern. The return
169 value is mapped to operand 0 and the arguments are mapped to
170 operands 1 and above. */
173 /* The builtin corresponds directly to an .md pattern. There is no return
174 value and the arguments are mapped to operands 0 and above. */
175 MIPS_BUILTIN_DIRECT_NO_TARGET,
177 /* The builtin corresponds to a comparison instruction followed by
178 a mips_cond_move_tf_ps pattern. The first two arguments are the
179 values to compare and the second two arguments are the vector
180 operands for the movt.ps or movf.ps instruction (in assembly order). */
184 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
185 of this instruction is the result of the comparison, which has mode
186 CCV2 or CCV4. The function arguments are mapped to operands 1 and
187 above. The function's return value is an SImode boolean that is
188 true under the following conditions:
190 MIPS_BUILTIN_CMP_ANY: one of the registers is true
191 MIPS_BUILTIN_CMP_ALL: all of the registers are true
192 MIPS_BUILTIN_CMP_LOWER: the first register is true
193 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
194 MIPS_BUILTIN_CMP_ANY,
195 MIPS_BUILTIN_CMP_ALL,
196 MIPS_BUILTIN_CMP_UPPER,
197 MIPS_BUILTIN_CMP_LOWER,
199 /* As above, but the instruction only sets a single $fcc register. */
200 MIPS_BUILTIN_CMP_SINGLE,
202 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
203 MIPS_BUILTIN_BPOSGE32
206 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
207 #define MIPS_FP_CONDITIONS(MACRO) \
225 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
226 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
227 enum mips_fp_condition {
228 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
231 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
232 #define STRINGIFY(X) #X
233 static const char *const mips_fp_conditions[] = {
234 MIPS_FP_CONDITIONS (STRINGIFY)
237 /* Information about a function's frame layout. */
238 struct mips_frame_info GTY(())
240 /* The size of the frame in bytes. */
241 HOST_WIDE_INT total_size;
243 /* The number of bytes allocated to variables. */
244 HOST_WIDE_INT var_size;
246 /* The number of bytes allocated to outgoing function arguments. */
247 HOST_WIDE_INT args_size;
249 /* The number of bytes allocated to the .cprestore slot, or 0 if there
251 HOST_WIDE_INT cprestore_size;
253 /* Bit X is set if the function saves or restores GPR X. */
256 /* Likewise FPR X. */
259 /* The number of GPRs and FPRs saved. */
263 /* The offset of the topmost GPR and FPR save slots from the top of
264 the frame, or zero if no such slots are needed. */
265 HOST_WIDE_INT gp_save_offset;
266 HOST_WIDE_INT fp_save_offset;
268 /* Likewise, but giving offsets from the bottom of the frame. */
269 HOST_WIDE_INT gp_sp_offset;
270 HOST_WIDE_INT fp_sp_offset;
272 /* The offset of arg_pointer_rtx from frame_pointer_rtx. */
273 HOST_WIDE_INT arg_pointer_offset;
275 /* The offset of hard_frame_pointer_rtx from frame_pointer_rtx. */
276 HOST_WIDE_INT hard_frame_pointer_offset;
279 struct machine_function GTY(()) {
280 /* Pseudo-reg holding the value of $28 in a mips16 function which
281 refers to GP relative global variables. */
282 rtx mips16_gp_pseudo_rtx;
284 /* The number of extra stack bytes taken up by register varargs.
285 This area is allocated by the callee at the very top of the frame. */
288 /* Current frame information, calculated by mips_compute_frame_info. */
289 struct mips_frame_info frame;
291 /* The register to use as the global pointer within this function. */
292 unsigned int global_pointer;
294 /* True if mips_adjust_insn_length should ignore an instruction's
296 bool ignore_hazard_length_p;
298 /* True if the whole function is suitable for .set noreorder and
300 bool all_noreorder_p;
302 /* True if the function is known to have an instruction that needs $gp. */
305 /* True if we have emitted an instruction to initialize
306 mips16_gp_pseudo_rtx. */
307 bool initialized_mips16_gp_pseudo_p;
310 /* Information about a single argument. */
313 /* True if the argument is passed in a floating-point register, or
314 would have been if we hadn't run out of registers. */
317 /* The number of words passed in registers, rounded up. */
318 unsigned int reg_words;
320 /* For EABI, the offset of the first register from GP_ARG_FIRST or
321 FP_ARG_FIRST. For other ABIs, the offset of the first register from
322 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
323 comment for details).
325 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
327 unsigned int reg_offset;
329 /* The number of words that must be passed on the stack, rounded up. */
330 unsigned int stack_words;
332 /* The offset from the start of the stack overflow area of the argument's
333 first stack word. Only meaningful when STACK_WORDS is nonzero. */
334 unsigned int stack_offset;
338 /* Information about an address described by mips_address_type.
344 REG is the base register and OFFSET is the constant offset.
347 REG is the register that contains the high part of the address,
348 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
349 is the type of OFFSET's symbol.
352 SYMBOL_TYPE is the type of symbol being referenced. */
354 struct mips_address_info
356 enum mips_address_type type;
359 enum mips_symbol_type symbol_type;
363 /* One stage in a constant building sequence. These sequences have
367 A = A CODE[1] VALUE[1]
368 A = A CODE[2] VALUE[2]
371 where A is an accumulator, each CODE[i] is a binary rtl operation
372 and each VALUE[i] is a constant integer. */
373 struct mips_integer_op {
375 unsigned HOST_WIDE_INT value;
379 /* The largest number of operations needed to load an integer constant.
380 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
381 When the lowest bit is clear, we can try, but reject a sequence with
382 an extra SLL at the end. */
383 #define MIPS_MAX_INTEGER_OPS 7
385 /* Information about a MIPS16e SAVE or RESTORE instruction. */
386 struct mips16e_save_restore_info {
387 /* The number of argument registers saved by a SAVE instruction.
388 0 for RESTORE instructions. */
391 /* Bit X is set if the instruction saves or restores GPR X. */
394 /* The total number of bytes to allocate. */
398 /* Global variables for machine-dependent things. */
400 /* Threshold for data being put into the small data/bss area, instead
401 of the normal data area. */
402 int mips_section_threshold = -1;
404 /* Count the number of .file directives, so that .loc is up to date. */
405 int num_source_filenames = 0;
407 /* Name of the file containing the current function. */
408 const char *current_function_file = "";
410 /* Count the number of sdb related labels are generated (to find block
411 start and end boundaries). */
412 int sdb_label_count = 0;
414 /* Next label # for each statement for Silicon Graphics IRIS systems. */
417 /* Map GCC register number to debugger register number. */
418 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
419 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
421 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
426 /* The next branch instruction is a branch likely, not branch normal. */
427 int mips_branch_likely;
429 /* The operands passed to the last cmpMM expander. */
432 /* The target cpu for code generation. */
433 enum processor_type mips_arch;
434 const struct mips_cpu_info *mips_arch_info;
436 /* The target cpu for optimization and scheduling. */
437 enum processor_type mips_tune;
438 const struct mips_cpu_info *mips_tune_info;
440 /* Which instruction set architecture to use. */
443 /* The architecture selected by -mipsN. */
444 static const struct mips_cpu_info *mips_isa_info;
446 /* Which ABI to use. */
447 int mips_abi = MIPS_ABI_DEFAULT;
449 /* Cost information to use. */
450 const struct mips_rtx_cost_data *mips_cost;
452 /* Remember the ambient target flags, excluding mips16. */
453 static int mips_base_target_flags;
454 /* The mips16 command-line target flags only. */
455 static bool mips_base_mips16;
456 /* Similar copies of option settings. */
457 static int mips_flag_delayed_branch; /* flag_delayed_branch */
458 static int mips_base_schedule_insns; /* flag_schedule_insns */
459 static int mips_base_reorder_blocks_and_partition; /* flag_reorder... */
460 static int mips_base_move_loop_invariants; /* flag_move_loop_invariants */
461 static int mips_base_align_loops; /* align_loops */
462 static int mips_base_align_jumps; /* align_jumps */
463 static int mips_base_align_functions; /* align_functions */
465 /* The -mtext-loads setting. */
466 enum mips_code_readable_setting mips_code_readable = CODE_READABLE_YES;
468 /* If TRUE, we split addresses into their high and low parts in the RTL. */
469 int mips_split_addresses;
471 /* Mode used for saving/restoring general purpose registers. */
472 static enum machine_mode gpr_mode;
474 /* Array giving truth value on whether or not a given hard register
475 can support a given mode. */
476 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
478 /* List of all MIPS punctuation characters used by print_operand. */
479 char mips_print_operand_punct[256];
481 static GTY (()) int mips_output_filename_first_time = 1;
483 /* mips_split_p[X] is true if symbols of type X can be split by
484 mips_split_symbol(). */
485 bool mips_split_p[NUM_SYMBOL_TYPES];
487 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
488 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
489 if they are matched by a special .md file pattern. */
490 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
492 /* Likewise for HIGHs. */
493 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
495 /* Map hard register number to register class */
496 const enum reg_class mips_regno_to_class[] =
498 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
499 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
500 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
501 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
502 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
503 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
504 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
505 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
506 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
507 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
508 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
509 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
510 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
511 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
512 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
513 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
514 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
515 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
516 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
517 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
518 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
519 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
520 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
521 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
522 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
523 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
524 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
525 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
526 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
527 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
528 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
529 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
530 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
531 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
532 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
533 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
534 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
535 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
536 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
537 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
538 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
539 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
540 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
541 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
542 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
543 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
544 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
547 /* Table of machine dependent attributes. */
548 const struct attribute_spec mips_attribute_table[] =
550 { "long_call", 0, 0, false, true, true, NULL },
551 { "far", 0, 0, false, true, true, NULL },
552 { "near", 0, 0, false, true, true, NULL },
553 /* Switch MIPS16 ASE on and off per-function. We would really like
554 to make these type attributes, but GCC doesn't provide the hooks
555 we need to support the right conversion rules. As declaration
556 attributes, they affect code generation but don't carry other
558 { "mips16", 0, 0, true, false, false, NULL },
559 { "nomips16", 0, 0, true, false, false, NULL },
560 { NULL, 0, 0, false, false, false, NULL }
563 /* A table describing all the processors gcc knows about. Names are
564 matched in the order listed. The first mention of an ISA level is
565 taken as the canonical name for that ISA.
567 To ease comparison, please keep this table in the same order
568 as gas's mips_cpu_info_table[]. Please also make sure that
569 MIPS_ISA_LEVEL_SPEC and MIPS_ARCH_FLOAT_SPEC handle all -march
570 options correctly. */
571 const struct mips_cpu_info mips_cpu_info_table[] = {
572 /* Entries for generic ISAs */
573 { "mips1", PROCESSOR_R3000, 1, 0 },
574 { "mips2", PROCESSOR_R6000, 2, 0 },
575 { "mips3", PROCESSOR_R4000, 3, 0 },
576 { "mips4", PROCESSOR_R8000, 4, 0 },
577 /* Prefer not to use branch-likely instructions for generic MIPS32rX
578 and MIPS64rX code. The instructions were officially deprecated
579 in revisions 2 and earlier, but revision 3 is likely to downgrade
580 that to a recommendation to avoid the instructions in code that
581 isn't tuned to a specific processor. */
582 { "mips32", PROCESSOR_4KC, 32, PTF_AVOID_BRANCHLIKELY },
583 { "mips32r2", PROCESSOR_M4K, 33, PTF_AVOID_BRANCHLIKELY },
584 { "mips64", PROCESSOR_5KC, 64, PTF_AVOID_BRANCHLIKELY },
587 { "r3000", PROCESSOR_R3000, 1, 0 },
588 { "r2000", PROCESSOR_R3000, 1, 0 }, /* = r3000 */
589 { "r3900", PROCESSOR_R3900, 1, 0 },
592 { "r6000", PROCESSOR_R6000, 2, 0 },
595 { "r4000", PROCESSOR_R4000, 3, 0 },
596 { "vr4100", PROCESSOR_R4100, 3, 0 },
597 { "vr4111", PROCESSOR_R4111, 3, 0 },
598 { "vr4120", PROCESSOR_R4120, 3, 0 },
599 { "vr4130", PROCESSOR_R4130, 3, 0 },
600 { "vr4300", PROCESSOR_R4300, 3, 0 },
601 { "r4400", PROCESSOR_R4000, 3, 0 }, /* = r4000 */
602 { "r4600", PROCESSOR_R4600, 3, 0 },
603 { "orion", PROCESSOR_R4600, 3, 0 }, /* = r4600 */
604 { "r4650", PROCESSOR_R4650, 3, 0 },
607 { "r8000", PROCESSOR_R8000, 4, 0 },
608 { "vr5000", PROCESSOR_R5000, 4, 0 },
609 { "vr5400", PROCESSOR_R5400, 4, 0 },
610 { "vr5500", PROCESSOR_R5500, 4, PTF_AVOID_BRANCHLIKELY },
611 { "rm7000", PROCESSOR_R7000, 4, 0 },
612 { "rm9000", PROCESSOR_R9000, 4, 0 },
615 { "4kc", PROCESSOR_4KC, 32, 0 },
616 { "4km", PROCESSOR_4KC, 32, 0 }, /* = 4kc */
617 { "4kp", PROCESSOR_4KP, 32, 0 },
618 { "4ksc", PROCESSOR_4KC, 32, 0 },
620 /* MIPS32 Release 2 */
621 { "m4k", PROCESSOR_M4K, 33, 0 },
622 { "4kec", PROCESSOR_4KC, 33, 0 },
623 { "4kem", PROCESSOR_4KC, 33, 0 },
624 { "4kep", PROCESSOR_4KP, 33, 0 },
625 { "4ksd", PROCESSOR_4KC, 33, 0 },
627 { "24kc", PROCESSOR_24KC, 33, 0 },
628 { "24kf2_1", PROCESSOR_24KF2_1, 33, 0 },
629 { "24kf", PROCESSOR_24KF2_1, 33, 0 },
630 { "24kf1_1", PROCESSOR_24KF1_1, 33, 0 },
631 { "24kfx", PROCESSOR_24KF1_1, 33, 0 },
632 { "24kx", PROCESSOR_24KF1_1, 33, 0 },
634 { "24kec", PROCESSOR_24KC, 33, 0 }, /* 24K with DSP */
635 { "24kef2_1", PROCESSOR_24KF2_1, 33, 0 },
636 { "24kef", PROCESSOR_24KF2_1, 33, 0 },
637 { "24kef1_1", PROCESSOR_24KF1_1, 33, 0 },
638 { "24kefx", PROCESSOR_24KF1_1, 33, 0 },
639 { "24kex", PROCESSOR_24KF1_1, 33, 0 },
641 { "34kc", PROCESSOR_24KC, 33, 0 }, /* 34K with MT/DSP */
642 { "34kf2_1", PROCESSOR_24KF2_1, 33, 0 },
643 { "34kf", PROCESSOR_24KF2_1, 33, 0 },
644 { "34kf1_1", PROCESSOR_24KF1_1, 33, 0 },
645 { "34kfx", PROCESSOR_24KF1_1, 33, 0 },
646 { "34kx", PROCESSOR_24KF1_1, 33, 0 },
648 { "74kc", PROCESSOR_74KC, 33, 0 }, /* 74K with DSPr2 */
649 { "74kf2_1", PROCESSOR_74KF2_1, 33, 0 },
650 { "74kf", PROCESSOR_74KF2_1, 33, 0 },
651 { "74kf1_1", PROCESSOR_74KF1_1, 33, 0 },
652 { "74kfx", PROCESSOR_74KF1_1, 33, 0 },
653 { "74kx", PROCESSOR_74KF1_1, 33, 0 },
654 { "74kf3_2", PROCESSOR_74KF3_2, 33, 0 },
657 { "5kc", PROCESSOR_5KC, 64, 0 },
658 { "5kf", PROCESSOR_5KF, 64, 0 },
659 { "20kc", PROCESSOR_20KC, 64, PTF_AVOID_BRANCHLIKELY },
660 { "sb1", PROCESSOR_SB1, 64, PTF_AVOID_BRANCHLIKELY },
661 { "sb1a", PROCESSOR_SB1A, 64, PTF_AVOID_BRANCHLIKELY },
662 { "sr71000", PROCESSOR_SR71000, 64, PTF_AVOID_BRANCHLIKELY },
665 /* Default costs. If these are used for a processor we should look
666 up the actual costs. */
667 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
668 COSTS_N_INSNS (7), /* fp_mult_sf */ \
669 COSTS_N_INSNS (8), /* fp_mult_df */ \
670 COSTS_N_INSNS (23), /* fp_div_sf */ \
671 COSTS_N_INSNS (36), /* fp_div_df */ \
672 COSTS_N_INSNS (10), /* int_mult_si */ \
673 COSTS_N_INSNS (10), /* int_mult_di */ \
674 COSTS_N_INSNS (69), /* int_div_si */ \
675 COSTS_N_INSNS (69), /* int_div_di */ \
676 2, /* branch_cost */ \
677 4 /* memory_latency */
679 /* Need to replace these with the costs of calling the appropriate
681 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
682 COSTS_N_INSNS (256), /* fp_mult_sf */ \
683 COSTS_N_INSNS (256), /* fp_mult_df */ \
684 COSTS_N_INSNS (256), /* fp_div_sf */ \
685 COSTS_N_INSNS (256) /* fp_div_df */
687 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
689 COSTS_N_INSNS (1), /* fp_add */
690 COSTS_N_INSNS (1), /* fp_mult_sf */
691 COSTS_N_INSNS (1), /* fp_mult_df */
692 COSTS_N_INSNS (1), /* fp_div_sf */
693 COSTS_N_INSNS (1), /* fp_div_df */
694 COSTS_N_INSNS (1), /* int_mult_si */
695 COSTS_N_INSNS (1), /* int_mult_di */
696 COSTS_N_INSNS (1), /* int_div_si */
697 COSTS_N_INSNS (1), /* int_div_di */
699 4 /* memory_latency */
702 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
705 COSTS_N_INSNS (2), /* fp_add */
706 COSTS_N_INSNS (4), /* fp_mult_sf */
707 COSTS_N_INSNS (5), /* fp_mult_df */
708 COSTS_N_INSNS (12), /* fp_div_sf */
709 COSTS_N_INSNS (19), /* fp_div_df */
710 COSTS_N_INSNS (12), /* int_mult_si */
711 COSTS_N_INSNS (12), /* int_mult_di */
712 COSTS_N_INSNS (35), /* int_div_si */
713 COSTS_N_INSNS (35), /* int_div_di */
715 4 /* memory_latency */
720 COSTS_N_INSNS (6), /* int_mult_si */
721 COSTS_N_INSNS (6), /* int_mult_di */
722 COSTS_N_INSNS (36), /* int_div_si */
723 COSTS_N_INSNS (36), /* int_div_di */
725 4 /* memory_latency */
729 COSTS_N_INSNS (36), /* int_mult_si */
730 COSTS_N_INSNS (36), /* int_mult_di */
731 COSTS_N_INSNS (37), /* int_div_si */
732 COSTS_N_INSNS (37), /* int_div_di */
734 4 /* memory_latency */
738 COSTS_N_INSNS (4), /* int_mult_si */
739 COSTS_N_INSNS (11), /* int_mult_di */
740 COSTS_N_INSNS (36), /* int_div_si */
741 COSTS_N_INSNS (68), /* int_div_di */
743 4 /* memory_latency */
746 COSTS_N_INSNS (4), /* fp_add */
747 COSTS_N_INSNS (4), /* fp_mult_sf */
748 COSTS_N_INSNS (5), /* fp_mult_df */
749 COSTS_N_INSNS (17), /* fp_div_sf */
750 COSTS_N_INSNS (32), /* fp_div_df */
751 COSTS_N_INSNS (4), /* int_mult_si */
752 COSTS_N_INSNS (11), /* int_mult_di */
753 COSTS_N_INSNS (36), /* int_div_si */
754 COSTS_N_INSNS (68), /* int_div_di */
756 4 /* memory_latency */
759 COSTS_N_INSNS (4), /* fp_add */
760 COSTS_N_INSNS (4), /* fp_mult_sf */
761 COSTS_N_INSNS (5), /* fp_mult_df */
762 COSTS_N_INSNS (17), /* fp_div_sf */
763 COSTS_N_INSNS (32), /* fp_div_df */
764 COSTS_N_INSNS (4), /* int_mult_si */
765 COSTS_N_INSNS (7), /* int_mult_di */
766 COSTS_N_INSNS (42), /* int_div_si */
767 COSTS_N_INSNS (72), /* int_div_di */
769 4 /* memory_latency */
773 COSTS_N_INSNS (5), /* int_mult_si */
774 COSTS_N_INSNS (5), /* int_mult_di */
775 COSTS_N_INSNS (41), /* int_div_si */
776 COSTS_N_INSNS (41), /* int_div_di */
778 4 /* memory_latency */
781 COSTS_N_INSNS (8), /* fp_add */
782 COSTS_N_INSNS (8), /* fp_mult_sf */
783 COSTS_N_INSNS (10), /* fp_mult_df */
784 COSTS_N_INSNS (34), /* fp_div_sf */
785 COSTS_N_INSNS (64), /* fp_div_df */
786 COSTS_N_INSNS (5), /* int_mult_si */
787 COSTS_N_INSNS (5), /* int_mult_di */
788 COSTS_N_INSNS (41), /* int_div_si */
789 COSTS_N_INSNS (41), /* int_div_di */
791 4 /* memory_latency */
794 COSTS_N_INSNS (4), /* fp_add */
795 COSTS_N_INSNS (4), /* fp_mult_sf */
796 COSTS_N_INSNS (5), /* fp_mult_df */
797 COSTS_N_INSNS (17), /* fp_div_sf */
798 COSTS_N_INSNS (32), /* fp_div_df */
799 COSTS_N_INSNS (5), /* int_mult_si */
800 COSTS_N_INSNS (5), /* int_mult_di */
801 COSTS_N_INSNS (41), /* int_div_si */
802 COSTS_N_INSNS (41), /* int_div_di */
804 4 /* memory_latency */
808 COSTS_N_INSNS (5), /* int_mult_si */
809 COSTS_N_INSNS (5), /* int_mult_di */
810 COSTS_N_INSNS (41), /* int_div_si */
811 COSTS_N_INSNS (41), /* int_div_di */
813 4 /* memory_latency */
816 COSTS_N_INSNS (8), /* fp_add */
817 COSTS_N_INSNS (8), /* fp_mult_sf */
818 COSTS_N_INSNS (10), /* fp_mult_df */
819 COSTS_N_INSNS (34), /* fp_div_sf */
820 COSTS_N_INSNS (64), /* fp_div_df */
821 COSTS_N_INSNS (5), /* int_mult_si */
822 COSTS_N_INSNS (5), /* int_mult_di */
823 COSTS_N_INSNS (41), /* int_div_si */
824 COSTS_N_INSNS (41), /* int_div_di */
826 4 /* memory_latency */
829 COSTS_N_INSNS (4), /* fp_add */
830 COSTS_N_INSNS (4), /* fp_mult_sf */
831 COSTS_N_INSNS (5), /* fp_mult_df */
832 COSTS_N_INSNS (17), /* fp_div_sf */
833 COSTS_N_INSNS (32), /* fp_div_df */
834 COSTS_N_INSNS (5), /* int_mult_si */
835 COSTS_N_INSNS (5), /* int_mult_di */
836 COSTS_N_INSNS (41), /* int_div_si */
837 COSTS_N_INSNS (41), /* int_div_di */
839 4 /* memory_latency */
842 COSTS_N_INSNS (6), /* fp_add */
843 COSTS_N_INSNS (6), /* fp_mult_sf */
844 COSTS_N_INSNS (7), /* fp_mult_df */
845 COSTS_N_INSNS (25), /* fp_div_sf */
846 COSTS_N_INSNS (48), /* fp_div_df */
847 COSTS_N_INSNS (5), /* int_mult_si */
848 COSTS_N_INSNS (5), /* int_mult_di */
849 COSTS_N_INSNS (41), /* int_div_si */
850 COSTS_N_INSNS (41), /* int_div_di */
852 4 /* memory_latency */
858 COSTS_N_INSNS (2), /* fp_add */
859 COSTS_N_INSNS (4), /* fp_mult_sf */
860 COSTS_N_INSNS (5), /* fp_mult_df */
861 COSTS_N_INSNS (12), /* fp_div_sf */
862 COSTS_N_INSNS (19), /* fp_div_df */
863 COSTS_N_INSNS (2), /* int_mult_si */
864 COSTS_N_INSNS (2), /* int_mult_di */
865 COSTS_N_INSNS (35), /* int_div_si */
866 COSTS_N_INSNS (35), /* int_div_di */
868 4 /* memory_latency */
871 COSTS_N_INSNS (3), /* fp_add */
872 COSTS_N_INSNS (5), /* fp_mult_sf */
873 COSTS_N_INSNS (6), /* fp_mult_df */
874 COSTS_N_INSNS (15), /* fp_div_sf */
875 COSTS_N_INSNS (16), /* fp_div_df */
876 COSTS_N_INSNS (17), /* int_mult_si */
877 COSTS_N_INSNS (17), /* int_mult_di */
878 COSTS_N_INSNS (38), /* int_div_si */
879 COSTS_N_INSNS (38), /* int_div_di */
881 6 /* memory_latency */
884 COSTS_N_INSNS (6), /* fp_add */
885 COSTS_N_INSNS (7), /* fp_mult_sf */
886 COSTS_N_INSNS (8), /* fp_mult_df */
887 COSTS_N_INSNS (23), /* fp_div_sf */
888 COSTS_N_INSNS (36), /* fp_div_df */
889 COSTS_N_INSNS (10), /* int_mult_si */
890 COSTS_N_INSNS (10), /* int_mult_di */
891 COSTS_N_INSNS (69), /* int_div_si */
892 COSTS_N_INSNS (69), /* int_div_di */
894 6 /* memory_latency */
906 /* The only costs that appear to be updated here are
907 integer multiplication. */
909 COSTS_N_INSNS (4), /* int_mult_si */
910 COSTS_N_INSNS (6), /* int_mult_di */
911 COSTS_N_INSNS (69), /* int_div_si */
912 COSTS_N_INSNS (69), /* int_div_di */
914 4 /* memory_latency */
926 COSTS_N_INSNS (6), /* fp_add */
927 COSTS_N_INSNS (4), /* fp_mult_sf */
928 COSTS_N_INSNS (5), /* fp_mult_df */
929 COSTS_N_INSNS (23), /* fp_div_sf */
930 COSTS_N_INSNS (36), /* fp_div_df */
931 COSTS_N_INSNS (5), /* int_mult_si */
932 COSTS_N_INSNS (5), /* int_mult_di */
933 COSTS_N_INSNS (36), /* int_div_si */
934 COSTS_N_INSNS (36), /* int_div_di */
936 4 /* memory_latency */
939 COSTS_N_INSNS (6), /* fp_add */
940 COSTS_N_INSNS (5), /* fp_mult_sf */
941 COSTS_N_INSNS (6), /* fp_mult_df */
942 COSTS_N_INSNS (30), /* fp_div_sf */
943 COSTS_N_INSNS (59), /* fp_div_df */
944 COSTS_N_INSNS (3), /* int_mult_si */
945 COSTS_N_INSNS (4), /* int_mult_di */
946 COSTS_N_INSNS (42), /* int_div_si */
947 COSTS_N_INSNS (74), /* int_div_di */
949 4 /* memory_latency */
952 COSTS_N_INSNS (6), /* fp_add */
953 COSTS_N_INSNS (5), /* fp_mult_sf */
954 COSTS_N_INSNS (6), /* fp_mult_df */
955 COSTS_N_INSNS (30), /* fp_div_sf */
956 COSTS_N_INSNS (59), /* fp_div_df */
957 COSTS_N_INSNS (5), /* int_mult_si */
958 COSTS_N_INSNS (9), /* int_mult_di */
959 COSTS_N_INSNS (42), /* int_div_si */
960 COSTS_N_INSNS (74), /* int_div_di */
962 4 /* memory_latency */
965 /* The only costs that are changed here are
966 integer multiplication. */
967 COSTS_N_INSNS (6), /* fp_add */
968 COSTS_N_INSNS (7), /* fp_mult_sf */
969 COSTS_N_INSNS (8), /* fp_mult_df */
970 COSTS_N_INSNS (23), /* fp_div_sf */
971 COSTS_N_INSNS (36), /* fp_div_df */
972 COSTS_N_INSNS (5), /* int_mult_si */
973 COSTS_N_INSNS (9), /* int_mult_di */
974 COSTS_N_INSNS (69), /* int_div_si */
975 COSTS_N_INSNS (69), /* int_div_di */
977 4 /* memory_latency */
983 /* The only costs that are changed here are
984 integer multiplication. */
985 COSTS_N_INSNS (6), /* fp_add */
986 COSTS_N_INSNS (7), /* fp_mult_sf */
987 COSTS_N_INSNS (8), /* fp_mult_df */
988 COSTS_N_INSNS (23), /* fp_div_sf */
989 COSTS_N_INSNS (36), /* fp_div_df */
990 COSTS_N_INSNS (3), /* int_mult_si */
991 COSTS_N_INSNS (8), /* int_mult_di */
992 COSTS_N_INSNS (69), /* int_div_si */
993 COSTS_N_INSNS (69), /* int_div_di */
995 4 /* memory_latency */
998 /* These costs are the same as the SB-1A below. */
999 COSTS_N_INSNS (4), /* fp_add */
1000 COSTS_N_INSNS (4), /* fp_mult_sf */
1001 COSTS_N_INSNS (4), /* fp_mult_df */
1002 COSTS_N_INSNS (24), /* fp_div_sf */
1003 COSTS_N_INSNS (32), /* fp_div_df */
1004 COSTS_N_INSNS (3), /* int_mult_si */
1005 COSTS_N_INSNS (4), /* int_mult_di */
1006 COSTS_N_INSNS (36), /* int_div_si */
1007 COSTS_N_INSNS (68), /* int_div_di */
1008 1, /* branch_cost */
1009 4 /* memory_latency */
1012 /* These costs are the same as the SB-1 above. */
1013 COSTS_N_INSNS (4), /* fp_add */
1014 COSTS_N_INSNS (4), /* fp_mult_sf */
1015 COSTS_N_INSNS (4), /* fp_mult_df */
1016 COSTS_N_INSNS (24), /* fp_div_sf */
1017 COSTS_N_INSNS (32), /* fp_div_df */
1018 COSTS_N_INSNS (3), /* int_mult_si */
1019 COSTS_N_INSNS (4), /* int_mult_di */
1020 COSTS_N_INSNS (36), /* int_div_si */
1021 COSTS_N_INSNS (68), /* int_div_di */
1022 1, /* branch_cost */
1023 4 /* memory_latency */
1030 /* Use a hash table to keep track of implicit mips16/nomips16 attributes
1031 for -mflip_mips16. It maps decl names onto a boolean mode setting. */
1033 struct mflip_mips16_entry GTY (()) {
1037 static GTY ((param_is (struct mflip_mips16_entry))) htab_t mflip_mips16_htab;
1039 /* Hash table callbacks for mflip_mips16_htab. */
1042 mflip_mips16_htab_hash (const void *entry)
1044 return htab_hash_string (((const struct mflip_mips16_entry *) entry)->name);
1048 mflip_mips16_htab_eq (const void *entry, const void *name)
1050 return strcmp (((const struct mflip_mips16_entry *) entry)->name,
1051 (const char *) name) == 0;
1054 static GTY(()) int mips16_flipper;
1056 /* DECL is a function that needs a default "mips16" or "nomips16" attribute
1057 for -mflip-mips16. Return true if it should use "mips16" and false if
1058 it should use "nomips16". */
1061 mflip_mips16_use_mips16_p (tree decl)
1063 struct mflip_mips16_entry *entry;
1068 /* Use the opposite of the command-line setting for anonymous decls. */
1069 if (!DECL_NAME (decl))
1070 return !mips_base_mips16;
1072 if (!mflip_mips16_htab)
1073 mflip_mips16_htab = htab_create_ggc (37, mflip_mips16_htab_hash,
1074 mflip_mips16_htab_eq, NULL);
1076 name = IDENTIFIER_POINTER (DECL_NAME (decl));
1077 hash = htab_hash_string (name);
1078 slot = htab_find_slot_with_hash (mflip_mips16_htab, name, hash, INSERT);
1079 entry = (struct mflip_mips16_entry *) *slot;
1082 mips16_flipper = !mips16_flipper;
1083 entry = GGC_NEW (struct mflip_mips16_entry);
1085 entry->mips16_p = mips16_flipper ? !mips_base_mips16 : mips_base_mips16;
1088 return entry->mips16_p;
1091 /* Predicates to test for presence of "near" and "far"/"long_call"
1092 attributes on the given TYPE. */
1095 mips_near_type_p (const_tree type)
1097 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1101 mips_far_type_p (const_tree type)
1103 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1104 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1107 /* Similar predicates for "mips16"/"nomips16" attributes. */
1110 mips_mips16_decl_p (const_tree decl)
1112 return lookup_attribute ("mips16", DECL_ATTRIBUTES (decl)) != NULL;
1116 mips_nomips16_decl_p (const_tree decl)
1118 return lookup_attribute ("nomips16", DECL_ATTRIBUTES (decl)) != NULL;
1121 /* Return true if function DECL is a MIPS16 function. Return the ambient
1122 setting if DECL is null. */
1125 mips_use_mips16_mode_p (tree decl)
1129 /* Nested functions must use the same frame pointer as their
1130 parent and must therefore use the same ISA mode. */
1131 tree parent = decl_function_context (decl);
1134 if (mips_mips16_decl_p (decl))
1136 if (mips_nomips16_decl_p (decl))
1139 return mips_base_mips16;
1142 /* Return 0 if the attributes for two types are incompatible, 1 if they
1143 are compatible, and 2 if they are nearly compatible (which causes a
1144 warning to be generated). */
1147 mips_comp_type_attributes (const_tree type1, const_tree type2)
1149 /* Check for mismatch of non-default calling convention. */
1150 if (TREE_CODE (type1) != FUNCTION_TYPE)
1153 /* Disallow mixed near/far attributes. */
1154 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1156 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1162 /* Implement TARGET_INSERT_ATTRIBUTES. */
1165 mips_insert_attributes (tree decl, tree *attributes)
1168 bool mips16_p, nomips16_p;
1170 /* Check for "mips16" and "nomips16" attributes. */
1171 mips16_p = lookup_attribute ("mips16", *attributes) != NULL;
1172 nomips16_p = lookup_attribute ("nomips16", *attributes) != NULL;
1173 if (TREE_CODE (decl) != FUNCTION_DECL)
1176 error ("%qs attribute only applies to functions", "mips16");
1178 error ("%qs attribute only applies to functions", "nomips16");
1182 mips16_p |= mips_mips16_decl_p (decl);
1183 nomips16_p |= mips_nomips16_decl_p (decl);
1184 if (mips16_p || nomips16_p)
1186 /* DECL cannot be simultaneously mips16 and nomips16. */
1187 if (mips16_p && nomips16_p)
1188 error ("%qs cannot have both %<mips16%> and "
1189 "%<nomips16%> attributes",
1190 IDENTIFIER_POINTER (DECL_NAME (decl)));
1192 else if (TARGET_FLIP_MIPS16 && !DECL_ARTIFICIAL (decl))
1194 /* Implement -mflip-mips16. If DECL has neither a "nomips16" nor a
1195 "mips16" attribute, arbitrarily pick one. We must pick the same
1196 setting for duplicate declarations of a function. */
1197 name = mflip_mips16_use_mips16_p (decl) ? "mips16" : "nomips16";
1198 *attributes = tree_cons (get_identifier (name), NULL, *attributes);
1203 /* Implement TARGET_MERGE_DECL_ATTRIBUTES. */
1206 mips_merge_decl_attributes (tree olddecl, tree newdecl)
1208 /* The decls' "mips16" and "nomips16" attributes must match exactly. */
1209 if (mips_mips16_decl_p (olddecl) != mips_mips16_decl_p (newdecl))
1210 error ("%qs redeclared with conflicting %qs attributes",
1211 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "mips16");
1212 if (mips_nomips16_decl_p (olddecl) != mips_nomips16_decl_p (newdecl))
1213 error ("%qs redeclared with conflicting %qs attributes",
1214 IDENTIFIER_POINTER (DECL_NAME (newdecl)), "nomips16");
1216 return merge_attributes (DECL_ATTRIBUTES (olddecl),
1217 DECL_ATTRIBUTES (newdecl));
1220 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1221 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1224 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
1226 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1228 *base_ptr = XEXP (x, 0);
1229 *offset_ptr = INTVAL (XEXP (x, 1));
1238 static unsigned int mips_build_integer (struct mips_integer_op *,
1239 unsigned HOST_WIDE_INT);
1241 /* Subroutine of mips_build_integer (with the same interface).
1242 Assume that the final action in the sequence should be a left shift. */
1245 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1247 unsigned int i, shift;
1249 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1250 since signed numbers are easier to load than unsigned ones. */
1252 while ((value & 1) == 0)
1253 value /= 2, shift++;
1255 i = mips_build_integer (codes, value);
1256 codes[i].code = ASHIFT;
1257 codes[i].value = shift;
1262 /* As for mips_build_shift, but assume that the final action will be
1263 an IOR or PLUS operation. */
1266 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1268 unsigned HOST_WIDE_INT high;
1271 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1272 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1274 /* The constant is too complex to load with a simple lui/ori pair
1275 so our goal is to clear as many trailing zeros as possible.
1276 In this case, we know bit 16 is set and that the low 16 bits
1277 form a negative number. If we subtract that number from VALUE,
1278 we will clear at least the lowest 17 bits, maybe more. */
1279 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1280 codes[i].code = PLUS;
1281 codes[i].value = CONST_LOW_PART (value);
1285 i = mips_build_integer (codes, high);
1286 codes[i].code = IOR;
1287 codes[i].value = value & 0xffff;
1293 /* Fill CODES with a sequence of rtl operations to load VALUE.
1294 Return the number of operations needed. */
1297 mips_build_integer (struct mips_integer_op *codes,
1298 unsigned HOST_WIDE_INT value)
1300 if (SMALL_OPERAND (value)
1301 || SMALL_OPERAND_UNSIGNED (value)
1302 || LUI_OPERAND (value))
1304 /* The value can be loaded with a single instruction. */
1305 codes[0].code = UNKNOWN;
1306 codes[0].value = value;
1309 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1311 /* Either the constant is a simple LUI/ORI combination or its
1312 lowest bit is set. We don't want to shift in this case. */
1313 return mips_build_lower (codes, value);
1315 else if ((value & 0xffff) == 0)
1317 /* The constant will need at least three actions. The lowest
1318 16 bits are clear, so the final action will be a shift. */
1319 return mips_build_shift (codes, value);
1323 /* The final action could be a shift, add or inclusive OR.
1324 Rather than use a complex condition to select the best
1325 approach, try both mips_build_shift and mips_build_lower
1326 and pick the one that gives the shortest sequence.
1327 Note that this case is only used once per constant. */
1328 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1329 unsigned int cost, alt_cost;
1331 cost = mips_build_shift (codes, value);
1332 alt_cost = mips_build_lower (alt_codes, value);
1333 if (alt_cost < cost)
1335 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1342 /* Return true if X is a thread-local symbol. */
1345 mips_tls_operand_p (rtx x)
1347 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1350 /* Return true if SYMBOL_REF X is associated with a global symbol
1351 (in the STB_GLOBAL sense). */
1354 mips_global_symbol_p (const_rtx x)
1356 const_tree const decl = SYMBOL_REF_DECL (x);
1359 return !SYMBOL_REF_LOCAL_P (x);
1361 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1362 or weak symbols. Relocations in the object file will be against
1363 the target symbol, so it's that symbol's binding that matters here. */
1364 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1367 /* Return true if SYMBOL_REF X binds locally. */
1370 mips_symbol_binds_local_p (const_rtx x)
1372 return (SYMBOL_REF_DECL (x)
1373 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1374 : SYMBOL_REF_LOCAL_P (x));
1377 /* Return true if rtx constants of mode MODE should be put into a small
1381 mips_rtx_constant_in_small_data_p (enum machine_mode mode)
1383 return (!TARGET_EMBEDDED_DATA
1384 && TARGET_LOCAL_SDATA
1385 && GET_MODE_SIZE (mode) <= mips_section_threshold);
1388 /* Return true if X should not be moved directly into register $25.
1389 We need this because many versions of GAS will treat "la $25,foo" as
1390 part of a call sequence and so allow a global "foo" to be lazily bound. */
1393 mips_dangerous_for_la25_p (rtx x)
1395 return (!TARGET_EXPLICIT_RELOCS
1397 && GET_CODE (x) == SYMBOL_REF
1398 && mips_global_symbol_p (x));
1401 /* Return the method that should be used to access SYMBOL_REF or
1402 LABEL_REF X in context CONTEXT. */
1404 static enum mips_symbol_type
1405 mips_classify_symbol (const_rtx x, enum mips_symbol_context context)
1408 return SYMBOL_GOT_DISP;
1410 if (GET_CODE (x) == LABEL_REF)
1412 /* LABEL_REFs are used for jump tables as well as text labels.
1413 Only return SYMBOL_PC_RELATIVE if we know the label is in
1414 the text section. */
1415 if (TARGET_MIPS16_SHORT_JUMP_TABLES)
1416 return SYMBOL_PC_RELATIVE;
1417 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1418 return SYMBOL_GOT_PAGE_OFST;
1419 return SYMBOL_ABSOLUTE;
1422 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1424 if (SYMBOL_REF_TLS_MODEL (x))
1427 if (CONSTANT_POOL_ADDRESS_P (x))
1429 if (TARGET_MIPS16_TEXT_LOADS)
1430 return SYMBOL_PC_RELATIVE;
1432 if (TARGET_MIPS16_PCREL_LOADS && context == SYMBOL_CONTEXT_MEM)
1433 return SYMBOL_PC_RELATIVE;
1435 if (mips_rtx_constant_in_small_data_p (get_pool_mode (x)))
1436 return SYMBOL_GP_RELATIVE;
1439 /* Do not use small-data accesses for weak symbols; they may end up
1442 && SYMBOL_REF_SMALL_P (x)
1443 && !SYMBOL_REF_WEAK (x))
1444 return SYMBOL_GP_RELATIVE;
1446 /* Don't use GOT accesses for locally-binding symbols when -mno-shared
1449 && !(TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x)))
1451 /* There are three cases to consider:
1453 - o32 PIC (either with or without explicit relocs)
1454 - n32/n64 PIC without explicit relocs
1455 - n32/n64 PIC with explicit relocs
1457 In the first case, both local and global accesses will use an
1458 R_MIPS_GOT16 relocation. We must correctly predict which of
1459 the two semantics (local or global) the assembler and linker
1460 will apply. The choice depends on the symbol's binding rather
1461 than its visibility.
1463 In the second case, the assembler will not use R_MIPS_GOT16
1464 relocations, but it chooses between local and global accesses
1465 in the same way as for o32 PIC.
1467 In the third case we have more freedom since both forms of
1468 access will work for any kind of symbol. However, there seems
1469 little point in doing things differently. */
1470 if (mips_global_symbol_p (x))
1471 return SYMBOL_GOT_DISP;
1473 return SYMBOL_GOT_PAGE_OFST;
1476 if (TARGET_MIPS16_PCREL_LOADS && context != SYMBOL_CONTEXT_CALL)
1477 return SYMBOL_FORCE_TO_MEM;
1478 return SYMBOL_ABSOLUTE;
1481 /* Classify symbolic expression X, given that it appears in context
1484 static enum mips_symbol_type
1485 mips_classify_symbolic_expression (rtx x, enum mips_symbol_context context)
1489 split_const (x, &x, &offset);
1490 if (UNSPEC_ADDRESS_P (x))
1491 return UNSPEC_ADDRESS_TYPE (x);
1493 return mips_classify_symbol (x, context);
1496 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1497 is the alignment (in bytes) of SYMBOL_REF X. */
1500 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
1502 /* If for some reason we can't get the alignment for the
1503 symbol, initializing this to one means we will only accept
1505 HOST_WIDE_INT align = 1;
1508 /* Get the alignment of the symbol we're referring to. */
1509 t = SYMBOL_REF_DECL (x);
1511 align = DECL_ALIGN_UNIT (t);
1513 return offset >= 0 && offset < align;
1516 /* Return true if X is a symbolic constant that can be used in context
1517 CONTEXT. If it is, store the type of the symbol in *SYMBOL_TYPE. */
1520 mips_symbolic_constant_p (rtx x, enum mips_symbol_context context,
1521 enum mips_symbol_type *symbol_type)
1525 split_const (x, &x, &offset);
1526 if (UNSPEC_ADDRESS_P (x))
1528 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1529 x = UNSPEC_ADDRESS (x);
1531 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1533 *symbol_type = mips_classify_symbol (x, context);
1534 if (*symbol_type == SYMBOL_TLS)
1540 if (offset == const0_rtx)
1543 /* Check whether a nonzero offset is valid for the underlying
1545 switch (*symbol_type)
1547 case SYMBOL_ABSOLUTE:
1548 case SYMBOL_FORCE_TO_MEM:
1549 case SYMBOL_32_HIGH:
1550 case SYMBOL_64_HIGH:
1553 /* If the target has 64-bit pointers and the object file only
1554 supports 32-bit symbols, the values of those symbols will be
1555 sign-extended. In this case we can't allow an arbitrary offset
1556 in case the 32-bit value X + OFFSET has a different sign from X. */
1557 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1558 return offset_within_block_p (x, INTVAL (offset));
1560 /* In other cases the relocations can handle any offset. */
1563 case SYMBOL_PC_RELATIVE:
1564 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1565 In this case, we no longer have access to the underlying constant,
1566 but the original symbol-based access was known to be valid. */
1567 if (GET_CODE (x) == LABEL_REF)
1572 case SYMBOL_GP_RELATIVE:
1573 /* Make sure that the offset refers to something within the
1574 same object block. This should guarantee that the final
1575 PC- or GP-relative offset is within the 16-bit limit. */
1576 return offset_within_block_p (x, INTVAL (offset));
1578 case SYMBOL_GOT_PAGE_OFST:
1579 case SYMBOL_GOTOFF_PAGE:
1580 /* If the symbol is global, the GOT entry will contain the symbol's
1581 address, and we will apply a 16-bit offset after loading it.
1582 If the symbol is local, the linker should provide enough local
1583 GOT entries for a 16-bit offset, but larger offsets may lead
1585 return SMALL_INT (offset);
1589 /* There is no carry between the HI and LO REL relocations, so the
1590 offset is only valid if we know it won't lead to such a carry. */
1591 return mips_offset_within_alignment_p (x, INTVAL (offset));
1593 case SYMBOL_GOT_DISP:
1594 case SYMBOL_GOTOFF_DISP:
1595 case SYMBOL_GOTOFF_CALL:
1596 case SYMBOL_GOTOFF_LOADGP:
1599 case SYMBOL_GOTTPREL:
1607 /* Like mips_symbol_insns, but treat extended MIPS16 instructions as a
1608 single instruction. We rely on the fact that, in the worst case,
1609 all instructions involved in a MIPS16 address calculation are usually
1613 mips_symbol_insns_1 (enum mips_symbol_type type, enum machine_mode mode)
1617 case SYMBOL_ABSOLUTE:
1618 /* When using 64-bit symbols, we need 5 preparatory instructions,
1621 lui $at,%highest(symbol)
1622 daddiu $at,$at,%higher(symbol)
1624 daddiu $at,$at,%hi(symbol)
1627 The final address is then $at + %lo(symbol). With 32-bit
1628 symbols we just need a preparatory lui for normal mode and
1629 a preparatory "li; sll" for MIPS16. */
1630 return ABI_HAS_64BIT_SYMBOLS ? 6 : TARGET_MIPS16 ? 3 : 2;
1632 case SYMBOL_GP_RELATIVE:
1633 /* Treat GP-relative accesses as taking a single instruction on
1634 MIPS16 too; the copy of $gp can often be shared. */
1637 case SYMBOL_PC_RELATIVE:
1638 /* PC-relative constants can be only be used with addiupc,
1640 if (mode == MAX_MACHINE_MODE
1641 || GET_MODE_SIZE (mode) == 4
1642 || GET_MODE_SIZE (mode) == 8)
1645 /* The constant must be loaded using addiupc first. */
1648 case SYMBOL_FORCE_TO_MEM:
1649 /* LEAs will be converted into constant-pool references by
1651 if (mode == MAX_MACHINE_MODE)
1654 /* The constant must be loaded from the constant pool. */
1657 case SYMBOL_GOT_DISP:
1658 /* The constant will have to be loaded from the GOT before it
1659 is used in an address. */
1660 if (mode != MAX_MACHINE_MODE)
1665 case SYMBOL_GOT_PAGE_OFST:
1666 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1667 the local/global classification is accurate. See override_options
1670 The worst cases are:
1672 (1) For local symbols when generating o32 or o64 code. The assembler
1678 ...and the final address will be $at + %lo(symbol).
1680 (2) For global symbols when -mxgot. The assembler will use:
1682 lui $at,%got_hi(symbol)
1685 ...and the final address will be $at + %got_lo(symbol). */
1688 case SYMBOL_GOTOFF_PAGE:
1689 case SYMBOL_GOTOFF_DISP:
1690 case SYMBOL_GOTOFF_CALL:
1691 case SYMBOL_GOTOFF_LOADGP:
1692 case SYMBOL_32_HIGH:
1693 case SYMBOL_64_HIGH:
1699 case SYMBOL_GOTTPREL:
1702 /* A 16-bit constant formed by a single relocation, or a 32-bit
1703 constant formed from a high 16-bit relocation and a low 16-bit
1704 relocation. Use mips_split_p to determine which. */
1705 return !mips_split_p[type] ? 1 : TARGET_MIPS16 ? 3 : 2;
1708 /* We don't treat a bare TLS symbol as a constant. */
1714 /* If MODE is MAX_MACHINE_MODE, return the number of instructions needed
1715 to load symbols of type TYPE into a register. Return 0 if the given
1716 type of symbol cannot be used as an immediate operand.
1718 Otherwise, return the number of instructions needed to load or store
1719 values of mode MODE to or from addresses of type TYPE. Return 0 if
1720 the given type of symbol is not valid in addresses.
1722 In both cases, treat extended MIPS16 instructions as two instructions. */
1725 mips_symbol_insns (enum mips_symbol_type type, enum machine_mode mode)
1727 return mips_symbol_insns_1 (type, mode) * (TARGET_MIPS16 ? 2 : 1);
1730 /* Return true if X can not be forced into a constant pool. */
1733 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1735 return mips_tls_operand_p (*x);
1738 /* Return true if X can not be forced into a constant pool. */
1741 mips_cannot_force_const_mem (rtx x)
1747 /* As an optimization, reject constants that mips_legitimize_move
1750 Suppose we have a multi-instruction sequence that loads constant C
1751 into register R. If R does not get allocated a hard register, and
1752 R is used in an operand that allows both registers and memory
1753 references, reload will consider forcing C into memory and using
1754 one of the instruction's memory alternatives. Returning false
1755 here will force it to use an input reload instead. */
1756 if (GET_CODE (x) == CONST_INT)
1759 split_const (x, &base, &offset);
1760 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1764 if (for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1770 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. We can't use blocks for
1771 constants when we're using a per-function constant pool. */
1774 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1775 const_rtx x ATTRIBUTE_UNUSED)
1777 return !TARGET_MIPS16_PCREL_LOADS;
1780 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1783 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1785 if (!HARD_REGISTER_NUM_P (regno))
1789 regno = reg_renumber[regno];
1792 /* These fake registers will be eliminated to either the stack or
1793 hard frame pointer, both of which are usually valid base registers.
1794 Reload deals with the cases where the eliminated form isn't valid. */
1795 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1798 /* In mips16 mode, the stack pointer can only address word and doubleword
1799 values, nothing smaller. There are two problems here:
1801 (a) Instantiating virtual registers can introduce new uses of the
1802 stack pointer. If these virtual registers are valid addresses,
1803 the stack pointer should be too.
1805 (b) Most uses of the stack pointer are not made explicit until
1806 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1807 We don't know until that stage whether we'll be eliminating to the
1808 stack pointer (which needs the restriction) or the hard frame
1809 pointer (which doesn't).
1811 All in all, it seems more consistent to only enforce this restriction
1812 during and after reload. */
1813 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1814 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1816 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1820 /* Return true if X is a valid base register for the given mode.
1821 Allow only hard registers if STRICT. */
1824 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1826 if (!strict && GET_CODE (x) == SUBREG)
1830 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1834 /* Return true if X is a valid address for machine mode MODE. If it is,
1835 fill in INFO appropriately. STRICT is true if we should only accept
1836 hard base registers. */
1839 mips_classify_address (struct mips_address_info *info, rtx x,
1840 enum machine_mode mode, int strict)
1842 switch (GET_CODE (x))
1846 info->type = ADDRESS_REG;
1848 info->offset = const0_rtx;
1849 return mips_valid_base_register_p (info->reg, mode, strict);
1852 info->type = ADDRESS_REG;
1853 info->reg = XEXP (x, 0);
1854 info->offset = XEXP (x, 1);
1855 return (mips_valid_base_register_p (info->reg, mode, strict)
1856 && const_arith_operand (info->offset, VOIDmode));
1859 info->type = ADDRESS_LO_SUM;
1860 info->reg = XEXP (x, 0);
1861 info->offset = XEXP (x, 1);
1862 /* We have to trust the creator of the LO_SUM to do something vaguely
1863 sane. Target-independent code that creates a LO_SUM should also
1864 create and verify the matching HIGH. Target-independent code that
1865 adds an offset to a LO_SUM must prove that the offset will not
1866 induce a carry. Failure to do either of these things would be
1867 a bug, and we are not required to check for it here. The MIPS
1868 backend itself should only create LO_SUMs for valid symbolic
1869 constants, with the high part being either a HIGH or a copy
1872 = mips_classify_symbolic_expression (info->offset, SYMBOL_CONTEXT_MEM);
1873 return (mips_valid_base_register_p (info->reg, mode, strict)
1874 && mips_symbol_insns (info->symbol_type, mode) > 0
1875 && mips_lo_relocs[info->symbol_type] != 0);
1878 /* Small-integer addresses don't occur very often, but they
1879 are legitimate if $0 is a valid base register. */
1880 info->type = ADDRESS_CONST_INT;
1881 return !TARGET_MIPS16 && SMALL_INT (x);
1886 info->type = ADDRESS_SYMBOLIC;
1887 return (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_MEM,
1889 && mips_symbol_insns (info->symbol_type, mode) > 0
1890 && !mips_split_p[info->symbol_type]);
1897 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1898 returns a nonzero value if X is a legitimate address for a memory
1899 operand of the indicated MODE. STRICT is nonzero if this function
1900 is called during reload. */
1903 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1905 struct mips_address_info addr;
1907 return mips_classify_address (&addr, x, mode, strict);
1910 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1913 mips_stack_address_p (rtx x, enum machine_mode mode)
1915 struct mips_address_info addr;
1917 return (mips_classify_address (&addr, x, mode, false)
1918 && addr.type == ADDRESS_REG
1919 && addr.reg == stack_pointer_rtx);
1922 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
1923 address instruction. */
1926 mips_lwxs_address_p (rtx addr)
1929 && GET_CODE (addr) == PLUS
1930 && REG_P (XEXP (addr, 1)))
1932 rtx offset = XEXP (addr, 0);
1933 if (GET_CODE (offset) == MULT
1934 && REG_P (XEXP (offset, 0))
1935 && GET_CODE (XEXP (offset, 1)) == CONST_INT
1936 && INTVAL (XEXP (offset, 1)) == 4)
1942 /* Return true if a value at OFFSET bytes from BASE can be accessed
1943 using an unextended mips16 instruction. MODE is the mode of the
1946 Usually the offset in an unextended instruction is a 5-bit field.
1947 The offset is unsigned and shifted left once for HIs, twice
1948 for SIs, and so on. An exception is SImode accesses off the
1949 stack pointer, which have an 8-bit immediate field. */
1952 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1955 && GET_CODE (offset) == CONST_INT
1956 && INTVAL (offset) >= 0
1957 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1959 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1960 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1961 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1967 /* Return the number of instructions needed to load or store a value
1968 of mode MODE at X. Return 0 if X isn't valid for MODE. Assume that
1969 multiword moves may need to be split into word moves if MIGHT_SPLIT_P,
1970 otherwise assume that a single load or store is enough.
1972 For mips16 code, count extended instructions as two instructions. */
1975 mips_address_insns (rtx x, enum machine_mode mode, bool might_split_p)
1977 struct mips_address_info addr;
1980 /* BLKmode is used for single unaligned loads and stores and should
1981 not count as a multiword mode. (GET_MODE_SIZE (BLKmode) is pretty
1982 meaningless, so we have to single it out as a special case one way
1984 if (mode != BLKmode && might_split_p)
1985 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1989 if (mips_classify_address (&addr, x, mode, false))
1994 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1998 case ADDRESS_LO_SUM:
1999 return (TARGET_MIPS16 ? factor * 2 : factor);
2001 case ADDRESS_CONST_INT:
2004 case ADDRESS_SYMBOLIC:
2005 return factor * mips_symbol_insns (addr.symbol_type, mode);
2011 /* Likewise for constant X. */
2014 mips_const_insns (rtx x)
2016 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2017 enum mips_symbol_type symbol_type;
2020 switch (GET_CODE (x))
2023 if (!mips_symbolic_constant_p (XEXP (x, 0), SYMBOL_CONTEXT_LEA,
2025 || !mips_split_p[symbol_type])
2028 /* This is simply an lui for normal mode. It is an extended
2029 "li" followed by an extended "sll" for MIPS16. */
2030 return TARGET_MIPS16 ? 4 : 1;
2034 /* Unsigned 8-bit constants can be loaded using an unextended
2035 LI instruction. Unsigned 16-bit constants can be loaded
2036 using an extended LI. Negative constants must be loaded
2037 using LI and then negated. */
2038 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2039 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2040 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2041 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2044 return mips_build_integer (codes, INTVAL (x));
2048 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2054 /* See if we can refer to X directly. */
2055 if (mips_symbolic_constant_p (x, SYMBOL_CONTEXT_LEA, &symbol_type))
2056 return mips_symbol_insns (symbol_type, MAX_MACHINE_MODE);
2058 /* Otherwise try splitting the constant into a base and offset.
2059 16-bit offsets can be added using an extra addiu. Larger offsets
2060 must be calculated separately and then added to the base. */
2061 split_const (x, &x, &offset);
2064 int n = mips_const_insns (x);
2067 if (SMALL_INT (offset))
2070 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2077 return mips_symbol_insns (mips_classify_symbol (x, SYMBOL_CONTEXT_LEA),
2086 /* Return the number of instructions needed to implement INSN,
2087 given that it loads from or stores to MEM. Count extended
2088 mips16 instructions as two instructions. */
2091 mips_load_store_insns (rtx mem, rtx insn)
2093 enum machine_mode mode;
2097 gcc_assert (MEM_P (mem));
2098 mode = GET_MODE (mem);
2100 /* Try to prove that INSN does not need to be split. */
2101 might_split_p = true;
2102 if (GET_MODE_BITSIZE (mode) == 64)
2104 set = single_set (insn);
2105 if (set && !mips_split_64bit_move_p (SET_DEST (set), SET_SRC (set)))
2106 might_split_p = false;
2109 return mips_address_insns (XEXP (mem, 0), mode, might_split_p);
2113 /* Return the number of instructions needed for an integer division. */
2116 mips_idiv_insns (void)
2121 if (TARGET_CHECK_ZERO_DIV)
2123 if (GENERATE_DIVIDE_TRAPS)
2129 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2134 /* Emit a move from SRC to DEST. Assume that the move expanders can
2135 handle all moves if !can_create_pseudo_p (). The distinction is
2136 important because, unlike emit_move_insn, the move expanders know
2137 how to force Pmode objects into the constant pool even when the
2138 constant pool address is not itself legitimate. */
2141 mips_emit_move (rtx dest, rtx src)
2143 return (can_create_pseudo_p ()
2144 ? emit_move_insn (dest, src)
2145 : emit_move_insn_1 (dest, src));
2148 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2151 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2153 emit_insn (gen_rtx_SET (VOIDmode, target,
2154 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2157 /* Copy VALUE to a register and return that register. If new psuedos
2158 are allowed, copy it into a new register, otherwise use DEST. */
2161 mips_force_temporary (rtx dest, rtx value)
2163 if (can_create_pseudo_p ())
2164 return force_reg (Pmode, value);
2167 mips_emit_move (copy_rtx (dest), value);
2172 /* If we can access small data directly (using gp-relative relocation
2173 operators) return the small data pointer, otherwise return null.
2175 For each mips16 function which refers to GP relative symbols, we
2176 use a pseudo register, initialized at the start of the function, to
2177 hold the $gp value. */
2180 mips16_gp_pseudo_reg (void)
2182 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
2183 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
2185 /* Don't initialize the pseudo register if we are being called from
2186 the tree optimizers' cost-calculation routines. */
2187 if (!cfun->machine->initialized_mips16_gp_pseudo_p
2188 && (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl))
2192 /* We want to initialize this to a value which gcc will believe
2194 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
2196 push_topmost_sequence ();
2197 /* We need to emit the initialization after the FUNCTION_BEG
2198 note, so that it will be integrated. */
2199 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
2201 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
2203 if (scan == NULL_RTX)
2204 scan = get_insns ();
2205 insn = emit_insn_after (insn, scan);
2206 pop_topmost_sequence ();
2208 cfun->machine->initialized_mips16_gp_pseudo_p = true;
2211 return cfun->machine->mips16_gp_pseudo_rtx;
2214 /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
2215 it appears in a MEM of that mode. Return true if ADDR is a legitimate
2216 constant in that context and can be split into a high part and a LO_SUM.
2217 If so, and if LO_SUM_OUT is nonnull, emit the high part and return
2218 the LO_SUM in *LO_SUM_OUT. Leave *LO_SUM_OUT unchanged otherwise.
2220 TEMP is as for mips_force_temporary and is used to load the high
2221 part into a register. */
2224 mips_split_symbol (rtx temp, rtx addr, enum machine_mode mode, rtx *lo_sum_out)
2226 enum mips_symbol_context context;
2227 enum mips_symbol_type symbol_type;
2230 context = (mode == MAX_MACHINE_MODE
2231 ? SYMBOL_CONTEXT_LEA
2232 : SYMBOL_CONTEXT_MEM);
2233 if (!mips_symbolic_constant_p (addr, context, &symbol_type)
2234 || mips_symbol_insns (symbol_type, mode) == 0
2235 || !mips_split_p[symbol_type])
2240 if (symbol_type == SYMBOL_GP_RELATIVE)
2242 if (!can_create_pseudo_p ())
2244 emit_insn (gen_load_const_gp (copy_rtx (temp)));
2248 high = mips16_gp_pseudo_reg ();
2252 high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
2253 high = mips_force_temporary (temp, high);
2255 *lo_sum_out = gen_rtx_LO_SUM (Pmode, high, addr);
2261 /* Wrap symbol or label BASE in an unspec address of type SYMBOL_TYPE
2262 and add CONST_INT OFFSET to the result. */
2265 mips_unspec_address_offset (rtx base, rtx offset,
2266 enum mips_symbol_type symbol_type)
2268 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2269 UNSPEC_ADDRESS_FIRST + symbol_type);
2270 if (offset != const0_rtx)
2271 base = gen_rtx_PLUS (Pmode, base, offset);
2272 return gen_rtx_CONST (Pmode, base);
2275 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2276 type SYMBOL_TYPE. */
2279 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2283 split_const (address, &base, &offset);
2284 return mips_unspec_address_offset (base, offset, symbol_type);
2288 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2289 high part to BASE and return the result. Just return BASE otherwise.
2290 TEMP is available as a temporary register if needed.
2292 The returned expression can be used as the first operand to a LO_SUM. */
2295 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2296 enum mips_symbol_type symbol_type)
2298 if (mips_split_p[symbol_type])
2300 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2301 addr = mips_force_temporary (temp, addr);
2302 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2308 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2309 mips_force_temporary; it is only needed when OFFSET is not a
2313 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
2315 if (!SMALL_OPERAND (offset))
2320 /* Load the full offset into a register so that we can use
2321 an unextended instruction for the address itself. */
2322 high = GEN_INT (offset);
2327 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2328 high = GEN_INT (CONST_HIGH_PART (offset));
2329 offset = CONST_LOW_PART (offset);
2331 high = mips_force_temporary (temp, high);
2332 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2334 return plus_constant (reg, offset);
2337 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2338 referencing, and TYPE is the symbol type to use (either global
2339 dynamic or local dynamic). V0 is an RTX for the return value
2340 location. The entire insn sequence is returned. */
2342 static GTY(()) rtx mips_tls_symbol;
2345 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2347 rtx insn, loc, tga, a0;
2349 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2351 if (!mips_tls_symbol)
2352 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2354 loc = mips_unspec_address (sym, type);
2358 emit_insn (gen_rtx_SET (Pmode, a0,
2359 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2360 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2361 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2362 CONST_OR_PURE_CALL_P (insn) = 1;
2363 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2364 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2365 insn = get_insns ();
2372 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2373 return value will be a valid address and move_operand (either a REG
2377 mips_legitimize_tls_address (rtx loc)
2379 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2380 enum tls_model model;
2384 sorry ("MIPS16 TLS");
2385 return gen_reg_rtx (Pmode);
2388 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2389 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2391 model = SYMBOL_REF_TLS_MODEL (loc);
2392 /* Only TARGET_ABICALLS code can have more than one module; other
2393 code must be be static and should not use a GOT. All TLS models
2394 reduce to local exec in this situation. */
2395 if (!TARGET_ABICALLS)
2396 model = TLS_MODEL_LOCAL_EXEC;
2400 case TLS_MODEL_GLOBAL_DYNAMIC:
2401 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2402 dest = gen_reg_rtx (Pmode);
2403 emit_libcall_block (insn, dest, v0, loc);
2406 case TLS_MODEL_LOCAL_DYNAMIC:
2407 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2408 tmp1 = gen_reg_rtx (Pmode);
2410 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2411 share the LDM result with other LD model accesses. */
2412 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2414 emit_libcall_block (insn, tmp1, v0, eqv);
2416 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2417 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2418 mips_unspec_address (loc, SYMBOL_DTPREL));
2421 case TLS_MODEL_INITIAL_EXEC:
2422 tmp1 = gen_reg_rtx (Pmode);
2423 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2424 if (Pmode == DImode)
2426 emit_insn (gen_tls_get_tp_di (v1));
2427 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2431 emit_insn (gen_tls_get_tp_si (v1));
2432 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2434 dest = gen_reg_rtx (Pmode);
2435 emit_insn (gen_add3_insn (dest, tmp1, v1));
2438 case TLS_MODEL_LOCAL_EXEC:
2439 if (Pmode == DImode)
2440 emit_insn (gen_tls_get_tp_di (v1));
2442 emit_insn (gen_tls_get_tp_si (v1));
2444 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2445 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2446 mips_unspec_address (loc, SYMBOL_TPREL));
2456 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2457 be legitimized in a way that the generic machinery might not expect,
2458 put the new address in *XLOC and return true. MODE is the mode of
2459 the memory being accessed. */
2462 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2464 if (mips_tls_operand_p (*xloc))
2466 *xloc = mips_legitimize_tls_address (*xloc);
2470 /* See if the address can split into a high part and a LO_SUM. */
2471 if (mips_split_symbol (NULL, *xloc, mode, xloc))
2474 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2476 /* Handle REG + CONSTANT using mips_add_offset. */
2479 reg = XEXP (*xloc, 0);
2480 if (!mips_valid_base_register_p (reg, mode, 0))
2481 reg = copy_to_mode_reg (Pmode, reg);
2482 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2490 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2493 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2495 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2496 enum machine_mode mode;
2497 unsigned int i, cost;
2500 mode = GET_MODE (dest);
2501 cost = mips_build_integer (codes, value);
2503 /* Apply each binary operation to X. Invariant: X is a legitimate
2504 source operand for a SET pattern. */
2505 x = GEN_INT (codes[0].value);
2506 for (i = 1; i < cost; i++)
2508 if (!can_create_pseudo_p ())
2510 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2514 x = force_reg (mode, x);
2515 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2518 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2522 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2523 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2527 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2531 /* Split moves of big integers into smaller pieces. */
2532 if (splittable_const_int_operand (src, mode))
2534 mips_move_integer (dest, dest, INTVAL (src));
2538 /* Split moves of symbolic constants into high/low pairs. */
2539 if (mips_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
2541 emit_insn (gen_rtx_SET (VOIDmode, dest, src));
2545 if (mips_tls_operand_p (src))
2547 mips_emit_move (dest, mips_legitimize_tls_address (src));
2551 /* If we have (const (plus symbol offset)), and that expression cannot
2552 be forced into memory, load the symbol first and add in the offset.
2553 In non-MIPS16 mode, prefer to do this even if the constant _can_ be
2554 forced into memory, as it usually produces better code. */
2555 split_const (src, &base, &offset);
2556 if (offset != const0_rtx
2557 && (targetm.cannot_force_const_mem (src)
2558 || (!TARGET_MIPS16 && can_create_pseudo_p ())))
2560 base = mips_force_temporary (dest, base);
2561 mips_emit_move (dest, mips_add_offset (0, base, INTVAL (offset)));
2565 src = force_const_mem (mode, src);
2567 /* When using explicit relocs, constant pool references are sometimes
2568 not legitimate addresses. */
2569 mips_split_symbol (dest, XEXP (src, 0), mode, &XEXP (src, 0));
2570 mips_emit_move (dest, src);
2574 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2575 sequence that is valid. */
2578 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2580 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2582 mips_emit_move (dest, force_reg (mode, src));
2586 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2587 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2588 && REG_P (src) && MD_REG_P (REGNO (src))
2589 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2591 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2592 if (GET_MODE_SIZE (mode) <= 4)
2593 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2594 gen_rtx_REG (SImode, REGNO (src)),
2595 gen_rtx_REG (SImode, other_regno)));
2597 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2598 gen_rtx_REG (DImode, REGNO (src)),
2599 gen_rtx_REG (DImode, other_regno)));
2603 /* We need to deal with constants that would be legitimate
2604 immediate_operands but not legitimate move_operands. */
2605 if (CONSTANT_P (src) && !move_operand (src, mode))
2607 mips_legitimize_const_move (mode, dest, src);
2608 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2614 /* Return true if X in context CONTEXT is a small data address that can
2615 be rewritten as a LO_SUM. */
2618 mips_rewrite_small_data_p (rtx x, enum mips_symbol_context context)
2620 enum mips_symbol_type symbol_type;
2622 return (TARGET_EXPLICIT_RELOCS
2623 && mips_symbolic_constant_p (x, context, &symbol_type)
2624 && symbol_type == SYMBOL_GP_RELATIVE);
2628 /* A for_each_rtx callback for mips_small_data_pattern_p. DATA is the
2629 containing MEM, or null if none. */
2632 mips_small_data_pattern_1 (rtx *loc, void *data)
2634 enum mips_symbol_context context;
2636 if (GET_CODE (*loc) == LO_SUM)
2641 if (for_each_rtx (&XEXP (*loc, 0), mips_small_data_pattern_1, *loc))
2646 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2647 return mips_rewrite_small_data_p (*loc, context);
2650 /* Return true if OP refers to small data symbols directly, not through
2654 mips_small_data_pattern_p (rtx op)
2656 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
2659 /* A for_each_rtx callback, used by mips_rewrite_small_data.
2660 DATA is the containing MEM, or null if none. */
2663 mips_rewrite_small_data_1 (rtx *loc, void *data)
2665 enum mips_symbol_context context;
2669 for_each_rtx (&XEXP (*loc, 0), mips_rewrite_small_data_1, *loc);
2673 context = data ? SYMBOL_CONTEXT_MEM : SYMBOL_CONTEXT_LEA;
2674 if (mips_rewrite_small_data_p (*loc, context))
2675 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
2677 if (GET_CODE (*loc) == LO_SUM)
2683 /* If possible, rewrite OP so that it refers to small data using
2684 explicit relocations. */
2687 mips_rewrite_small_data (rtx op)
2689 op = copy_insn (op);
2690 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
2694 /* We need a lot of little routines to check constant values on the
2695 mips16. These are used to figure out how long the instruction will
2696 be. It would be much better to do this using constraints, but
2697 there aren't nearly enough letters available. */
2700 m16_check_op (rtx op, int low, int high, int mask)
2702 return (GET_CODE (op) == CONST_INT
2703 && INTVAL (op) >= low
2704 && INTVAL (op) <= high
2705 && (INTVAL (op) & mask) == 0);
2709 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2711 return m16_check_op (op, 0x1, 0x8, 0);
2715 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2717 return m16_check_op (op, - 0x8, 0x7, 0);
2721 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2723 return m16_check_op (op, - 0x7, 0x8, 0);
2727 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2729 return m16_check_op (op, - 0x10, 0xf, 0);
2733 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2735 return m16_check_op (op, - 0xf, 0x10, 0);
2739 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2741 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2745 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2747 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2751 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2753 return m16_check_op (op, - 0x80, 0x7f, 0);
2757 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2759 return m16_check_op (op, - 0x7f, 0x80, 0);
2763 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2765 return m16_check_op (op, 0x0, 0xff, 0);
2769 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2771 return m16_check_op (op, - 0xff, 0x0, 0);
2775 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2777 return m16_check_op (op, - 0x1, 0xfe, 0);
2781 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2783 return m16_check_op (op, 0x0, 0xff << 2, 3);
2787 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2789 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2793 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2795 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2799 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2801 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2804 /* The cost of loading values from the constant pool. It should be
2805 larger than the cost of any constant we want to synthesize inline. */
2807 #define CONSTANT_POOL_COST COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 8)
2809 /* Return the cost of X when used as an operand to the MIPS16 instruction
2810 that implements CODE. Return -1 if there is no such instruction, or if
2811 X is not a valid immediate operand for it. */
2814 mips16_constant_cost (int code, HOST_WIDE_INT x)
2821 /* Shifts by between 1 and 8 bits (inclusive) are unextended,
2822 other shifts are extended. The shift patterns truncate the shift
2823 count to the right size, so there are no out-of-range values. */
2824 if (IN_RANGE (x, 1, 8))
2826 return COSTS_N_INSNS (1);
2829 if (IN_RANGE (x, -128, 127))
2831 if (SMALL_OPERAND (x))
2832 return COSTS_N_INSNS (1);
2836 /* Like LE, but reject the always-true case. */
2840 /* We add 1 to the immediate and use SLT. */
2843 /* We can use CMPI for an xor with an unsigned 16-bit X. */
2846 if (IN_RANGE (x, 0, 255))
2848 if (SMALL_OPERAND_UNSIGNED (x))
2849 return COSTS_N_INSNS (1);
2854 /* Equality comparisons with 0 are cheap. */
2864 /* Return true if there is a non-MIPS16 instruction that implements CODE
2865 and if that instruction accepts X as an immediate operand. */
2868 mips_immediate_operand_p (int code, HOST_WIDE_INT x)
2875 /* All shift counts are truncated to a valid constant. */
2880 /* Likewise rotates, if the target supports rotates at all. */
2886 /* These instructions take 16-bit unsigned immediates. */
2887 return SMALL_OPERAND_UNSIGNED (x);
2892 /* These instructions take 16-bit signed immediates. */
2893 return SMALL_OPERAND (x);
2899 /* The "immediate" forms of these instructions are really
2900 implemented as comparisons with register 0. */
2905 /* Likewise, meaning that the only valid immediate operand is 1. */
2909 /* We add 1 to the immediate and use SLT. */
2910 return SMALL_OPERAND (x + 1);
2913 /* Likewise SLTU, but reject the always-true case. */
2914 return SMALL_OPERAND (x + 1) && x + 1 != 0;
2918 /* The bit position and size are immediate operands. */
2919 return ISA_HAS_EXT_INS;
2922 /* By default assume that $0 can be used for 0. */
2927 /* Return the cost of binary operation X, given that the instruction
2928 sequence for a word-sized or smaller operation has cost SINGLE_COST
2929 and that the sequence of a double-word operation has cost DOUBLE_COST. */
2932 mips_binary_cost (rtx x, int single_cost, int double_cost)
2936 if (GET_MODE_SIZE (GET_MODE (x)) == UNITS_PER_WORD * 2)
2941 + rtx_cost (XEXP (x, 0), 0)
2942 + rtx_cost (XEXP (x, 1), GET_CODE (x)));
2945 /* Return the cost of floating-point multiplications of mode MODE. */
2948 mips_fp_mult_cost (enum machine_mode mode)
2950 return mode == DFmode ? mips_cost->fp_mult_df : mips_cost->fp_mult_sf;
2953 /* Return the cost of floating-point divisions of mode MODE. */
2956 mips_fp_div_cost (enum machine_mode mode)
2958 return mode == DFmode ? mips_cost->fp_div_df : mips_cost->fp_div_sf;
2961 /* Return the cost of sign-extending OP to mode MODE, not including the
2962 cost of OP itself. */
2965 mips_sign_extend_cost (enum machine_mode mode, rtx op)
2968 /* Extended loads are as cheap as unextended ones. */
2971 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2972 /* A sign extension from SImode to DImode in 64-bit mode is free. */
2975 if (ISA_HAS_SEB_SEH || GENERATE_MIPS16E)
2976 /* We can use SEB or SEH. */
2977 return COSTS_N_INSNS (1);
2979 /* We need to use a shift left and a shift right. */
2980 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
2983 /* Return the cost of zero-extending OP to mode MODE, not including the
2984 cost of OP itself. */
2987 mips_zero_extend_cost (enum machine_mode mode, rtx op)
2990 /* Extended loads are as cheap as unextended ones. */
2993 if (TARGET_64BIT && mode == DImode && GET_MODE (op) == SImode)
2994 /* We need a shift left by 32 bits and a shift right by 32 bits. */
2995 return COSTS_N_INSNS (TARGET_MIPS16 ? 4 : 2);
2997 if (GENERATE_MIPS16E)
2998 /* We can use ZEB or ZEH. */
2999 return COSTS_N_INSNS (1);
3002 /* We need to load 0xff or 0xffff into a register and use AND. */
3003 return COSTS_N_INSNS (GET_MODE (op) == QImode ? 2 : 3);
3005 /* We can use ANDI. */
3006 return COSTS_N_INSNS (1);
3009 /* Implement TARGET_RTX_COSTS. */
3012 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
3014 enum machine_mode mode = GET_MODE (x);
3015 bool float_mode_p = FLOAT_MODE_P (mode);
3019 /* The cost of a COMPARE is hard to define for MIPS. COMPAREs don't
3020 appear in the instruction stream, and the cost of a comparison is
3021 really the cost of the branch or scc condition. At the time of
3022 writing, gcc only uses an explicit outer COMPARE code when optabs
3023 is testing whether a constant is expensive enough to force into a
3024 register. We want optabs to pass such constants through the MIPS
3025 expanders instead, so make all constants very cheap here. */
3026 if (outer_code == COMPARE)
3028 gcc_assert (CONSTANT_P (x));
3036 /* Treat *clear_upper32-style ANDs as having zero cost in the
3037 second operand. The cost is entirely in the first operand.
3039 ??? This is needed because we would otherwise try to CSE
3040 the constant operand. Although that's the right thing for
3041 instructions that continue to be a register operation throughout
3042 compilation, it is disastrous for instructions that could
3043 later be converted into a memory operation. */
3045 && outer_code == AND
3046 && UINTVAL (x) == 0xffffffff)
3054 cost = mips16_constant_cost (outer_code, INTVAL (x));
3063 /* When not optimizing for size, we care more about the cost
3064 of hot code, and hot code is often in a loop. If a constant
3065 operand needs to be forced into a register, we will often be
3066 able to hoist the constant load out of the loop, so the load
3067 should not contribute to the cost. */
3069 || mips_immediate_operand_p (outer_code, INTVAL (x)))
3081 if (force_to_mem_operand (x, VOIDmode))
3083 *total = COSTS_N_INSNS (1);
3086 cost = mips_const_insns (x);
3089 /* If the constant is likely to be stored in a GPR, SETs of
3090 single-insn constants are as cheap as register sets; we
3091 never want to CSE them.
3093 Don't reduce the cost of storing a floating-point zero in
3094 FPRs. If we have a zero in an FPR for other reasons, we
3095 can get better cfg-cleanup and delayed-branch results by
3096 using it consistently, rather than using $0 sometimes and
3097 an FPR at other times. Also, moves between floating-point
3098 registers are sometimes cheaper than (D)MTC1 $0. */
3100 && outer_code == SET
3101 && !(float_mode_p && TARGET_HARD_FLOAT))
3103 /* When non-MIPS16 code loads a constant N>1 times, we rarely
3104 want to CSE the constant itself. It is usually better to
3105 have N copies of the last operation in the sequence and one
3106 shared copy of the other operations. (Note that this is
3107 not true for MIPS16 code, where the final operation in the
3108 sequence is often an extended instruction.)
3110 Also, if we have a CONST_INT, we don't know whether it is
3111 for a word or doubleword operation, so we cannot rely on
3112 the result of mips_build_integer. */
3113 else if (!TARGET_MIPS16
3114 && (outer_code == SET || mode == VOIDmode))
3116 *total = COSTS_N_INSNS (cost);
3119 /* The value will need to be fetched from the constant pool. */
3120 *total = CONSTANT_POOL_COST;
3124 /* If the address is legitimate, return the number of
3125 instructions it needs. */
3127 cost = mips_address_insns (addr, mode, true);
3130 *total = COSTS_N_INSNS (cost + 1);
3133 /* Check for a scaled indexed address. */
3134 if (mips_lwxs_address_p (addr))
3136 *total = COSTS_N_INSNS (2);
3139 /* Otherwise use the default handling. */
3143 *total = COSTS_N_INSNS (6);
3147 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 2 : 1);
3151 /* Check for a *clear_upper32 pattern and treat it like a zero
3152 extension. See the pattern's comment for details. */
3155 && CONST_INT_P (XEXP (x, 1))
3156 && UINTVAL (XEXP (x, 1)) == 0xffffffff)
3158 *total = (mips_zero_extend_cost (mode, XEXP (x, 0))
3159 + rtx_cost (XEXP (x, 0), 0));
3166 /* Double-word operations use two single-word operations. */
3167 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2));
3175 if (CONSTANT_P (XEXP (x, 1)))
3176 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3178 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (12));
3183 *total = mips_cost->fp_add;
3185 *total = COSTS_N_INSNS (4);
3189 /* Low-part immediates need an extended MIPS16 instruction. */
3190 *total = (COSTS_N_INSNS (TARGET_MIPS16 ? 2 : 1)
3191 + rtx_cost (XEXP (x, 0), 0));
3206 /* Branch comparisons have VOIDmode, so use the first operand's
3208 mode = GET_MODE (XEXP (x, 0));
3209 if (FLOAT_MODE_P (mode))
3211 *total = mips_cost->fp_add;
3214 *total = mips_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (4));
3219 && ISA_HAS_NMADD_NMSUB
3220 && TARGET_FUSED_MADD
3221 && !HONOR_NANS (mode)
3222 && !HONOR_SIGNED_ZEROS (mode))
3224 /* See if we can use NMADD or NMSUB. See mips.md for the
3225 associated patterns. */
3226 rtx op0 = XEXP (x, 0);
3227 rtx op1 = XEXP (x, 1);
3228 if (GET_CODE (op0) == MULT && GET_CODE (XEXP (op0, 0)) == NEG)
3230 *total = (mips_fp_mult_cost (mode)
3231 + rtx_cost (XEXP (XEXP (op0, 0), 0), 0)
3232 + rtx_cost (XEXP (op0, 1), 0)
3233 + rtx_cost (op1, 0));
3236 if (GET_CODE (op1) == MULT)
3238 *total = (mips_fp_mult_cost (mode)
3240 + rtx_cost (XEXP (op1, 0), 0)
3241 + rtx_cost (XEXP (op1, 1), 0));
3251 && TARGET_FUSED_MADD
3252 && GET_CODE (XEXP (x, 0)) == MULT)
3255 *total = mips_cost->fp_add;
3259 /* Double-word operations require three single-word operations and
3260 an SLTU. The MIPS16 version then needs to move the result of
3261 the SLTU from $24 to a MIPS16 register. */
3262 *total = mips_binary_cost (x, COSTS_N_INSNS (1),
3263 COSTS_N_INSNS (TARGET_MIPS16 ? 5 : 4));
3268 && ISA_HAS_NMADD_NMSUB
3269 && TARGET_FUSED_MADD
3270 && !HONOR_NANS (mode)
3271 && HONOR_SIGNED_ZEROS (mode))
3273 /* See if we can use NMADD or NMSUB. See mips.md for the
3274 associated patterns. */
3275 rtx op = XEXP (x, 0);
3276 if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
3277 && GET_CODE (XEXP (op, 0)) == MULT)
3279 *total = (mips_fp_mult_cost (mode)
3280 + rtx_cost (XEXP (XEXP (op, 0), 0), 0)
3281 + rtx_cost (XEXP (XEXP (op, 0), 1), 0)
3282 + rtx_cost (XEXP (op, 1), 0));
3288 *total = mips_cost->fp_add;
3290 *total = COSTS_N_INSNS (GET_MODE_SIZE (mode) > UNITS_PER_WORD ? 4 : 1);
3295 *total = mips_fp_mult_cost (mode);
3296 else if (mode == DImode && !TARGET_64BIT)
3297 /* Synthesized from 2 mulsi3s, 1 mulsidi3 and two additions,
3298 where the mulsidi3 always includes an MFHI and an MFLO. */
3299 *total = (optimize_size
3300 ? COSTS_N_INSNS (ISA_HAS_MUL3 ? 7 : 9)
3301 : mips_cost->int_mult_si * 3 + 6);
3302 else if (optimize_size)
3303 *total = (ISA_HAS_MUL3 ? 1 : 2);
3304 else if (mode == DImode)
3305 *total = mips_cost->int_mult_di;
3307 *total = mips_cost->int_mult_si;
3311 /* Check for a reciprocal. */
3312 if (float_mode_p && XEXP (x, 0) == CONST1_RTX (mode))
3315 && flag_unsafe_math_optimizations
3316 && (outer_code == SQRT || GET_CODE (XEXP (x, 1)) == SQRT))
3318 /* An rsqrt<mode>a or rsqrt<mode>b pattern. Count the
3319 division as being free. */
3320 *total = rtx_cost (XEXP (x, 1), 0);
3325 *total = mips_fp_div_cost (mode) + rtx_cost (XEXP (x, 1), 0);
3335 *total = mips_fp_div_cost (mode);
3344 /* It is our responsibility to make division by a power of 2
3345 as cheap as 2 register additions if we want the division
3346 expanders to be used for such operations; see the setting
3347 of sdiv_pow2_cheap in optabs.c. Using (D)DIV for MIPS16
3348 should always produce shorter code than using
3349 expand_sdiv2_pow2. */
3351 && CONST_INT_P (XEXP (x, 1))
3352 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
3354 *total = COSTS_N_INSNS (2) + rtx_cost (XEXP (x, 0), 0);
3357 *total = COSTS_N_INSNS (mips_idiv_insns ());
3359 else if (mode == DImode)
3360 *total = mips_cost->int_div_di;
3362 *total = mips_cost->int_div_si;
3366 *total = mips_sign_extend_cost (mode, XEXP (x, 0));
3370 *total = mips_zero_extend_cost (mode, XEXP (x, 0));
3374 case UNSIGNED_FLOAT:
3377 case FLOAT_TRUNCATE:
3378 *total = mips_cost->fp_add;
3386 /* Provide the costs of an addressing mode that contains ADDR.
3387 If ADDR is not a valid address, its cost is irrelevant. */
3390 mips_address_cost (rtx addr)
3392 return mips_address_insns (addr, SImode, false);
3395 /* Return one word of double-word value OP, taking into account the fixed
3396 endianness of certain registers. HIGH_P is true to select the high part,
3397 false to select the low part. */
3400 mips_subword (rtx op, int high_p)
3402 unsigned int byte, offset;
3403 enum machine_mode mode;
3405 mode = GET_MODE (op);
3406 if (mode == VOIDmode)
3409 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
3410 byte = UNITS_PER_WORD;
3414 if (FP_REG_RTX_P (op))
3416 /* Paired FPRs are always ordered little-endian. */
3417 offset = (UNITS_PER_WORD < UNITS_PER_HWFPVALUE ? high_p : byte != 0);
3418 return gen_rtx_REG (word_mode, REGNO (op) + offset);
3422 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3424 return simplify_gen_subreg (word_mode, op, mode, byte);
3428 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3431 mips_split_64bit_move_p (rtx dest, rtx src)
3436 /* FP->FP moves can be done in a single instruction. */
3437 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3440 /* Check for floating-point loads and stores. They can be done using
3441 ldc1 and sdc1 on MIPS II and above. */
3444 if (FP_REG_RTX_P (dest) && MEM_P (src))
3446 if (FP_REG_RTX_P (src) && MEM_P (dest))
3453 /* Split a doubleword move from SRC to DEST. On 32-bit targets,
3454 this function handles 64-bit moves for which mips_split_64bit_move_p
3455 holds. For 64-bit targets, this function handles 128-bit moves. */
3458 mips_split_doubleword_move (rtx dest, rtx src)
3460 if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
3462 if (!TARGET_64BIT && GET_MODE (dest) == DImode)
3463 emit_insn (gen_move_doubleword_fprdi (dest, src));
3464 else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
3465 emit_insn (gen_move_doubleword_fprdf (dest, src));
3466 else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
3467 emit_insn (gen_move_doubleword_fprtf (dest, src));
3473 /* The operation can be split into two normal moves. Decide in
3474 which order to do them. */
3477 low_dest = mips_subword (dest, 0);
3478 if (REG_P (low_dest)
3479 && reg_overlap_mentioned_p (low_dest, src))
3481 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3482 mips_emit_move (low_dest, mips_subword (src, 0));
3486 mips_emit_move (low_dest, mips_subword (src, 0));
3487 mips_emit_move (mips_subword (dest, 1), mips_subword (src, 1));
3492 /* Return the appropriate instructions to move SRC into DEST. Assume
3493 that SRC is operand 1 and DEST is operand 0. */
3496 mips_output_move (rtx dest, rtx src)
3498 enum rtx_code dest_code, src_code;
3499 enum mips_symbol_type symbol_type;
3502 dest_code = GET_CODE (dest);
3503 src_code = GET_CODE (src);
3504 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
3506 if (dbl_p && mips_split_64bit_move_p (dest, src))
3509 if ((src_code == REG && GP_REG_P (REGNO (src)))
3510 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3512 if (dest_code == REG)
3514 if (GP_REG_P (REGNO (dest)))
3515 return "move\t%0,%z1";
3517 if (MD_REG_P (REGNO (dest)))
3520 if (DSP_ACC_REG_P (REGNO (dest)))
3522 static char retval[] = "mt__\t%z1,%q0";
3523 retval[2] = reg_names[REGNO (dest)][4];
3524 retval[3] = reg_names[REGNO (dest)][5];
3528 if (FP_REG_P (REGNO (dest)))
3529 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
3531 if (ALL_COP_REG_P (REGNO (dest)))
3533 static char retval[] = "dmtc_\t%z1,%0";
3535 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3536 return (dbl_p ? retval : retval + 1);
3539 if (dest_code == MEM)
3540 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
3542 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3544 if (src_code == REG)
3546 if (DSP_ACC_REG_P (REGNO (src)))
3548 static char retval[] = "mf__\t%0,%q1";
3549 retval[2] = reg_names[REGNO (src)][4];
3550 retval[3] = reg_names[REGNO (src)][5];
3554 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3555 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3557 if (FP_REG_P (REGNO (src)))
3558 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3560 if (ALL_COP_REG_P (REGNO (src)))
3562 static char retval[] = "dmfc_\t%0,%1";
3564 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3565 return (dbl_p ? retval : retval + 1);
3569 if (src_code == MEM)
3570 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3572 if (src_code == CONST_INT)
3574 /* Don't use the X format, because that will give out of
3575 range numbers for 64-bit hosts and 32-bit targets. */
3577 return "li\t%0,%1\t\t\t# %X1";
3579 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3582 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3586 if (src_code == HIGH)
3587 return TARGET_MIPS16 ? "#" : "lui\t%0,%h1";
3589 if (CONST_GP_P (src))
3590 return "move\t%0,%1";
3592 if (mips_symbolic_constant_p (src, SYMBOL_CONTEXT_LEA, &symbol_type)
3593 && mips_lo_relocs[symbol_type] != 0)
3595 /* A signed 16-bit constant formed by applying a relocation
3596 operator to a symbolic address. */
3597 gcc_assert (!mips_split_p[symbol_type]);
3598 return "li\t%0,%R1";
3601 if (symbolic_operand (src, VOIDmode))
3603 gcc_assert (TARGET_MIPS16
3604 ? TARGET_MIPS16_TEXT_LOADS
3605 : !TARGET_EXPLICIT_RELOCS);
3606 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
3609 if (src_code == REG && FP_REG_P (REGNO (src)))
3611 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3613 if (GET_MODE (dest) == V2SFmode)
3614 return "mov.ps\t%0,%1";
3616 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3619 if (dest_code == MEM)
3620 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3622 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3624 if (src_code == MEM)
3625 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
3627 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3629 static char retval[] = "l_c_\t%0,%1";
3631 retval[1] = (dbl_p ? 'd' : 'w');
3632 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3635 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3637 static char retval[] = "s_c_\t%1,%0";
3639 retval[1] = (dbl_p ? 'd' : 'w');
3640 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3646 /* Return true if CMP1 is a suitable second operand for relational
3647 operator CODE. See also the *sCC patterns in mips.md. */
3650 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3656 return reg_or_0_operand (cmp1, VOIDmode);
3660 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3664 return arith_operand (cmp1, VOIDmode);
3667 return sle_operand (cmp1, VOIDmode);
3670 return sleu_operand (cmp1, VOIDmode);
3677 /* Canonicalize LE or LEU comparisons into LT comparisons when
3678 possible to avoid extra instructions or inverting the
3682 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3683 enum machine_mode mode)
3685 HOST_WIDE_INT original, plus_one;
3687 if (GET_CODE (*cmp1) != CONST_INT)
3690 original = INTVAL (*cmp1);
3691 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3696 if (original < plus_one)
3699 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3708 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3721 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3722 result in TARGET. CMP0 and TARGET are register_operands that have
3723 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3724 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3727 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3728 rtx target, rtx cmp0, rtx cmp1)
3730 /* First see if there is a MIPS instruction that can do this operation
3731 with CMP1 in its current form. If not, try to canonicalize the
3732 comparison to LT. If that fails, try doing the same for the
3733 inverse operation. If that also fails, force CMP1 into a register
3735 if (mips_relational_operand_ok_p (code, cmp1))
3736 mips_emit_binary (code, target, cmp0, cmp1);
3737 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3738 mips_emit_binary (code, target, cmp0, cmp1);
3741 enum rtx_code inv_code = reverse_condition (code);
3742 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3744 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3745 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3747 else if (invert_ptr == 0)
3749 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3750 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3751 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3755 *invert_ptr = !*invert_ptr;
3756 mips_emit_binary (inv_code, target, cmp0, cmp1);
3761 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3762 The register will have the same mode as CMP0. */
3765 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3767 if (cmp1 == const0_rtx)
3770 if (uns_arith_operand (cmp1, VOIDmode))
3771 return expand_binop (GET_MODE (cmp0), xor_optab,
3772 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3774 return expand_binop (GET_MODE (cmp0), sub_optab,
3775 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3778 /* Convert *CODE into a code that can be used in a floating-point
3779 scc instruction (c.<cond>.<fmt>). Return true if the values of
3780 the condition code registers will be inverted, with 0 indicating
3781 that the condition holds. */
3784 mips_reverse_fp_cond_p (enum rtx_code *code)
3791 *code = reverse_condition_maybe_unordered (*code);
3799 /* Convert a comparison into something that can be used in a branch or
3800 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3801 being compared and *CODE is the code used to compare them.
3803 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3804 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3805 otherwise any standard branch condition can be used. The standard branch
3808 - EQ/NE between two registers.
3809 - any comparison between a register and zero. */
3812 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3814 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3816 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3818 *op0 = cmp_operands[0];
3819 *op1 = cmp_operands[1];
3821 else if (*code == EQ || *code == NE)
3825 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3830 *op0 = cmp_operands[0];
3831 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3836 /* The comparison needs a separate scc instruction. Store the
3837 result of the scc in *OP0 and compare it against zero. */
3838 bool invert = false;
3839 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3841 mips_emit_int_relational (*code, &invert, *op0,
3842 cmp_operands[0], cmp_operands[1]);
3843 *code = (invert ? EQ : NE);
3846 else if (ALL_FIXED_POINT_MODE_P (GET_MODE (cmp_operands[0])))
3848 *op0 = gen_rtx_REG (CCDSPmode, CCDSP_CC_REGNUM);
3849 mips_emit_binary (*code, *op0, cmp_operands[0], cmp_operands[1]);
3855 enum rtx_code cmp_code;
3857 /* Floating-point tests use a separate c.cond.fmt comparison to
3858 set a condition code register. The branch or conditional move
3859 will then compare that register against zero.
3861 Set CMP_CODE to the code of the comparison instruction and
3862 *CODE to the code that the branch or move should use. */
3864 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3866 ? gen_reg_rtx (CCmode)
3867 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3869 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3873 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3874 Store the result in TARGET and return true if successful.
3876 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3879 mips_emit_scc (enum rtx_code code, rtx target)
3881 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3884 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3885 if (code == EQ || code == NE)
3887 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3888 mips_emit_binary (code, target, zie, const0_rtx);
3891 mips_emit_int_relational (code, 0, target,
3892 cmp_operands[0], cmp_operands[1]);
3896 /* Emit the common code for doing conditional branches.
3897 operand[0] is the label to jump to.
3898 The comparison operands are saved away by cmp{si,di,sf,df}. */
3901 gen_conditional_branch (rtx *operands, enum rtx_code code)
3903 rtx op0, op1, condition;
3905 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3906 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3907 emit_jump_insn (gen_condjump (condition, operands[0]));
3912 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3913 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3916 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3917 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3922 reversed_p = mips_reverse_fp_cond_p (&cond);
3923 cmp_result = gen_reg_rtx (CCV2mode);
3924 emit_insn (gen_scc_ps (cmp_result,
3925 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3927 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3930 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3934 /* Emit the common code for conditional moves. OPERANDS is the array
3935 of operands passed to the conditional move define_expand. */
3938 gen_conditional_move (rtx *operands)
3943 code = GET_CODE (operands[1]);
3944 mips_emit_compare (&code, &op0, &op1, true);
3945 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3946 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3947 gen_rtx_fmt_ee (code,
3950 operands[2], operands[3])));
3953 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3954 the conditional_trap expander. */
3957 mips_gen_conditional_trap (rtx *operands)
3960 enum rtx_code cmp_code = GET_CODE (operands[0]);
3961 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3963 /* MIPS conditional trap machine instructions don't have GT or LE
3964 flavors, so we must invert the comparison and convert to LT and
3965 GE, respectively. */
3968 case GT: cmp_code = LT; break;
3969 case LE: cmp_code = GE; break;
3970 case GTU: cmp_code = LTU; break;
3971 case LEU: cmp_code = GEU; break;
3974 if (cmp_code == GET_CODE (operands[0]))
3976 op0 = cmp_operands[0];
3977 op1 = cmp_operands[1];
3981 op0 = cmp_operands[1];
3982 op1 = cmp_operands[0];
3984 op0 = force_reg (mode, op0);
3985 if (!arith_operand (op1, mode))
3986 op1 = force_reg (mode, op1);
3988 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3989 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3993 /* Argument support functions. */
3995 /* Initialize CUMULATIVE_ARGS for a function. */
3998 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3999 rtx libname ATTRIBUTE_UNUSED)
4001 static CUMULATIVE_ARGS zero_cum;
4002 tree param, next_param;
4005 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
4007 /* Determine if this function has variable arguments. This is
4008 indicated by the last argument being 'void_type_mode' if there
4009 are no variable arguments. The standard MIPS calling sequence
4010 passes all arguments in the general purpose registers in this case. */
4012 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
4013 param != 0; param = next_param)
4015 next_param = TREE_CHAIN (param);
4016 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
4017 cum->gp_reg_found = 1;
4022 /* Fill INFO with information about a single argument. CUM is the
4023 cumulative state for earlier arguments. MODE is the mode of this
4024 argument and TYPE is its type (if known). NAMED is true if this
4025 is a named (fixed) argument rather than a variable one. */
4028 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4029 tree type, int named, struct mips_arg_info *info)
4031 bool doubleword_aligned_p;
4032 unsigned int num_bytes, num_words, max_regs;
4034 /* Work out the size of the argument. */
4035 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4036 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
4038 /* Decide whether it should go in a floating-point register, assuming
4039 one is free. Later code checks for availability.
4041 The checks against UNITS_PER_FPVALUE handle the soft-float and
4042 single-float cases. */
4046 /* The EABI conventions have traditionally been defined in terms
4047 of TYPE_MODE, regardless of the actual type. */
4048 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
4049 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4050 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4055 /* Only leading floating-point scalars are passed in
4056 floating-point registers. We also handle vector floats the same
4057 say, which is OK because they are not covered by the standard ABI. */
4058 info->fpr_p = (!cum->gp_reg_found
4059 && cum->arg_number < 2
4060 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4061 || VECTOR_FLOAT_TYPE_P (type))
4062 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4063 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4064 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4069 /* Scalar and complex floating-point types are passed in
4070 floating-point registers. */
4071 info->fpr_p = (named
4072 && (type == 0 || FLOAT_TYPE_P (type))
4073 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4074 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4075 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4076 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4078 /* ??? According to the ABI documentation, the real and imaginary
4079 parts of complex floats should be passed in individual registers.
4080 The real and imaginary parts of stack arguments are supposed
4081 to be contiguous and there should be an extra word of padding
4084 This has two problems. First, it makes it impossible to use a
4085 single "void *" va_list type, since register and stack arguments
4086 are passed differently. (At the time of writing, MIPSpro cannot
4087 handle complex float varargs correctly.) Second, it's unclear
4088 what should happen when there is only one register free.
4090 For now, we assume that named complex floats should go into FPRs
4091 if there are two FPRs free, otherwise they should be passed in the
4092 same way as a struct containing two floats. */
4094 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4095 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
4097 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4098 info->fpr_p = false;
4108 /* See whether the argument has doubleword alignment. */
4109 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4111 /* Set REG_OFFSET to the register count we're interested in.
4112 The EABI allocates the floating-point registers separately,
4113 but the other ABIs allocate them like integer registers. */
4114 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4118 /* Advance to an even register if the argument is doubleword-aligned. */
4119 if (doubleword_aligned_p)
4120 info->reg_offset += info->reg_offset & 1;
4122 /* Work out the offset of a stack argument. */
4123 info->stack_offset = cum->stack_words;
4124 if (doubleword_aligned_p)
4125 info->stack_offset += info->stack_offset & 1;
4127 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4129 /* Partition the argument between registers and stack. */
4130 info->reg_words = MIN (num_words, max_regs);
4131 info->stack_words = num_words - info->reg_words;
4134 /* INFO describes an argument that is passed in a single-register value.
4135 Return the register it uses, assuming that FPRs are available if
4139 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
4141 if (!info->fpr_p || !hard_float_p)
4142 return GP_ARG_FIRST + info->reg_offset;
4143 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4144 /* In o32, the second argument is always passed in $f14
4145 for TARGET_DOUBLE_FLOAT, regardless of whether the
4146 first argument was a word or doubleword. */
4147 return FP_ARG_FIRST + 2;
4149 return FP_ARG_FIRST + info->reg_offset;
4153 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
4155 return !TARGET_OLDABI;
4158 /* Implement FUNCTION_ARG. */
4161 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4162 tree type, int named)
4164 struct mips_arg_info info;
4166 /* We will be called with a mode of VOIDmode after the last argument
4167 has been seen. Whatever we return will be passed to the call
4168 insn. If we need a mips16 fp_code, return a REG with the code
4169 stored as the mode. */
4170 if (mode == VOIDmode)
4172 if (TARGET_MIPS16 && cum->fp_code != 0)
4173 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4179 mips_arg_info (cum, mode, type, named, &info);
4181 /* Return straight away if the whole argument is passed on the stack. */
4182 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
4186 && TREE_CODE (type) == RECORD_TYPE
4188 && TYPE_SIZE_UNIT (type)
4189 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4192 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4193 structure contains a double in its entirety, then that 64-bit
4194 chunk is passed in a floating point register. */
4197 /* First check to see if there is any such field. */
4198 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4199 if (TREE_CODE (field) == FIELD_DECL
4200 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4201 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4202 && host_integerp (bit_position (field), 0)
4203 && int_bit_position (field) % BITS_PER_WORD == 0)
4208 /* Now handle the special case by returning a PARALLEL
4209 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4210 chunks are passed in registers. */
4212 HOST_WIDE_INT bitpos;
4215 /* assign_parms checks the mode of ENTRY_PARM, so we must
4216 use the actual mode here. */
4217 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4220 field = TYPE_FIELDS (type);
4221 for (i = 0; i < info.reg_words; i++)
4225 for (; field; field = TREE_CHAIN (field))
4226 if (TREE_CODE (field) == FIELD_DECL
4227 && int_bit_position (field) >= bitpos)
4231 && int_bit_position (field) == bitpos
4232 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4233 && !TARGET_SOFT_FLOAT
4234 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
4235 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4237 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4240 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4241 GEN_INT (bitpos / BITS_PER_UNIT));
4243 bitpos += BITS_PER_WORD;
4249 /* Handle the n32/n64 conventions for passing complex floating-point
4250 arguments in FPR pairs. The real part goes in the lower register
4251 and the imaginary part goes in the upper register. */
4254 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4257 enum machine_mode inner;
4260 inner = GET_MODE_INNER (mode);
4261 reg = FP_ARG_FIRST + info.reg_offset;
4262 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4264 /* Real part in registers, imaginary part on stack. */
4265 gcc_assert (info.stack_words == info.reg_words);
4266 return gen_rtx_REG (inner, reg);
4270 gcc_assert (info.stack_words == 0);
4271 real = gen_rtx_EXPR_LIST (VOIDmode,
4272 gen_rtx_REG (inner, reg),
4274 imag = gen_rtx_EXPR_LIST (VOIDmode,
4276 reg + info.reg_words / 2),
4277 GEN_INT (GET_MODE_SIZE (inner)));
4278 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
4282 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4285 /* Implement FUNCTION_ARG_ADVANCE. */
4288 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4289 tree type, int named)
4291 struct mips_arg_info info;
4293 mips_arg_info (cum, mode, type, named, &info);
4296 cum->gp_reg_found = true;
4298 /* See the comment above the cumulative args structure in mips.h
4299 for an explanation of what this code does. It assumes the O32
4300 ABI, which passes at most 2 arguments in float registers. */
4301 if (cum->arg_number < 2 && info.fpr_p)
4302 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
4304 if (mips_abi != ABI_EABI || !info.fpr_p)
4305 cum->num_gprs = info.reg_offset + info.reg_words;
4306 else if (info.reg_words > 0)
4307 cum->num_fprs += MAX_FPRS_PER_FMT;
4309 if (info.stack_words > 0)
4310 cum->stack_words = info.stack_offset + info.stack_words;
4315 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4318 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4319 enum machine_mode mode, tree type, bool named)
4321 struct mips_arg_info info;
4323 mips_arg_info (cum, mode, type, named, &info);
4324 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4328 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4329 PARM_BOUNDARY bits of alignment, but will be given anything up
4330 to STACK_BOUNDARY bits if the type requires it. */
4333 function_arg_boundary (enum machine_mode mode, tree type)
4335 unsigned int alignment;
4337 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4338 if (alignment < PARM_BOUNDARY)
4339 alignment = PARM_BOUNDARY;
4340 if (alignment > STACK_BOUNDARY)
4341 alignment = STACK_BOUNDARY;
4345 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4346 upward rather than downward. In other words, return true if the
4347 first byte of the stack slot has useful data, false if the last
4351 mips_pad_arg_upward (enum machine_mode mode, const_tree type)
4353 /* On little-endian targets, the first byte of every stack argument
4354 is passed in the first byte of the stack slot. */
4355 if (!BYTES_BIG_ENDIAN)
4358 /* Otherwise, integral types are padded downward: the last byte of a
4359 stack argument is passed in the last byte of the stack slot. */
4361 ? (INTEGRAL_TYPE_P (type)
4362 || POINTER_TYPE_P (type)
4363 || FIXED_POINT_TYPE_P (type))
4364 : (GET_MODE_CLASS (mode) == MODE_INT
4365 || ALL_SCALAR_FIXED_POINT_MODE_P (mode)))
4368 /* Big-endian o64 pads floating-point arguments downward. */
4369 if (mips_abi == ABI_O64)
4370 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4373 /* Other types are padded upward for o32, o64, n32 and n64. */
4374 if (mips_abi != ABI_EABI)
4377 /* Arguments smaller than a stack slot are padded downward. */
4378 if (mode != BLKmode)
4379 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4381 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4385 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4386 if the least significant byte of the register has useful data. Return
4387 the opposite if the most significant byte does. */
4390 mips_pad_reg_upward (enum machine_mode mode, tree type)
4392 /* No shifting is required for floating-point arguments. */
4393 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4394 return !BYTES_BIG_ENDIAN;
4396 /* Otherwise, apply the same padding to register arguments as we do
4397 to stack arguments. */
4398 return mips_pad_arg_upward (mode, type);
4402 /* Return nonzero when an argument must be passed by reference. */
4405 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4406 enum machine_mode mode, const_tree type,
4407 bool named ATTRIBUTE_UNUSED)
4409 if (mips_abi == ABI_EABI)
4413 /* ??? How should SCmode be handled? */
4414 if (mode == DImode || mode == DFmode
4415 || mode == DQmode || mode == UDQmode
4416 || mode == DAmode || mode == UDAmode)
4419 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
4420 return size == -1 || size > UNITS_PER_WORD;
4424 /* If we have a variable-sized parameter, we have no choice. */
4425 return targetm.calls.must_pass_in_stack (mode, type);
4430 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
4431 enum machine_mode mode ATTRIBUTE_UNUSED,
4432 const_tree type ATTRIBUTE_UNUSED, bool named)
4434 return mips_abi == ABI_EABI && named;
4437 /* See whether VALTYPE is a record whose fields should be returned in
4438 floating-point registers. If so, return the number of fields and
4439 list them in FIELDS (which should have two elements). Return 0
4442 For n32 & n64, a structure with one or two fields is returned in
4443 floating-point registers as long as every field has a floating-point
4447 mips_fpr_return_fields (const_tree valtype, tree *fields)
4455 if (TREE_CODE (valtype) != RECORD_TYPE)
4459 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
4461 if (TREE_CODE (field) != FIELD_DECL)
4464 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
4470 fields[i++] = field;
4476 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
4477 a value in the most significant part of $2/$3 if:
4479 - the target is big-endian;
4481 - the value has a structure or union type (we generalize this to
4482 cover aggregates from other languages too); and
4484 - the structure is not returned in floating-point registers. */
4487 mips_return_in_msb (const_tree valtype)
4491 return (TARGET_NEWABI
4492 && TARGET_BIG_ENDIAN
4493 && AGGREGATE_TYPE_P (valtype)
4494 && mips_fpr_return_fields (valtype, fields) == 0);
4498 /* Return true if the function return value MODE will get returned in a
4499 floating-point register. */
4502 mips_return_mode_in_fpr_p (enum machine_mode mode)
4504 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
4505 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
4506 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4507 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
4510 /* Return a composite value in a pair of floating-point registers.
4511 MODE1 and OFFSET1 are the mode and byte offset for the first value,
4512 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
4515 For n32 & n64, $f0 always holds the first value and $f2 the second.
4516 Otherwise the values are packed together as closely as possible. */
4519 mips_return_fpr_pair (enum machine_mode mode,
4520 enum machine_mode mode1, HOST_WIDE_INT offset1,
4521 enum machine_mode mode2, HOST_WIDE_INT offset2)
4525 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
4526 return gen_rtx_PARALLEL
4529 gen_rtx_EXPR_LIST (VOIDmode,
4530 gen_rtx_REG (mode1, FP_RETURN),
4532 gen_rtx_EXPR_LIST (VOIDmode,
4533 gen_rtx_REG (mode2, FP_RETURN + inc),
4534 GEN_INT (offset2))));
4539 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
4540 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
4541 VALTYPE is null and MODE is the mode of the return value. */
4544 mips_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
4545 enum machine_mode mode)
4552 mode = TYPE_MODE (valtype);
4553 unsignedp = TYPE_UNSIGNED (valtype);
4555 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
4556 true, we must promote the mode just as PROMOTE_MODE does. */
4557 mode = promote_mode (valtype, mode, &unsignedp, 1);
4559 /* Handle structures whose fields are returned in $f0/$f2. */
4560 switch (mips_fpr_return_fields (valtype, fields))
4563 return gen_rtx_REG (mode, FP_RETURN);
4566 return mips_return_fpr_pair (mode,
4567 TYPE_MODE (TREE_TYPE (fields[0])),
4568 int_byte_position (fields[0]),
4569 TYPE_MODE (TREE_TYPE (fields[1])),
4570 int_byte_position (fields[1]));
4573 /* If a value is passed in the most significant part of a register, see
4574 whether we have to round the mode up to a whole number of words. */
4575 if (mips_return_in_msb (valtype))
4577 HOST_WIDE_INT size = int_size_in_bytes (valtype);
4578 if (size % UNITS_PER_WORD != 0)
4580 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
4581 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
4585 /* For EABI, the class of return register depends entirely on MODE.
4586 For example, "struct { some_type x; }" and "union { some_type x; }"
4587 are returned in the same way as a bare "some_type" would be.
4588 Other ABIs only use FPRs for scalar, complex or vector types. */
4589 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
4590 return gen_rtx_REG (mode, GP_RETURN);
4595 /* Handle long doubles for n32 & n64. */
4597 return mips_return_fpr_pair (mode,
4599 DImode, GET_MODE_SIZE (mode) / 2);
4601 if (mips_return_mode_in_fpr_p (mode))
4603 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4604 return mips_return_fpr_pair (mode,
4605 GET_MODE_INNER (mode), 0,
4606 GET_MODE_INNER (mode),
4607 GET_MODE_SIZE (mode) / 2);
4609 return gen_rtx_REG (mode, FP_RETURN);
4613 return gen_rtx_REG (mode, GP_RETURN);
4616 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
4617 all BLKmode objects are returned in memory. Under the new (N32 and
4618 64-bit MIPS ABIs) small structures are returned in a register.
4619 Objects with varying size must still be returned in memory, of
4623 mips_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED)
4626 return (TYPE_MODE (type) == BLKmode);
4628 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
4629 || (int_size_in_bytes (type) == -1));
4633 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4634 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4637 CUMULATIVE_ARGS local_cum;
4638 int gp_saved, fp_saved;
4640 /* The caller has advanced CUM up to, but not beyond, the last named
4641 argument. Advance a local copy of CUM past the last "real" named
4642 argument, to find out how many registers are left over. */
4645 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4647 /* Found out how many registers we need to save. */
4648 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4649 fp_saved = (EABI_FLOAT_VARARGS_P
4650 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4659 ptr = plus_constant (virtual_incoming_args_rtx,
4660 REG_PARM_STACK_SPACE (cfun->decl)
4661 - gp_saved * UNITS_PER_WORD);
4662 mem = gen_rtx_MEM (BLKmode, ptr);
4663 set_mem_alias_set (mem, get_varargs_alias_set ());
4665 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4670 /* We can't use move_block_from_reg, because it will use
4672 enum machine_mode mode;
4675 /* Set OFF to the offset from virtual_incoming_args_rtx of
4676 the first float register. The FP save area lies below
4677 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4678 off = -gp_saved * UNITS_PER_WORD;
4679 off &= ~(UNITS_PER_FPVALUE - 1);
4680 off -= fp_saved * UNITS_PER_FPREG;
4682 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4684 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4685 i += MAX_FPRS_PER_FMT)
4689 ptr = plus_constant (virtual_incoming_args_rtx, off);
4690 mem = gen_rtx_MEM (mode, ptr);
4691 set_mem_alias_set (mem, get_varargs_alias_set ());
4692 mips_emit_move (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4693 off += UNITS_PER_HWFPVALUE;
4697 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4698 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4699 + fp_saved * UNITS_PER_FPREG);
4702 /* Create the va_list data type.
4703 We keep 3 pointers, and two offsets.
4704 Two pointers are to the overflow area, which starts at the CFA.
4705 One of these is constant, for addressing into the GPR save area below it.
4706 The other is advanced up the stack through the overflow region.
4707 The third pointer is to the GPR save area. Since the FPR save area
4708 is just below it, we can address FPR slots off this pointer.
4709 We also keep two one-byte offsets, which are to be subtracted from the
4710 constant pointers to yield addresses in the GPR and FPR save areas.
4711 These are downcounted as float or non-float arguments are used,
4712 and when they get to zero, the argument must be obtained from the
4714 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4715 pointer is enough. It's started at the GPR save area, and is
4717 Note that the GPR save area is not constant size, due to optimization
4718 in the prologue. Hence, we can't use a design with two pointers
4719 and two offsets, although we could have designed this with two pointers
4720 and three offsets. */
4723 mips_build_builtin_va_list (void)
4725 if (EABI_FLOAT_VARARGS_P)
4727 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4730 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4732 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4734 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4736 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4738 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4739 unsigned_char_type_node);
4740 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4741 unsigned_char_type_node);
4742 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4743 warn on every user file. */
4744 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4745 array = build_array_type (unsigned_char_type_node,
4746 build_index_type (index));
4747 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4749 DECL_FIELD_CONTEXT (f_ovfl) = record;
4750 DECL_FIELD_CONTEXT (f_gtop) = record;
4751 DECL_FIELD_CONTEXT (f_ftop) = record;
4752 DECL_FIELD_CONTEXT (f_goff) = record;
4753 DECL_FIELD_CONTEXT (f_foff) = record;
4754 DECL_FIELD_CONTEXT (f_res) = record;
4756 TYPE_FIELDS (record) = f_ovfl;
4757 TREE_CHAIN (f_ovfl) = f_gtop;
4758 TREE_CHAIN (f_gtop) = f_ftop;
4759 TREE_CHAIN (f_ftop) = f_goff;
4760 TREE_CHAIN (f_goff) = f_foff;
4761 TREE_CHAIN (f_foff) = f_res;
4763 layout_type (record);
4766 else if (TARGET_IRIX && TARGET_IRIX6)
4767 /* On IRIX 6, this type is 'char *'. */
4768 return build_pointer_type (char_type_node);
4770 /* Otherwise, we use 'void *'. */
4771 return ptr_type_node;
4774 /* Implement va_start. */
4777 mips_va_start (tree valist, rtx nextarg)
4779 if (EABI_FLOAT_VARARGS_P)
4781 const CUMULATIVE_ARGS *cum;
4782 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4783 tree ovfl, gtop, ftop, goff, foff;
4785 int gpr_save_area_size;
4786 int fpr_save_area_size;
4789 cum = ¤t_function_args_info;
4791 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4793 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4795 f_ovfl = TYPE_FIELDS (va_list_type_node);
4796 f_gtop = TREE_CHAIN (f_ovfl);
4797 f_ftop = TREE_CHAIN (f_gtop);
4798 f_goff = TREE_CHAIN (f_ftop);
4799 f_foff = TREE_CHAIN (f_goff);
4801 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4803 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4805 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4807 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4809 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4812 /* Emit code to initialize OVFL, which points to the next varargs
4813 stack argument. CUM->STACK_WORDS gives the number of stack
4814 words used by named arguments. */
4815 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4816 if (cum->stack_words > 0)
4817 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4818 size_int (cum->stack_words * UNITS_PER_WORD));
4819 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4820 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4822 /* Emit code to initialize GTOP, the top of the GPR save area. */
4823 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4824 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4825 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4827 /* Emit code to initialize FTOP, the top of the FPR save area.
4828 This address is gpr_save_area_bytes below GTOP, rounded
4829 down to the next fp-aligned boundary. */
4830 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4831 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4832 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4834 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4835 size_int (-fpr_offset));
4836 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4837 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4839 /* Emit code to initialize GOFF, the offset from GTOP of the
4840 next GPR argument. */
4841 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4842 build_int_cst (NULL_TREE, gpr_save_area_size));
4843 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4845 /* Likewise emit code to initialize FOFF, the offset from FTOP
4846 of the next FPR argument. */
4847 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4848 build_int_cst (NULL_TREE, fpr_save_area_size));
4849 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4853 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4854 std_expand_builtin_va_start (valist, nextarg);
4858 /* Implement va_arg. */
4861 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4863 HOST_WIDE_INT size, rsize;
4867 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4870 type = build_pointer_type (type);
4872 size = int_size_in_bytes (type);
4873 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4875 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4876 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4879 /* Not a simple merged stack. */
4881 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4882 tree ovfl, top, off, align;
4883 HOST_WIDE_INT osize;
4886 f_ovfl = TYPE_FIELDS (va_list_type_node);
4887 f_gtop = TREE_CHAIN (f_ovfl);
4888 f_ftop = TREE_CHAIN (f_gtop);
4889 f_goff = TREE_CHAIN (f_ftop);
4890 f_foff = TREE_CHAIN (f_goff);
4892 /* We maintain separate pointers and offsets for floating-point
4893 and integer arguments, but we need similar code in both cases.
4896 TOP be the top of the register save area;
4897 OFF be the offset from TOP of the next register;
4898 ADDR_RTX be the address of the argument;
4899 RSIZE be the number of bytes used to store the argument
4900 when it's in the register save area;
4901 OSIZE be the number of bytes used to store it when it's
4902 in the stack overflow area; and
4903 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4905 The code we want is:
4907 1: off &= -rsize; // round down
4910 4: addr_rtx = top - off;
4915 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4916 10: addr_rtx = ovfl + PADDING;
4920 [1] and [9] can sometimes be optimized away. */
4922 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4925 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4926 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4928 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4930 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4933 /* When floating-point registers are saved to the stack,
4934 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4935 of the float's precision. */
4936 rsize = UNITS_PER_HWFPVALUE;
4938 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4939 (= PARM_BOUNDARY bits). This can be different from RSIZE
4942 (1) On 32-bit targets when TYPE is a structure such as:
4944 struct s { float f; };
4946 Such structures are passed in paired FPRs, so RSIZE
4947 will be 8 bytes. However, the structure only takes
4948 up 4 bytes of memory, so OSIZE will only be 4.
4950 (2) In combinations such as -mgp64 -msingle-float
4951 -fshort-double. Doubles passed in registers
4952 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4953 but those passed on the stack take up
4954 UNITS_PER_WORD bytes. */
4955 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4959 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4961 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4963 if (rsize > UNITS_PER_WORD)
4965 /* [1] Emit code for: off &= -rsize. */
4966 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4967 build_int_cst (NULL_TREE, -rsize));
4968 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4969 gimplify_and_add (t, pre_p);
4974 /* [2] Emit code to branch if off == 0. */
4975 t = build2 (NE_EXPR, boolean_type_node, off,
4976 build_int_cst (TREE_TYPE (off), 0));
4977 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4979 /* [5] Emit code for: off -= rsize. We do this as a form of
4980 post-increment not available to C. Also widen for the
4981 coming pointer arithmetic. */
4982 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4983 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4984 t = fold_convert (sizetype, t);
4985 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4987 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4988 the argument has RSIZE - SIZE bytes of leading padding. */
4989 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4990 if (BYTES_BIG_ENDIAN && rsize > size)
4992 u = size_int (rsize - size);
4993 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4995 COND_EXPR_THEN (addr) = t;
4997 if (osize > UNITS_PER_WORD)
4999 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
5000 u = size_int (osize - 1);
5001 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
5002 t = fold_convert (sizetype, t);
5003 u = size_int (-osize);
5004 t = build2 (BIT_AND_EXPR, sizetype, t, u);
5005 t = fold_convert (TREE_TYPE (ovfl), t);
5006 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
5011 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
5012 post-increment ovfl by osize. On big-endian machines,
5013 the argument has OSIZE - SIZE bytes of leading padding. */
5014 u = fold_convert (TREE_TYPE (ovfl),
5015 build_int_cst (NULL_TREE, osize));
5016 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
5017 if (BYTES_BIG_ENDIAN && osize > size)
5019 u = size_int (osize - size);
5020 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
5023 /* String [9] and [10,11] together. */
5025 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
5026 COND_EXPR_ELSE (addr) = t;
5028 addr = fold_convert (build_pointer_type (type), addr);
5029 addr = build_va_arg_indirect_ref (addr);
5033 addr = build_va_arg_indirect_ref (addr);
5038 /* We keep a list of functions for which we have already built stubs
5039 in build_mips16_call_stub. */
5043 struct mips16_stub *next;
5048 static struct mips16_stub *mips16_stubs;
5050 /* Return a two-character string representing a function floating-point
5051 return mode, used to name MIPS16 function stubs. */
5054 mips16_call_stub_mode_suffix (enum machine_mode mode)
5058 else if (mode == DFmode)
5060 else if (mode == SCmode)
5062 else if (mode == DCmode)
5064 else if (mode == V2SFmode)
5070 /* Write instructions to move a 32-bit value between general register
5071 GPREG and floating-point register FPREG. DIRECTION is 't' to move
5072 from GPREG to FPREG and 'f' to move in the opposite direction. */
5075 mips_output_32bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5077 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5078 reg_names[gpreg], reg_names[fpreg]);
5081 /* Likewise for 64-bit values. */
5084 mips_output_64bit_xfer (char direction, unsigned int gpreg, unsigned int fpreg)
5087 fprintf (asm_out_file, "\tdm%cc1\t%s,%s\n", direction,
5088 reg_names[gpreg], reg_names[fpreg]);
5089 else if (TARGET_FLOAT64)
5091 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5092 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5093 fprintf (asm_out_file, "\tm%chc1\t%s,%s\n", direction,
5094 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg]);
5098 /* Move the least-significant word. */
5099 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5100 reg_names[gpreg + TARGET_BIG_ENDIAN], reg_names[fpreg]);
5101 /* ...then the most significant word. */
5102 fprintf (asm_out_file, "\tm%cc1\t%s,%s\n", direction,
5103 reg_names[gpreg + TARGET_LITTLE_ENDIAN], reg_names[fpreg + 1]);
5107 /* Write out code to move floating-point arguments into or out of
5108 general registers. FP_CODE is the code describing which arguments
5109 are present (see the comment above the definition of CUMULATIVE_ARGS
5110 in mips.h). DIRECTION is as for mips_output_32bit_xfer. */
5113 mips_output_args_xfer (int fp_code, char direction)
5115 unsigned int gparg, fparg, f;
5116 CUMULATIVE_ARGS cum;
5118 /* This code only works for the original 32-bit ABI and the O64 ABI. */
5119 gcc_assert (TARGET_OLDABI);
5121 init_cumulative_args (&cum, NULL, NULL);
5123 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5125 enum machine_mode mode;
5126 struct mips_arg_info info;
5130 else if ((f & 3) == 2)
5135 mips_arg_info (&cum, mode, NULL, true, &info);
5136 gparg = mips_arg_regno (&info, false);
5137 fparg = mips_arg_regno (&info, true);
5140 mips_output_32bit_xfer (direction, gparg, fparg);
5142 mips_output_64bit_xfer (direction, gparg, fparg);
5144 function_arg_advance (&cum, mode, NULL, true);
5148 /* Build a mips16 function stub. This is used for functions which
5149 take arguments in the floating point registers. It is 32-bit code
5150 that moves the floating point args into the general registers, and
5151 then jumps to the 16-bit code. */
5154 build_mips16_function_stub (void)
5157 char *secname, *stubname;
5158 tree stubid, stubdecl;
5162 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5163 fnname = targetm.strip_name_encoding (fnname);
5164 secname = (char *) alloca (strlen (fnname) + 20);
5165 sprintf (secname, ".mips16.fn.%s", fnname);
5166 stubname = (char *) alloca (strlen (fnname) + 20);
5167 sprintf (stubname, "__fn_stub_%s", fnname);
5168 stubid = get_identifier (stubname);
5169 stubdecl = build_decl (FUNCTION_DECL, stubid,
5170 build_function_type (void_type_node, NULL_TREE));
5171 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5172 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5174 fprintf (asm_out_file, "\t# Stub function for %s (",
5175 current_function_name ());
5177 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
5179 fprintf (asm_out_file, "%s%s",
5180 need_comma ? ", " : "",
5181 (f & 3) == 1 ? "float" : "double");
5184 fprintf (asm_out_file, ")\n");
5186 fprintf (asm_out_file, "\t.set\tnomips16\n");
5187 switch_to_section (function_section (stubdecl));
5188 ASM_OUTPUT_ALIGN (asm_out_file,
5189 floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
5191 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
5192 within a .ent, and we cannot emit another .ent. */
5193 if (!FUNCTION_NAME_ALREADY_DECLARED)
5195 fputs ("\t.ent\t", asm_out_file);
5196 assemble_name (asm_out_file, stubname);
5197 fputs ("\n", asm_out_file);
5200 assemble_name (asm_out_file, stubname);
5201 fputs (":\n", asm_out_file);
5203 /* Load the address of the MIPS16 function into $at. Do this first so
5204 that targets with coprocessor interlocks can use an MFC1 to fill the
5206 fprintf (asm_out_file, "\t.set\tnoat\n");
5207 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
5208 assemble_name (asm_out_file, fnname);
5209 fprintf (asm_out_file, "\n");
5211 mips_output_args_xfer (current_function_args_info.fp_code, 'f');
5213 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5214 fprintf (asm_out_file, "\t.set\tat\n");
5216 if (!FUNCTION_NAME_ALREADY_DECLARED)
5218 fputs ("\t.end\t", asm_out_file);
5219 assemble_name (asm_out_file, stubname);
5220 fputs ("\n", asm_out_file);
5223 switch_to_section (function_section (current_function_decl));
5226 /* The current function is a MIPS16 function that returns a value in an FPR.
5227 Copy the return value from its soft-float to its hard-float location.
5228 libgcc2 has special non-MIPS16 helper functions for each case. */
5231 mips16_copy_fpr_return_value (void)
5233 rtx fn, insn, arg, call;
5234 tree id, return_type;
5235 enum machine_mode return_mode;
5237 return_type = DECL_RESULT (current_function_decl);
5238 return_mode = DECL_MODE (return_type);
5240 id = get_identifier (ACONCAT (("__mips16_ret_",
5241 mips16_call_stub_mode_suffix (return_mode),
5243 fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5244 arg = gen_rtx_REG (return_mode, GP_RETURN);
5245 call = gen_call_value_internal (arg, fn, const0_rtx);
5246 insn = emit_call_insn (call);
5247 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), arg);
5250 /* Build a call stub for a mips16 call. A stub is needed if we are
5251 passing any floating point values which should go into the floating
5252 point registers. If we are, and the call turns out to be to a
5253 32-bit function, the stub will be used to move the values into the
5254 floating point registers before calling the 32-bit function. The
5255 linker will magically adjust the function call to either the 16-bit
5256 function or the 32-bit stub, depending upon where the function call
5257 is actually defined.
5259 Similarly, we need a stub if the return value might come back in a
5260 floating point register.
5262 RETVAL is the location of the return value, or null if this is
5263 a call rather than a call_value. FN is the address of the
5264 function and ARG_SIZE is the size of the arguments. FP_CODE
5265 is the code built by function_arg. This function returns a nonzero
5266 value if it builds the call instruction itself. */
5269 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
5273 char *secname, *stubname;
5274 struct mips16_stub *l;
5275 tree stubid, stubdecl;
5280 /* We don't need to do anything if we aren't in mips16 mode, or if
5281 we were invoked with the -msoft-float option. */
5282 if (!TARGET_MIPS16 || TARGET_SOFT_FLOAT_ABI)
5285 /* Figure out whether the value might come back in a floating point
5288 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
5290 /* We don't need to do anything if there were no floating point
5291 arguments and the value will not be returned in a floating point
5293 if (fp_code == 0 && ! fpret)
5296 /* We don't need to do anything if this is a call to a special
5297 mips16 support function. */
5298 if (GET_CODE (fn) == SYMBOL_REF
5299 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
5302 /* This code will only work for o32 and o64 abis. The other ABI's
5303 require more sophisticated support. */
5304 gcc_assert (TARGET_OLDABI);
5306 /* If we're calling via a function pointer, then we must always call
5307 via a stub. There are magic stubs provided in libgcc.a for each
5308 of the required cases. Each of them expects the function address
5309 to arrive in register $2. */
5311 if (GET_CODE (fn) != SYMBOL_REF)
5317 /* ??? If this code is modified to support other ABI's, we need
5318 to handle PARALLEL return values here. */
5321 sprintf (buf, "__mips16_call_stub_%s_%d",
5322 mips16_call_stub_mode_suffix (GET_MODE (retval)),
5325 sprintf (buf, "__mips16_call_stub_%d",
5328 id = get_identifier (buf);
5329 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
5331 mips_emit_move (gen_rtx_REG (Pmode, 2), fn);
5333 if (retval == NULL_RTX)
5334 insn = gen_call_internal (stub_fn, arg_size);
5336 insn = gen_call_value_internal (retval, stub_fn, arg_size);
5337 insn = emit_call_insn (insn);
5339 /* Put the register usage information on the CALL. */
5340 CALL_INSN_FUNCTION_USAGE (insn) =
5341 gen_rtx_EXPR_LIST (VOIDmode,
5342 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
5343 CALL_INSN_FUNCTION_USAGE (insn));
5345 /* If we are handling a floating point return value, we need to
5346 save $18 in the function prologue. Putting a note on the
5347 call will mean that df_regs_ever_live_p ($18) will be true if the
5348 call is not eliminated, and we can check that in the prologue
5351 CALL_INSN_FUNCTION_USAGE (insn) =
5352 gen_rtx_EXPR_LIST (VOIDmode,
5353 gen_rtx_USE (VOIDmode,
5354 gen_rtx_REG (word_mode, 18)),
5355 CALL_INSN_FUNCTION_USAGE (insn));
5357 /* Return 1 to tell the caller that we've generated the call
5362 /* We know the function we are going to call. If we have already
5363 built a stub, we don't need to do anything further. */
5365 fnname = targetm.strip_name_encoding (XSTR (fn, 0));
5366 for (l = mips16_stubs; l != NULL; l = l->next)
5367 if (strcmp (l->name, fnname) == 0)
5372 /* Build a special purpose stub. When the linker sees a
5373 function call in mips16 code, it will check where the target
5374 is defined. If the target is a 32-bit call, the linker will
5375 search for the section defined here. It can tell which
5376 symbol this section is associated with by looking at the
5377 relocation information (the name is unreliable, since this
5378 might be a static function). If such a section is found, the
5379 linker will redirect the call to the start of the magic
5382 If the function does not return a floating point value, the
5383 special stub section is named
5386 If the function does return a floating point value, the stub
5388 .mips16.call.fp.FNNAME
5391 secname = (char *) alloca (strlen (fnname) + 40);
5392 sprintf (secname, ".mips16.call.%s%s",
5395 stubname = (char *) alloca (strlen (fnname) + 20);
5396 sprintf (stubname, "__call_stub_%s%s",
5399 stubid = get_identifier (stubname);
5400 stubdecl = build_decl (FUNCTION_DECL, stubid,
5401 build_function_type (void_type_node, NULL_TREE));
5402 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
5403 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
5405 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
5407 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
5411 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
5413 fprintf (asm_out_file, "%s%s",
5414 need_comma ? ", " : "",
5415 (f & 3) == 1 ? "float" : "double");
5418 fprintf (asm_out_file, ")\n");
5420 fprintf (asm_out_file, "\t.set\tnomips16\n");
5421 assemble_start_function (stubdecl, stubname);
5423 if (!FUNCTION_NAME_ALREADY_DECLARED)
5425 fputs ("\t.ent\t", asm_out_file);
5426 assemble_name (asm_out_file, stubname);
5427 fputs ("\n", asm_out_file);
5429 assemble_name (asm_out_file, stubname);
5430 fputs (":\n", asm_out_file);
5433 /* We build the stub code by hand. That's the only way we can
5434 do it, since we can't generate 32-bit code during a 16-bit
5439 /* Load the address of the MIPS16 function into $at. Do this
5440 first so that targets with coprocessor interlocks can use
5441 an MFC1 to fill the delay slot. */
5442 fprintf (asm_out_file, "\t.set\tnoat\n");
5443 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
5447 mips_output_args_xfer (fp_code, 't');
5451 /* Jump to the previously-loaded address. */
5452 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
5453 fprintf (asm_out_file, "\t.set\tat\n");
5457 fprintf (asm_out_file, "\tmove\t%s,%s\n",
5458 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
5459 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
5460 switch (GET_MODE (retval))
5463 mips_output_32bit_xfer ('f', GP_RETURN + 1,
5464 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5467 mips_output_32bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5468 if (GET_MODE (retval) == SCmode && TARGET_64BIT)
5470 /* On 64-bit targets, complex floats are returned in
5471 a single GPR, such that "sd" on a suitably-aligned
5472 target would store the value correctly. */
5473 fprintf (asm_out_file, "\tdsll\t%s,%s,32\n",
5474 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN],
5475 reg_names[GP_RETURN + TARGET_LITTLE_ENDIAN]);
5476 fprintf (asm_out_file, "\tor\t%s,%s,%s\n",
5477 reg_names[GP_RETURN],
5478 reg_names[GP_RETURN],
5479 reg_names[GP_RETURN + 1]);
5484 mips_output_64bit_xfer ('f', GP_RETURN + (8 / UNITS_PER_WORD),
5485 FP_REG_FIRST + MAX_FPRS_PER_FMT);
5489 mips_output_64bit_xfer ('f', GP_RETURN, FP_REG_FIRST);
5495 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
5498 #ifdef ASM_DECLARE_FUNCTION_SIZE
5499 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
5502 if (!FUNCTION_NAME_ALREADY_DECLARED)
5504 fputs ("\t.end\t", asm_out_file);
5505 assemble_name (asm_out_file, stubname);
5506 fputs ("\n", asm_out_file);
5509 /* Record this stub. */
5510 l = (struct mips16_stub *) xmalloc (sizeof *l);
5511 l->name = xstrdup (fnname);
5513 l->next = mips16_stubs;
5517 /* If we expect a floating point return value, but we've built a
5518 stub which does not expect one, then we're in trouble. We can't
5519 use the existing stub, because it won't handle the floating point
5520 value. We can't build a new stub, because the linker won't know
5521 which stub to use for the various calls in this object file.
5522 Fortunately, this case is illegal, since it means that a function
5523 was declared in two different ways in a single compilation. */
5524 if (fpret && ! l->fpret)
5525 error ("cannot handle inconsistent calls to %qs", fnname);
5527 if (retval == NULL_RTX)
5528 insn = gen_call_internal_direct (fn, arg_size);
5530 insn = gen_call_value_internal_direct (retval, fn, arg_size);
5531 insn = emit_call_insn (insn);
5533 /* If we are calling a stub which handles a floating point return
5534 value, we need to arrange to save $18 in the prologue. We do
5535 this by marking the function call as using the register. The
5536 prologue will later see that it is used, and emit code to save
5539 CALL_INSN_FUNCTION_USAGE (insn) =
5540 gen_rtx_EXPR_LIST (VOIDmode,
5541 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
5542 CALL_INSN_FUNCTION_USAGE (insn));
5544 /* Return 1 to tell the caller that we've generated the call
5549 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
5552 mips_ok_for_lazy_binding_p (rtx x)
5554 return (TARGET_USE_GOT
5555 && GET_CODE (x) == SYMBOL_REF
5556 && !mips_symbol_binds_local_p (x));
5559 /* Load function address ADDR into register DEST. SIBCALL_P is true
5560 if the address is needed for a sibling call. Return true if we
5561 used an explicit lazy-binding sequence. */
5564 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
5566 /* If we're generating PIC, and this call is to a global function,
5567 try to allow its address to be resolved lazily. This isn't
5568 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
5569 to the stub would be our caller's gp, not ours. */
5570 if (TARGET_EXPLICIT_RELOCS
5571 && !(sibcall_p && TARGET_CALL_SAVED_GP)
5572 && mips_ok_for_lazy_binding_p (addr))
5574 rtx high, lo_sum_symbol;
5576 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
5577 addr, SYMBOL_GOTOFF_CALL);
5578 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
5579 if (Pmode == SImode)
5580 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
5582 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
5587 mips_emit_move (dest, addr);
5593 /* Expand a call or call_value instruction. RESULT is where the
5594 result will go (null for calls), ADDR is the address of the
5595 function, ARGS_SIZE is the size of the arguments and AUX is
5596 the value passed to us by mips_function_arg. SIBCALL_P is true
5597 if we are expanding a sibling call, false if we're expanding
5601 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
5603 rtx orig_addr, pattern, insn;
5608 if (!call_insn_operand (addr, VOIDmode))
5610 addr = gen_reg_rtx (Pmode);
5611 lazy_p = mips_load_call_address (addr, orig_addr, sibcall_p);
5615 && TARGET_HARD_FLOAT_ABI
5616 && build_mips16_call_stub (result, addr, args_size,
5617 aux == 0 ? 0 : (int) GET_MODE (aux)))
5621 pattern = (sibcall_p
5622 ? gen_sibcall_internal (addr, args_size)
5623 : gen_call_internal (addr, args_size));
5624 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
5628 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
5629 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
5632 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
5633 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
5636 pattern = (sibcall_p
5637 ? gen_sibcall_value_internal (result, addr, args_size)
5638 : gen_call_value_internal (result, addr, args_size));
5640 insn = emit_call_insn (pattern);
5642 /* Lazy-binding stubs require $gp to be valid on entry. We also pretend
5643 that they use FAKE_CALL_REGNO; see the load_call<mode> patterns for
5647 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
5648 use_reg (&CALL_INSN_FUNCTION_USAGE (insn),
5649 gen_rtx_REG (Pmode, FAKE_CALL_REGNO));
5654 /* Implement TARGET_FUNCTION_OK_FOR_SIBCALL. */
5657 mips_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5659 if (!TARGET_SIBCALLS)
5662 /* We can't do a sibcall if the called function is a MIPS16 function
5663 because there is no direct "jx" instruction equivalent to "jalx" to
5664 switch the ISA mode. */
5665 if (mips_use_mips16_mode_p (decl))
5668 /* ...and when -minterlink-mips16 is in effect, assume that external
5669 functions could be MIPS16 ones unless an attribute explicitly
5670 tells us otherwise. We only care about cases where the sibling
5671 and normal calls would both be direct. */
5672 if (TARGET_INTERLINK_MIPS16
5674 && DECL_EXTERNAL (decl)
5675 && !mips_nomips16_decl_p (decl)
5676 && const_call_insn_operand (XEXP (DECL_RTL (decl), 0), VOIDmode))
5683 /* Emit code to move general operand SRC into condition-code
5684 register DEST. SCRATCH is a scratch TFmode float register.
5691 where FP1 and FP2 are single-precision float registers
5692 taken from SCRATCH. */
5695 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
5699 /* Change the source to SFmode. */
5701 src = adjust_address (src, SFmode, 0);
5702 else if (REG_P (src) || GET_CODE (src) == SUBREG)
5703 src = gen_rtx_REG (SFmode, true_regnum (src));
5705 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
5706 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
5708 mips_emit_move (copy_rtx (fp1), src);
5709 mips_emit_move (copy_rtx (fp2), CONST0_RTX (SFmode));
5710 emit_insn (gen_slt_sf (dest, fp2, fp1));
5713 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
5714 Assume that the areas do not overlap. */
5717 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
5719 HOST_WIDE_INT offset, delta;
5720 unsigned HOST_WIDE_INT bits;
5722 enum machine_mode mode;
5725 /* Work out how many bits to move at a time. If both operands have
5726 half-word alignment, it is usually better to move in half words.
5727 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
5728 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
5729 Otherwise move word-sized chunks. */
5730 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
5731 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
5732 bits = BITS_PER_WORD / 2;
5734 bits = BITS_PER_WORD;
5736 mode = mode_for_size (bits, MODE_INT, 0);
5737 delta = bits / BITS_PER_UNIT;
5739 /* Allocate a buffer for the temporary registers. */
5740 regs = alloca (sizeof (rtx) * length / delta);
5742 /* Load as many BITS-sized chunks as possible. Use a normal load if
5743 the source has enough alignment, otherwise use left/right pairs. */
5744 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5746 regs[i] = gen_reg_rtx (mode);
5747 if (MEM_ALIGN (src) >= bits)
5748 mips_emit_move (regs[i], adjust_address (src, mode, offset));
5751 rtx part = adjust_address (src, BLKmode, offset);
5752 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
5757 /* Copy the chunks to the destination. */
5758 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
5759 if (MEM_ALIGN (dest) >= bits)
5760 mips_emit_move (adjust_address (dest, mode, offset), regs[i]);
5763 rtx part = adjust_address (dest, BLKmode, offset);
5764 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
5768 /* Mop up any left-over bytes. */
5769 if (offset < length)
5771 src = adjust_address (src, BLKmode, offset);
5772 dest = adjust_address (dest, BLKmode, offset);
5773 move_by_pieces (dest, src, length - offset,
5774 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
5778 #define MAX_MOVE_REGS 4
5779 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
5782 /* Helper function for doing a loop-based block operation on memory
5783 reference MEM. Each iteration of the loop will operate on LENGTH
5786 Create a new base register for use within the loop and point it to
5787 the start of MEM. Create a new memory reference that uses this
5788 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
5791 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
5792 rtx *loop_reg, rtx *loop_mem)
5794 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
5796 /* Although the new mem does not refer to a known location,
5797 it does keep up to LENGTH bytes of alignment. */
5798 *loop_mem = change_address (mem, BLKmode, *loop_reg);
5799 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
5803 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
5804 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
5805 memory regions do not overlap. */
5808 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
5810 rtx label, src_reg, dest_reg, final_src;
5811 HOST_WIDE_INT leftover;
5813 leftover = length % MAX_MOVE_BYTES;
5816 /* Create registers and memory references for use within the loop. */
5817 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
5818 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
5820 /* Calculate the value that SRC_REG should have after the last iteration
5822 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
5825 /* Emit the start of the loop. */
5826 label = gen_label_rtx ();
5829 /* Emit the loop body. */
5830 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
5832 /* Move on to the next block. */
5833 mips_emit_move (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
5834 mips_emit_move (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
5836 /* Emit the loop condition. */
5837 if (Pmode == DImode)
5838 emit_insn (gen_cmpdi (src_reg, final_src));
5840 emit_insn (gen_cmpsi (src_reg, final_src));
5841 emit_jump_insn (gen_bne (label));
5843 /* Mop up any left-over bytes. */
5845 mips_block_move_straight (dest, src, leftover);
5848 /* Expand a movmemsi instruction. */
5851 mips_expand_block_move (rtx dest, rtx src, rtx length)
5853 if (GET_CODE (length) == CONST_INT)
5855 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
5857 mips_block_move_straight (dest, src, INTVAL (length));
5862 mips_block_move_loop (dest, src, INTVAL (length));
5870 /* Expand a loop of synci insns for the address range [BEGIN, END). */
5873 mips_expand_synci_loop (rtx begin, rtx end)
5875 rtx inc, label, cmp, cmp_result;
5877 /* Load INC with the cache line size (rdhwr INC,$1). */
5878 inc = gen_reg_rtx (SImode);
5879 emit_insn (gen_rdhwr (inc, const1_rtx));
5881 /* Loop back to here. */
5882 label = gen_label_rtx ();
5885 emit_insn (gen_synci (begin));
5887 cmp = gen_reg_rtx (Pmode);
5888 mips_emit_binary (GTU, cmp, begin, end);
5890 mips_emit_binary (PLUS, begin, begin, inc);
5892 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
5893 emit_jump_insn (gen_condjump (cmp_result, label));
5896 /* Return true if it is possible to use left/right accesses for a
5897 bitfield of WIDTH bits starting BITPOS bits into *OP. When
5898 returning true, update *OP, *LEFT and *RIGHT as follows:
5900 *OP is a BLKmode reference to the whole field.
5902 *LEFT is a QImode reference to the first byte if big endian or
5903 the last byte if little endian. This address can be used in the
5904 left-side instructions (lwl, swl, ldl, sdl).
5906 *RIGHT is a QImode reference to the opposite end of the field and
5907 can be used in the patterning right-side instruction. */
5910 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
5911 rtx *left, rtx *right)
5915 /* Check that the operand really is a MEM. Not all the extv and
5916 extzv predicates are checked. */
5920 /* Check that the size is valid. */
5921 if (width != 32 && (!TARGET_64BIT || width != 64))
5924 /* We can only access byte-aligned values. Since we are always passed
5925 a reference to the first byte of the field, it is not necessary to
5926 do anything with BITPOS after this check. */
5927 if (bitpos % BITS_PER_UNIT != 0)
5930 /* Reject aligned bitfields: we want to use a normal load or store
5931 instead of a left/right pair. */
5932 if (MEM_ALIGN (*op) >= width)
5935 /* Adjust *OP to refer to the whole field. This also has the effect
5936 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
5937 *op = adjust_address (*op, BLKmode, 0);
5938 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
5940 /* Get references to both ends of the field. We deliberately don't
5941 use the original QImode *OP for FIRST since the new BLKmode one
5942 might have a simpler address. */
5943 first = adjust_address (*op, QImode, 0);
5944 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
5946 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
5947 be the upper word and RIGHT the lower word. */
5948 if (TARGET_BIG_ENDIAN)
5949 *left = first, *right = last;
5951 *left = last, *right = first;
5957 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
5958 Return true on success. We only handle cases where zero_extract is
5959 equivalent to sign_extract. */
5962 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
5964 rtx left, right, temp;
5966 /* If TARGET_64BIT, the destination of a 32-bit load will be a
5967 paradoxical word_mode subreg. This is the only case in which
5968 we allow the destination to be larger than the source. */
5969 if (GET_CODE (dest) == SUBREG
5970 && GET_MODE (dest) == DImode
5971 && SUBREG_BYTE (dest) == 0
5972 && GET_MODE (SUBREG_REG (dest)) == SImode)
5973 dest = SUBREG_REG (dest);
5975 /* After the above adjustment, the destination must be the same
5976 width as the source. */
5977 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
5980 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
5983 temp = gen_reg_rtx (GET_MODE (dest));
5984 if (GET_MODE (dest) == DImode)
5986 emit_insn (gen_mov_ldl (temp, src, left));
5987 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
5991 emit_insn (gen_mov_lwl (temp, src, left));
5992 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
5998 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
6002 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
6005 enum machine_mode mode;
6007 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
6010 mode = mode_for_size (width, MODE_INT, 0);
6011 src = gen_lowpart (mode, src);
6015 emit_insn (gen_mov_sdl (dest, src, left));
6016 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
6020 emit_insn (gen_mov_swl (dest, src, left));
6021 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
6026 /* Return true if X is a MEM with the same size as MODE. */
6029 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
6036 size = MEM_SIZE (x);
6037 return size && INTVAL (size) == GET_MODE_SIZE (mode);
6040 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
6041 source of an "ext" instruction or the destination of an "ins"
6042 instruction. OP must be a register operand and the following
6043 conditions must hold:
6045 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
6046 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
6047 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
6049 Also reject lengths equal to a word as they are better handled
6050 by the move patterns. */
6053 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
6055 HOST_WIDE_INT len, pos;
6057 if (!ISA_HAS_EXT_INS
6058 || !register_operand (op, VOIDmode)
6059 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
6062 len = INTVAL (size);
6063 pos = INTVAL (position);
6065 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
6066 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
6072 /* Initialize mips_split_addresses from the associated command-line
6075 mips_split_addresses is a half-way house between explicit
6076 relocations and the traditional assembler macros. It can
6077 split absolute 32-bit symbolic constants into a high/lo_sum
6078 pair but uses macros for other sorts of access.
6080 Like explicit relocation support for REL targets, it relies
6081 on GNU extensions in the assembler and the linker.
6083 Although this code should work for -O0, it has traditionally
6084 been treated as an optimization. */
6087 mips_init_split_addresses (void)
6089 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
6090 && optimize && !flag_pic
6091 && !ABI_HAS_64BIT_SYMBOLS)
6092 mips_split_addresses = 1;
6094 mips_split_addresses = 0;
6097 /* (Re-)Initialize information about relocs. */
6100 mips_init_relocs (void)
6102 memset (mips_split_p, '\0', sizeof (mips_split_p));
6103 memset (mips_hi_relocs, '\0', sizeof (mips_hi_relocs));
6104 memset (mips_lo_relocs, '\0', sizeof (mips_lo_relocs));
6106 if (ABI_HAS_64BIT_SYMBOLS)
6108 if (TARGET_EXPLICIT_RELOCS)
6110 mips_split_p[SYMBOL_64_HIGH] = true;
6111 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
6112 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
6114 mips_split_p[SYMBOL_64_MID] = true;
6115 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
6116 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
6118 mips_split_p[SYMBOL_64_LOW] = true;
6119 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
6120 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
6122 mips_split_p[SYMBOL_ABSOLUTE] = true;
6123 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6128 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses || TARGET_MIPS16)
6130 mips_split_p[SYMBOL_ABSOLUTE] = true;
6131 mips_hi_relocs[SYMBOL_ABSOLUTE] = "%hi(";
6132 mips_lo_relocs[SYMBOL_ABSOLUTE] = "%lo(";
6134 mips_lo_relocs[SYMBOL_32_HIGH] = "%hi(";
6140 /* The high part is provided by a pseudo copy of $gp. */
6141 mips_split_p[SYMBOL_GP_RELATIVE] = true;
6142 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gprel(";
6145 if (TARGET_EXPLICIT_RELOCS)
6147 /* Small data constants are kept whole until after reload,
6148 then lowered by mips_rewrite_small_data. */
6149 mips_lo_relocs[SYMBOL_GP_RELATIVE] = "%gp_rel(";
6151 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
6154 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
6155 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
6159 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
6160 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
6165 /* The HIGH and LO_SUM are matched by special .md patterns. */
6166 mips_split_p[SYMBOL_GOT_DISP] = true;
6168 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
6169 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
6170 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
6172 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
6173 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
6174 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
6179 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
6181 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
6182 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
6188 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
6189 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
6190 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
6193 /* Thread-local relocation operators. */
6194 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
6195 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
6196 mips_split_p[SYMBOL_DTPREL] = 1;
6197 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
6198 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
6199 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
6200 mips_split_p[SYMBOL_TPREL] = 1;
6201 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
6202 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
6204 mips_lo_relocs[SYMBOL_HALF] = "%half(";
6207 /* If OP is an UNSPEC address, return the address to which it refers,
6208 otherwise return OP itself. */
6211 mips_strip_unspec_address (rtx op)
6215 split_const (op, &base, &offset);
6216 if (UNSPEC_ADDRESS_P (base))
6217 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
6221 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM
6222 in context CONTEXT. RELOCS is the array of relocations to use. */
6225 print_operand_reloc (FILE *file, rtx op, enum mips_symbol_context context,
6226 const char **relocs)
6228 enum mips_symbol_type symbol_type;
6231 symbol_type = mips_classify_symbolic_expression (op, context);
6232 if (relocs[symbol_type] == 0)
6233 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6235 fputs (relocs[symbol_type], file);
6236 output_addr_const (file, mips_strip_unspec_address (op));
6237 for (p = relocs[symbol_type]; *p != 0; p++)
6242 /* Print the text for PRINT_OPERAND punctation character CH to FILE.
6243 The punctuation characters are:
6245 '(' Start a nested ".set noreorder" block.
6246 ')' End a nested ".set noreorder" block.
6247 '[' Start a nested ".set noat" block.
6248 ']' End a nested ".set noat" block.
6249 '<' Start a nested ".set nomacro" block.
6250 '>' End a nested ".set nomacro" block.
6251 '*' Behave like %(%< if generating a delayed-branch sequence.
6252 '#' Print a nop if in a ".set noreorder" block.
6253 '/' Like '#', but do nothing within a delayed-branch sequence.
6254 '?' Print "l" if mips_branch_likely is true
6255 '.' Print the name of the register with a hard-wired zero (zero or $0).
6256 '@' Print the name of the assembler temporary register (at or $1).
6257 '^' Print the name of the pic call-through register (t9 or $25).
6258 '+' Print the name of the gp register (usually gp or $28).
6259 '$' Print the name of the stack pointer register (sp or $29).
6260 '|' Print ".set push; .set mips2" if !ISA_HAS_LL_SC.
6261 '-' Print ".set pop" under the same conditions for '|'.
6263 See also mips_init_print_operand_pucnt. */
6266 mips_print_operand_punctuation (FILE *file, int ch)
6271 if (set_noreorder++ == 0)
6272 fputs (".set\tnoreorder\n\t", file);
6276 gcc_assert (set_noreorder > 0);
6277 if (--set_noreorder == 0)
6278 fputs ("\n\t.set\treorder", file);
6282 if (set_noat++ == 0)
6283 fputs (".set\tnoat\n\t", file);
6287 gcc_assert (set_noat > 0);
6288 if (--set_noat == 0)
6289 fputs ("\n\t.set\tat", file);
6293 if (set_nomacro++ == 0)
6294 fputs (".set\tnomacro\n\t", file);
6298 gcc_assert (set_nomacro > 0);
6299 if (--set_nomacro == 0)
6300 fputs ("\n\t.set\tmacro", file);
6304 if (final_sequence != 0)
6306 mips_print_operand_punctuation (file, '(');
6307 mips_print_operand_punctuation (file, '<');
6312 if (set_noreorder != 0)
6313 fputs ("\n\tnop", file);
6317 /* Print an extra newline so that the delayed insn is separated
6318 from the following ones. This looks neater and is consistent
6319 with non-nop delayed sequences. */
6320 if (set_noreorder != 0 && final_sequence == 0)
6321 fputs ("\n\tnop\n", file);
6325 if (mips_branch_likely)
6330 fputs (reg_names[GP_REG_FIRST + 0], file);
6334 fputs (reg_names[GP_REG_FIRST + 1], file);
6338 fputs (reg_names[PIC_FUNCTION_ADDR_REGNUM], file);
6342 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
6346 fputs (reg_names[STACK_POINTER_REGNUM], file);
6351 fputs (".set\tpush\n\t.set\tmips2\n\t", file);
6356 fputs ("\n\t.set\tpop", file);
6365 /* Initialize mips_print_operand_punct. */
6368 mips_init_print_operand_punct (void)
6372 for (p = "()[]<>*#/?.@^+$|-"; *p; p++)
6373 mips_print_operand_punct[(unsigned char) *p] = true;
6376 /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
6377 associated with condition CODE. Print the condition part of the
6381 mips_print_int_branch_condition (FILE *file, enum rtx_code code, int letter)
6395 /* Conveniently, the MIPS names for these conditions are the same
6396 as their RTL equivalents. */
6397 fputs (GET_RTX_NAME (code), file);
6401 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6406 /* Likewise floating-point branches. */
6409 mips_print_float_branch_condition (FILE *file, enum rtx_code code, int letter)
6414 fputs ("c1f", file);
6418 fputs ("c1t", file);
6422 output_operand_lossage ("'%%%c' is not a valid operand prefix", letter);
6427 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
6429 'X' Print CONST_INT OP in hexadecimal format.
6430 'x' Print the low 16 bits of CONST_INT OP in hexadecimal format.
6431 'd' Print CONST_INT OP in decimal.
6432 'h' Print the high-part relocation associated with OP, after stripping
6434 'R' Print the low-part relocation associated with OP.
6435 'C' Print the integer branch condition for comparison OP.
6436 'N' Print the inverse of the integer branch condition for comparison OP.
6437 'F' Print the FPU branch condition for comparison OP.
6438 'W' Print the inverse of the FPU branch condition for comparison OP.
6439 'T' Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
6440 'z' for (eq:?I ...), 'n' for (ne:?I ...).
6441 't' Like 'T', but with the EQ/NE cases reversed
6442 'Y' Print mips_fp_conditions[INTVAL (OP)]
6443 'Z' Print OP and a comma for ISA_HAS_8CC, otherwise print nothing.
6444 'q' Print a DSP accumulator register.
6445 'D' Print the second part of a double-word register or memory operand.
6446 'L' Print the low-order register in a double-word register operand.
6447 'M' Print high-order register in a double-word register operand.
6448 'z' Print $0 if OP is zero, otherwise print OP normally. */
6451 print_operand (FILE *file, rtx op, int letter)
6455 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
6457 mips_print_operand_punctuation (file, letter);
6462 code = GET_CODE (op);
6467 if (GET_CODE (op) == CONST_INT)
6468 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6470 output_operand_lossage ("invalid use of '%%%c'", letter);
6474 if (GET_CODE (op) == CONST_INT)
6475 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
6477 output_operand_lossage ("invalid use of '%%%c'", letter);
6481 if (GET_CODE (op) == CONST_INT)
6482 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
6484 output_operand_lossage ("invalid use of '%%%c'", letter);
6490 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_hi_relocs);
6494 print_operand_reloc (file, op, SYMBOL_CONTEXT_LEA, mips_lo_relocs);
6498 mips_print_int_branch_condition (file, code, letter);
6502 mips_print_int_branch_condition (file, reverse_condition (code), letter);
6506 mips_print_float_branch_condition (file, code, letter);
6510 mips_print_float_branch_condition (file, reverse_condition (code),
6517 int truth = (code == NE) == (letter == 'T');
6518 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6523 if (code == CONST_INT && UINTVAL (op) < ARRAY_SIZE (mips_fp_conditions))
6524 fputs (mips_fp_conditions[UINTVAL (op)], file);
6526 output_operand_lossage ("'%%%c' is not a valid operand prefix",
6533 print_operand (file, op, 0);
6539 if (code == REG && MD_REG_P (REGNO (op)))
6540 fprintf (file, "$ac0");
6541 else if (code == REG && DSP_ACC_REG_P (REGNO (op)))
6542 fprintf (file, "$ac%c", reg_names[REGNO (op)][3]);
6544 output_operand_lossage ("invalid use of '%%%c'", letter);
6552 unsigned int regno = REGNO (op);
6553 if ((letter == 'M' && TARGET_LITTLE_ENDIAN)
6554 || (letter == 'L' && TARGET_BIG_ENDIAN)
6557 fprintf (file, "%s", reg_names[regno]);
6563 output_address (plus_constant (XEXP (op, 0), 4));
6565 output_address (XEXP (op, 0));
6569 if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6570 fputs (reg_names[GP_REG_FIRST], file);
6571 else if (CONST_GP_P (op))
6572 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
6574 output_addr_const (file, mips_strip_unspec_address (op));
6580 /* Output address operand X to FILE. */
6583 print_operand_address (FILE *file, rtx x)
6585 struct mips_address_info addr;
6587 if (mips_classify_address (&addr, x, word_mode, true))
6591 print_operand (file, addr.offset, 0);
6592 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6595 case ADDRESS_LO_SUM:
6596 print_operand_reloc (file, addr.offset, SYMBOL_CONTEXT_MEM,
6598 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6601 case ADDRESS_CONST_INT:
6602 output_addr_const (file, x);
6603 fprintf (file, "(%s)", reg_names[0]);
6606 case ADDRESS_SYMBOLIC:
6607 output_addr_const (file, mips_strip_unspec_address (x));
6613 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
6614 FIRST is true if this is the first time handling this decl. */
6617 mips_encode_section_info (tree decl, rtx rtl, int first)
6619 default_encode_section_info (decl, rtl, first);
6621 if (TREE_CODE (decl) == FUNCTION_DECL)
6623 rtx symbol = XEXP (rtl, 0);
6624 tree type = TREE_TYPE (decl);
6626 if ((TARGET_LONG_CALLS && !mips_near_type_p (type))
6627 || mips_far_type_p (type))
6628 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
6632 /* Implement TARGET_SELECT_RTX_SECTION. */
6635 mips_select_rtx_section (enum machine_mode mode, rtx x,
6636 unsigned HOST_WIDE_INT align)
6638 /* ??? Consider using mergeable small data sections. */
6639 if (mips_rtx_constant_in_small_data_p (mode))
6640 return get_named_section (NULL, ".sdata", 0);
6642 return default_elf_select_rtx_section (mode, x, align);
6645 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6647 The complication here is that, with the combination TARGET_ABICALLS
6648 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6649 therefore not be included in the read-only part of a DSO. Handle such
6650 cases by selecting a normal data section instead of a read-only one.
6651 The logic apes that in default_function_rodata_section. */
6654 mips_function_rodata_section (tree decl)
6656 if (!TARGET_ABICALLS || TARGET_GPWORD)
6657 return default_function_rodata_section (decl);
6659 if (decl && DECL_SECTION_NAME (decl))
6661 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6662 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6664 char *rname = ASTRDUP (name);
6666 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6668 else if (flag_function_sections && flag_data_sections
6669 && strncmp (name, ".text.", 6) == 0)
6671 char *rname = ASTRDUP (name);
6672 memcpy (rname + 1, "data", 4);
6673 return get_section (rname, SECTION_WRITE, decl);
6676 return data_section;
6679 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
6680 locally-defined objects go in a small data section. It also controls
6681 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
6682 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
6685 mips_in_small_data_p (const_tree decl)
6689 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6692 /* We don't yet generate small-data references for -mabicalls or
6693 VxWorks RTP code. See the related -G handling in override_options. */
6694 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
6697 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6701 /* Reject anything that isn't in a known small-data section. */
6702 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6703 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6706 /* If a symbol is defined externally, the assembler will use the
6707 usual -G rules when deciding how to implement macros. */
6708 if (mips_lo_relocs[SYMBOL_GP_RELATIVE] || !DECL_EXTERNAL (decl))
6711 else if (TARGET_EMBEDDED_DATA)
6713 /* Don't put constants into the small data section: we want them
6714 to be in ROM rather than RAM. */
6715 if (TREE_CODE (decl) != VAR_DECL)
6718 if (TREE_READONLY (decl)
6719 && !TREE_SIDE_EFFECTS (decl)
6720 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6724 /* Enforce -mlocal-sdata. */
6725 if (!TARGET_LOCAL_SDATA && !TREE_PUBLIC (decl))
6728 /* Enforce -mextern-sdata. */
6729 if (!TARGET_EXTERN_SDATA && DECL_P (decl))
6731 if (DECL_EXTERNAL (decl))
6733 if (DECL_COMMON (decl) && DECL_INITIAL (decl) == NULL)
6737 size = int_size_in_bytes (TREE_TYPE (decl));
6738 return (size > 0 && size <= mips_section_threshold);
6741 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
6742 anchors for small data: the GP register acts as an anchor in that
6743 case. We also don't want to use them for PC-relative accesses,
6744 where the PC acts as an anchor. */
6747 mips_use_anchors_for_symbol_p (const_rtx symbol)
6749 switch (mips_classify_symbol (symbol, SYMBOL_CONTEXT_MEM))
6751 case SYMBOL_PC_RELATIVE:
6752 case SYMBOL_GP_RELATIVE:
6756 return default_use_anchors_for_symbol_p (symbol);
6760 /* The MIPS debug format wants all automatic variables and arguments
6761 to be in terms of the virtual frame pointer (stack pointer before
6762 any adjustment in the function), while the MIPS 3.0 linker wants
6763 the frame pointer to be the stack pointer after the initial
6764 adjustment. So, we do the adjustment here. The arg pointer (which
6765 is eliminated) points to the virtual frame pointer, while the frame
6766 pointer (which may be eliminated) points to the stack pointer after
6767 the initial adjustments. */
6770 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
6772 rtx offset2 = const0_rtx;
6773 rtx reg = eliminate_constant_term (addr, &offset2);
6776 offset = INTVAL (offset2);
6778 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
6779 || reg == hard_frame_pointer_rtx)
6781 offset -= cfun->machine->frame.total_size;
6782 if (reg == hard_frame_pointer_rtx)
6783 offset += cfun->machine->frame.hard_frame_pointer_offset;
6786 /* sdbout_parms does not want this to crash for unrecognized cases. */
6788 else if (reg != arg_pointer_rtx)
6789 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
6796 /* When using assembler macros, keep track of all of small-data externs
6797 so that mips_file_end can emit the appropriate declarations for them.
6799 In most cases it would be safe (though pointless) to emit .externs
6800 for other symbols too. One exception is when an object is within
6801 the -G limit but declared by the user to be in a section other
6802 than .sbss or .sdata. */
6805 mips_output_external (FILE *file, tree decl, const char *name)
6807 default_elf_asm_output_external (file, decl, name);
6809 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6810 set in order to avoid putting out names that are never really
6812 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6814 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
6816 fputs ("\t.extern\t", file);
6817 assemble_name (file, name);
6818 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6819 int_size_in_bytes (TREE_TYPE (decl)));
6821 else if (TARGET_IRIX
6822 && mips_abi == ABI_32
6823 && TREE_CODE (decl) == FUNCTION_DECL)
6825 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6826 `.global name .text' directive for every used but
6827 undefined function. If we don't, the linker may perform
6828 an optimization (skipping over the insns that set $gp)
6829 when it is unsafe. */
6830 fputs ("\t.globl ", file);
6831 assemble_name (file, name);
6832 fputs (" .text\n", file);
6837 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6838 put out a MIPS ECOFF file and a stab. */
6841 mips_output_filename (FILE *stream, const char *name)
6844 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6846 if (write_symbols == DWARF2_DEBUG)
6848 else if (mips_output_filename_first_time)
6850 mips_output_filename_first_time = 0;
6851 num_source_filenames += 1;
6852 current_function_file = name;
6853 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6854 output_quoted_string (stream, name);
6855 putc ('\n', stream);
6858 /* If we are emitting stabs, let dbxout.c handle this (except for
6859 the mips_output_filename_first_time case). */
6860 else if (write_symbols == DBX_DEBUG)
6863 else if (name != current_function_file
6864 && strcmp (name, current_function_file) != 0)
6866 num_source_filenames += 1;
6867 current_function_file = name;
6868 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6869 output_quoted_string (stream, name);
6870 putc ('\n', stream);
6874 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
6877 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
6882 fputs ("\t.dtprelword\t", file);
6886 fputs ("\t.dtpreldword\t", file);
6892 output_addr_const (file, x);
6893 fputs ("+0x8000", file);
6896 /* Implement TARGET_DWARF_REGISTER_SPAN. */
6899 mips_dwarf_register_span (rtx reg)
6902 enum machine_mode mode;
6904 /* By default, GCC maps increasing register numbers to increasing
6905 memory locations, but paired FPRs are always little-endian,
6906 regardless of the prevailing endianness. */
6907 mode = GET_MODE (reg);
6908 if (FP_REG_P (REGNO (reg))
6909 && TARGET_BIG_ENDIAN
6910 && MAX_FPRS_PER_FMT > 1
6911 && GET_MODE_SIZE (mode) > UNITS_PER_FPREG)
6913 gcc_assert (GET_MODE_SIZE (mode) == UNITS_PER_HWFPVALUE);
6914 high = mips_subword (reg, true);
6915 low = mips_subword (reg, false);
6916 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, high, low));
6922 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6923 that should be written before the opening quote, such as "\t.ascii\t"
6924 for real string data or "\t# " for a comment. */
6927 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6932 register const unsigned char *string =
6933 (const unsigned char *)string_param;
6935 fprintf (stream, "%s\"", prefix);
6936 for (i = 0; i < len; i++)
6938 register int c = string[i];
6942 if (c == '\\' || c == '\"')
6944 putc ('\\', stream);
6952 fprintf (stream, "\\%03o", c);
6956 if (cur_pos > 72 && i+1 < len)
6959 fprintf (stream, "\"\n%s\"", prefix);
6962 fprintf (stream, "\"\n");
6965 #ifdef BSS_SECTION_ASM_OP
6966 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6967 in the use of sbss. */
6970 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6971 unsigned HOST_WIDE_INT size, int align)
6973 extern tree last_assemble_variable_decl;
6975 if (mips_in_small_data_p (decl))
6976 switch_to_section (get_named_section (NULL, ".sbss", 0));
6978 switch_to_section (bss_section);
6979 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6980 last_assemble_variable_decl = decl;
6981 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
6982 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6986 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6987 macros, mark the symbol as written so that mips_file_end won't emit an
6988 .extern for it. STREAM is the output file, NAME is the name of the
6989 symbol, INIT_STRING is the string that should be written before the
6990 symbol and FINAL_STRING is the string that should be written after it.
6991 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6994 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6995 const char *final_string, ...)
6999 fputs (init_string, stream);
7000 assemble_name (stream, name);
7001 va_start (ap, final_string);
7002 vfprintf (stream, final_string, ap);
7005 if (!TARGET_EXPLICIT_RELOCS)
7007 tree name_tree = get_identifier (name);
7008 TREE_ASM_WRITTEN (name_tree) = 1;
7012 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
7013 NAME is the name of the object and ALIGN is the required alignment
7014 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
7015 alignment argument. */
7018 mips_declare_common_object (FILE *stream, const char *name,
7019 const char *init_string,
7020 unsigned HOST_WIDE_INT size,
7021 unsigned int align, bool takes_alignment_p)
7023 if (!takes_alignment_p)
7025 size += (align / BITS_PER_UNIT) - 1;
7026 size -= size % (align / BITS_PER_UNIT);
7027 mips_declare_object (stream, name, init_string,
7028 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
7031 mips_declare_object (stream, name, init_string,
7032 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
7033 size, align / BITS_PER_UNIT);
7036 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
7037 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
7040 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
7041 unsigned HOST_WIDE_INT size,
7044 /* If the target wants uninitialized const declarations in
7045 .rdata then don't put them in .comm. */
7046 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
7047 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
7048 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
7050 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
7051 targetm.asm_out.globalize_label (stream, name);
7053 switch_to_section (readonly_data_section);
7054 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
7055 mips_declare_object (stream, name, "",
7056 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
7060 mips_declare_common_object (stream, name, "\n\t.comm\t",
7064 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
7065 extern int size_directive_output;
7067 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
7068 definitions except that it uses mips_declare_object() to emit the label. */
7071 mips_declare_object_name (FILE *stream, const char *name,
7072 tree decl ATTRIBUTE_UNUSED)
7074 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
7075 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
7078 size_directive_output = 0;
7079 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
7083 size_directive_output = 1;
7084 size = int_size_in_bytes (TREE_TYPE (decl));
7085 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7088 mips_declare_object (stream, name, "", ":\n");
7091 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
7094 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
7098 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
7099 if (!flag_inhibit_size_directive
7100 && DECL_SIZE (decl) != 0
7101 && !at_end && top_level
7102 && DECL_INITIAL (decl) == error_mark_node
7103 && !size_directive_output)
7107 size_directive_output = 1;
7108 size = int_size_in_bytes (TREE_TYPE (decl));
7109 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
7114 /* Return the FOO in the name of the ".mdebug.FOO" section associated
7115 with the current ABI. */
7118 mips_mdebug_abi_name (void)
7131 return TARGET_64BIT ? "eabi64" : "eabi32";
7137 /* Implement TARGET_ASM_FILE_START. */
7140 mips_file_start (void)
7142 default_file_start ();
7146 /* Generate a special section to describe the ABI switches used to
7147 produce the resultant binary. This used to be done by the assembler
7148 setting bits in the ELF header's flags field, but we have run out of
7149 bits. GDB needs this information in order to be able to correctly
7150 debug these binaries. See the function mips_gdbarch_init() in
7151 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
7152 causes unnecessary IRIX 6 ld warnings. */
7153 /* Note - we use fprintf directly rather than calling switch_to_section
7154 because in this way we can avoid creating an allocated section. We
7155 do not want this section to take up any space in the running
7157 fprintf (asm_out_file, "\t.section .mdebug.%s\n\t.previous\n",
7158 mips_mdebug_abi_name ());
7160 /* There is no ELF header flag to distinguish long32 forms of the
7161 EABI from long64 forms. Emit a special section to help tools
7162 such as GDB. Do the same for o64, which is sometimes used with
7164 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
7165 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n"
7166 "\t.previous\n", TARGET_LONG64 ? 64 : 32);
7168 #ifdef HAVE_AS_GNU_ATTRIBUTE
7169 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
7170 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
7174 /* Generate the pseudo ops that System V.4 wants. */
7175 if (TARGET_ABICALLS)
7176 fprintf (asm_out_file, "\t.abicalls\n");
7178 if (flag_verbose_asm)
7179 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
7181 mips_section_threshold, mips_arch_info->name, mips_isa);
7185 /* Make the last instruction frame related and note that it performs
7186 the operation described by FRAME_PATTERN. */
7189 mips_set_frame_expr (rtx frame_pattern)
7193 insn = get_last_insn ();
7194 RTX_FRAME_RELATED_P (insn) = 1;
7195 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7201 /* Return a frame-related rtx that stores REG at MEM.
7202 REG must be a single register. */
7205 mips_frame_set (rtx mem, rtx reg)
7209 /* If we're saving the return address register and the dwarf return
7210 address column differs from the hard register number, adjust the
7211 note reg to refer to the former. */
7212 if (REGNO (reg) == GP_REG_FIRST + 31
7213 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7214 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7216 set = gen_rtx_SET (VOIDmode, mem, reg);
7217 RTX_FRAME_RELATED_P (set) = 1;
7222 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
7223 mips16e_s2_s8_regs[X], it must also save the registers in indexes
7224 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
7225 static const unsigned char mips16e_s2_s8_regs[] = {
7226 30, 23, 22, 21, 20, 19, 18
7228 static const unsigned char mips16e_a0_a3_regs[] = {
7232 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
7233 ordered from the uppermost in memory to the lowest in memory. */
7234 static const unsigned char mips16e_save_restore_regs[] = {
7235 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
7238 /* Return the index of the lowest X in the range [0, SIZE) for which
7239 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
7242 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
7247 for (i = 0; i < size; i++)
7248 if (BITSET_P (mask, regs[i]))
7254 /* *MASK_PTR is a mask of general-purpose registers and *NUM_REGS_PTR
7255 is the number of set bits. If *MASK_PTR contains REGS[X] for some X
7256 in [0, SIZE), adjust *MASK_PTR and *NUM_REGS_PTR so that the same
7257 is true for all indexes (X, SIZE). */
7260 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
7261 unsigned int size, unsigned int *num_regs_ptr)
7265 i = mips16e_find_first_register (*mask_ptr, regs, size);
7266 for (i++; i < size; i++)
7267 if (!BITSET_P (*mask_ptr, regs[i]))
7270 *mask_ptr |= 1 << regs[i];
7274 /* Return a simplified form of X using the register values in REG_VALUES.
7275 REG_VALUES[R] is the last value assigned to hard register R, or null
7276 if R has not been modified.
7278 This function is rather limited, but is good enough for our purposes. */
7281 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7285 x = avoid_constant_pool_reference (x);
7289 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7290 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7291 x0, GET_MODE (XEXP (x, 0)));
7294 if (ARITHMETIC_P (x))
7296 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7297 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7298 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
7302 && reg_values[REGNO (x)]
7303 && !rtx_unstable_p (reg_values[REGNO (x)]))
7304 return reg_values[REGNO (x)];
7309 /* Return true if (set DEST SRC) stores an argument register into its
7310 caller-allocated save slot, storing the number of that argument
7311 register in *REGNO_PTR if so. REG_VALUES is as for
7312 mips16e_collect_propagate_value. */
7315 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7316 unsigned int *regno_ptr)
7318 unsigned int argno, regno;
7319 HOST_WIDE_INT offset, required_offset;
7322 /* Check that this is a word-mode store. */
7323 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7326 /* Check that the register being saved is an unmodified argument
7328 regno = REGNO (src);
7329 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7331 argno = regno - GP_ARG_FIRST;
7333 /* Check whether the address is an appropriate stack pointer or
7334 frame pointer access. */
7335 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7336 mips_split_plus (addr, &base, &offset);
7337 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7338 if (base == hard_frame_pointer_rtx)
7339 required_offset -= cfun->machine->frame.hard_frame_pointer_offset;
7340 else if (base != stack_pointer_rtx)
7342 if (offset != required_offset)
7349 /* A subroutine of mips_expand_prologue, called only when generating
7350 MIPS16e SAVE instructions. Search the start of the function for any
7351 instructions that save argument registers into their caller-allocated
7352 save slots. Delete such instructions and return a value N such that
7353 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7354 instructions redundant. */
7357 mips16e_collect_argument_saves (void)
7359 rtx reg_values[FIRST_PSEUDO_REGISTER];
7360 rtx insn, next, set, dest, src;
7361 unsigned int nargs, regno;
7363 push_topmost_sequence ();
7365 memset (reg_values, 0, sizeof (reg_values));
7366 for (insn = get_insns (); insn; insn = next)
7368 next = NEXT_INSN (insn);
7375 set = PATTERN (insn);
7376 if (GET_CODE (set) != SET)
7379 dest = SET_DEST (set);
7380 src = SET_SRC (set);
7381 if (mips16e_collect_argument_save_p (dest, src, reg_values, ®no))
7383 if (!BITSET_P (cfun->machine->frame.mask, regno))
7386 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
7389 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7390 reg_values[REGNO (dest)]
7391 = mips16e_collect_propagate_value (src, reg_values);
7395 pop_topmost_sequence ();
7400 /* Return a move between register REGNO and memory location SP + OFFSET.
7401 Make the move a load if RESTORE_P, otherwise make it a frame-related
7405 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
7410 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7411 reg = gen_rtx_REG (SImode, regno);
7413 ? gen_rtx_SET (VOIDmode, reg, mem)
7414 : mips_frame_set (mem, reg));
7417 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7418 The instruction must:
7420 - Allocate or deallocate SIZE bytes in total; SIZE is known
7423 - Save or restore as many registers in *MASK_PTR as possible.
7424 The instruction saves the first registers at the top of the
7425 allocated area, with the other registers below it.
7427 - Save NARGS argument registers above the allocated area.
7429 (NARGS is always zero if RESTORE_P.)
7431 The SAVE and RESTORE instructions cannot save and restore all general
7432 registers, so there may be some registers left over for the caller to
7433 handle. Destructively modify *MASK_PTR so that it contains the registers
7434 that still need to be saved or restored. The caller can save these
7435 registers in the memory immediately below *OFFSET_PTR, which is a
7436 byte offset from the bottom of the allocated stack area. */
7439 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7440 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7444 HOST_WIDE_INT offset, top_offset;
7445 unsigned int i, regno;
7448 gcc_assert (cfun->machine->frame.num_fp == 0);
7450 /* Calculate the number of elements in the PARALLEL. We need one element
7451 for the stack adjustment, one for each argument register save, and one
7452 for each additional register move. */
7454 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7455 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7458 /* Create the final PARALLEL. */
7459 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7462 /* Add the stack pointer adjustment. */
7463 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7464 plus_constant (stack_pointer_rtx,
7465 restore_p ? size : -size));
7466 RTX_FRAME_RELATED_P (set) = 1;
7467 XVECEXP (pattern, 0, n++) = set;
7469 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7470 top_offset = restore_p ? size : 0;
7472 /* Save the arguments. */
7473 for (i = 0; i < nargs; i++)
7475 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7476 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7477 XVECEXP (pattern, 0, n++) = set;
7480 /* Then fill in the other register moves. */
7481 offset = top_offset;
7482 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7484 regno = mips16e_save_restore_regs[i];
7485 if (BITSET_P (*mask_ptr, regno))
7487 offset -= UNITS_PER_WORD;
7488 set = mips16e_save_restore_reg (restore_p, offset, regno);
7489 XVECEXP (pattern, 0, n++) = set;
7490 *mask_ptr &= ~(1 << regno);
7494 /* Tell the caller what offset it should use for the remaining registers. */
7495 *offset_ptr = size + (offset - top_offset) + size;
7497 gcc_assert (n == XVECLEN (pattern, 0));
7502 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7503 pointer. Return true if PATTERN matches the kind of instruction
7504 generated by mips16e_build_save_restore. If INFO is nonnull,
7505 initialize it when returning true. */
7508 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7509 struct mips16e_save_restore_info *info)
7511 unsigned int i, nargs, mask, extra;
7512 HOST_WIDE_INT top_offset, save_offset, offset;
7513 rtx set, reg, mem, base;
7516 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7519 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7520 top_offset = adjust > 0 ? adjust : 0;
7522 /* Interpret all other members of the PARALLEL. */
7523 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7527 for (n = 1; n < XVECLEN (pattern, 0); n++)
7529 /* Check that we have a SET. */
7530 set = XVECEXP (pattern, 0, n);
7531 if (GET_CODE (set) != SET)
7534 /* Check that the SET is a load (if restoring) or a store
7536 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7540 /* Check that the address is the sum of the stack pointer and a
7541 possibly-zero constant offset. */
7542 mips_split_plus (XEXP (mem, 0), &base, &offset);
7543 if (base != stack_pointer_rtx)
7546 /* Check that SET's other operand is a register. */
7547 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7551 /* Check for argument saves. */
7552 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7553 && REGNO (reg) == GP_ARG_FIRST + nargs)
7555 else if (offset == save_offset)
7557 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7558 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7561 mask |= 1 << REGNO (reg);
7562 save_offset -= GET_MODE_SIZE (gpr_mode);
7568 /* Check that the restrictions on register ranges are met. */
7570 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7571 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7572 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7573 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7577 /* Make sure that the topmost argument register is not saved twice.
7578 The checks above ensure that the same is then true for the other
7579 argument registers. */
7580 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7583 /* Pass back information, if requested. */
7586 info->nargs = nargs;
7588 info->size = (adjust > 0 ? adjust : -adjust);
7594 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7595 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7596 the null terminator. */
7599 mips16e_add_register_range (char *s, unsigned int min_reg,
7600 unsigned int max_reg)
7602 if (min_reg != max_reg)
7603 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7605 s += sprintf (s, ",%s", reg_names[min_reg]);
7609 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7610 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
7613 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7615 static char buffer[300];
7617 struct mips16e_save_restore_info info;
7618 unsigned int i, end;
7621 /* Parse the pattern. */
7622 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7625 /* Add the mnemonic. */
7626 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7629 /* Save the arguments. */
7631 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7632 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7633 else if (info.nargs == 1)
7634 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7636 /* Emit the amount of stack space to allocate or deallocate. */
7637 s += sprintf (s, "%d", (int) info.size);
7639 /* Save or restore $16. */
7640 if (BITSET_P (info.mask, 16))
7641 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7643 /* Save or restore $17. */
7644 if (BITSET_P (info.mask, 17))
7645 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7647 /* Save or restore registers in the range $s2...$s8, which
7648 mips16e_s2_s8_regs lists in decreasing order. Note that this
7649 is a software register range; the hardware registers are not
7650 numbered consecutively. */
7651 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7652 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7654 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7655 mips16e_s2_s8_regs[i]);
7657 /* Save or restore registers in the range $a0...$a3. */
7658 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7659 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7661 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7662 mips16e_a0_a3_regs[end - 1]);
7664 /* Save or restore $31. */
7665 if (BITSET_P (info.mask, 31))
7666 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7671 /* Return true if the current function has an insn that implicitly
7675 mips_function_has_gp_insn (void)
7677 /* Don't bother rechecking if we found one last time. */
7678 if (!cfun->machine->has_gp_insn_p)
7682 push_topmost_sequence ();
7683 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7685 && GET_CODE (PATTERN (insn)) != USE
7686 && GET_CODE (PATTERN (insn)) != CLOBBER
7687 && (get_attr_got (insn) != GOT_UNSET
7688 || small_data_pattern (PATTERN (insn), VOIDmode)))
7690 pop_topmost_sequence ();
7692 cfun->machine->has_gp_insn_p = (insn != 0);
7694 return cfun->machine->has_gp_insn_p;
7698 /* Return the register that should be used as the global pointer
7699 within this function. Return 0 if the function doesn't need
7700 a global pointer. */
7703 mips_global_pointer (void)
7707 /* $gp is always available unless we're using a GOT. */
7708 if (!TARGET_USE_GOT)
7709 return GLOBAL_POINTER_REGNUM;
7711 /* We must always provide $gp when it is used implicitly. */
7712 if (!TARGET_EXPLICIT_RELOCS)
7713 return GLOBAL_POINTER_REGNUM;
7715 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
7717 if (current_function_profile)
7718 return GLOBAL_POINTER_REGNUM;
7720 /* If the function has a nonlocal goto, $gp must hold the correct
7721 global pointer for the target function. */
7722 if (current_function_has_nonlocal_goto)
7723 return GLOBAL_POINTER_REGNUM;
7725 /* If the gp is never referenced, there's no need to initialize it.
7726 Note that reload can sometimes introduce constant pool references
7727 into a function that otherwise didn't need them. For example,
7728 suppose we have an instruction like:
7730 (set (reg:DF R1) (float:DF (reg:SI R2)))
7732 If R2 turns out to be constant such as 1, the instruction may have a
7733 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
7734 using this constant if R2 doesn't get allocated to a register.
7736 In cases like these, reload will have added the constant to the pool
7737 but no instruction will yet refer to it. */
7738 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
7739 && !current_function_uses_const_pool
7740 && !mips_function_has_gp_insn ())
7743 /* We need a global pointer, but perhaps we can use a call-clobbered
7744 register instead of $gp. */
7745 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
7746 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7747 if (!df_regs_ever_live_p (regno)
7748 && call_really_used_regs[regno]
7749 && !fixed_regs[regno]
7750 && regno != PIC_FUNCTION_ADDR_REGNUM)
7753 return GLOBAL_POINTER_REGNUM;
7756 /* Return true if the current function returns its value in a floating-point
7757 register in MIPS16 mode. */
7760 mips16_cfun_returns_in_fpr_p (void)
7762 tree return_type = DECL_RESULT (current_function_decl);
7763 return (TARGET_MIPS16
7764 && TARGET_HARD_FLOAT_ABI
7765 && !aggregate_value_p (return_type, current_function_decl)
7766 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
7770 /* Return true if the current function must save REGNO. */
7773 mips_save_reg_p (unsigned int regno)
7775 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
7776 if we have not chosen a call-clobbered substitute. */
7777 if (regno == GLOBAL_POINTER_REGNUM)
7778 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
7780 /* Check call-saved registers. */
7781 if ((current_function_saves_all_registers || df_regs_ever_live_p (regno))
7782 && !call_really_used_regs[regno])
7785 /* Save both registers in an FPR pair if either one is used. This is
7786 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
7787 register to be used without the even register. */
7788 if (FP_REG_P (regno)
7789 && MAX_FPRS_PER_FMT == 2
7790 && df_regs_ever_live_p (regno + 1)
7791 && !call_really_used_regs[regno + 1])
7794 /* We need to save the old frame pointer before setting up a new one. */
7795 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
7798 /* Check for registers that must be saved for FUNCTION_PROFILER. */
7799 if (current_function_profile && MIPS_SAVE_REG_FOR_PROFILING_P (regno))
7802 /* We need to save the incoming return address if it is ever clobbered
7803 within the function, if __builtin_eh_return is being used to set a
7804 different return address, or if a stub is being used to return a
7806 if (regno == GP_REG_FIRST + 31
7807 && (df_regs_ever_live_p (regno)
7808 || current_function_calls_eh_return
7809 || mips16_cfun_returns_in_fpr_p ()))
7815 /* Populate the current function's mips_frame_info structure.
7817 MIPS stack frames look like:
7819 +-------------------------------+
7821 | incoming stack arguments |
7823 +-------------------------------+
7825 | caller-allocated save area |
7826 A | for register arguments |
7828 +-------------------------------+ <-- incoming stack pointer
7830 | callee-allocated save area |
7831 B | for arguments that are |
7832 | split between registers and |
7835 +-------------------------------+ <-- arg_pointer_rtx
7837 C | callee-allocated save area |
7838 | for register varargs |
7840 +-------------------------------+ <-- frame_pointer_rtx + fp_sp_offset
7841 | | + UNITS_PER_HWFPVALUE
7844 +-------------------------------+ <-- frame_pointer_rtx + gp_sp_offset
7845 | | + UNITS_PER_WORD
7848 +-------------------------------+
7850 | local variables | | var_size
7852 +-------------------------------+
7854 | $gp save area | | cprestore_size
7856 P +-------------------------------+ <-- hard_frame_pointer_rtx for
7858 | outgoing stack arguments |
7860 +-------------------------------+
7862 | caller-allocated save area |
7863 | for register arguments |
7865 +-------------------------------+ <-- stack_pointer_rtx
7867 hard_frame_pointer_rtx for
7870 At least two of A, B and C will be empty.
7872 Dynamic stack allocations such as alloca insert data at point P.
7873 They decrease stack_pointer_rtx but leave frame_pointer_rtx and
7874 hard_frame_pointer_rtx unchanged. */
7877 mips_compute_frame_info (void)
7879 struct mips_frame_info *frame;
7880 HOST_WIDE_INT offset, size;
7881 unsigned int regno, i;
7883 frame = &cfun->machine->frame;
7884 memset (frame, 0, sizeof (*frame));
7885 size = get_frame_size ();
7887 cfun->machine->global_pointer = mips_global_pointer ();
7889 /* The first STARTING_FRAME_OFFSET bytes contain the outgoing argument
7890 area and the $gp save slot. This area isn't needed in leaf functions,
7891 but if the target-independent frame size is nonzero, we're committed
7892 to allocating it anyway. */
7893 if (size == 0 && current_function_is_leaf)
7895 /* The MIPS 3.0 linker does not like functions that dynamically
7896 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
7897 looks like we are trying to create a second frame pointer to the
7898 function, so allocate some stack space to make it happy. */
7899 if (current_function_calls_alloca)
7900 frame->args_size = REG_PARM_STACK_SPACE (cfun->decl);
7902 frame->args_size = 0;
7903 frame->cprestore_size = 0;
7907 frame->args_size = current_function_outgoing_args_size;
7908 frame->cprestore_size = STARTING_FRAME_OFFSET - frame->args_size;
7910 offset = frame->args_size + frame->cprestore_size;
7912 /* Move above the local variables. */
7913 frame->var_size = MIPS_STACK_ALIGN (size);
7914 offset += frame->var_size;
7916 /* Find out which GPRs we need to save. */
7917 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
7918 if (mips_save_reg_p (regno))
7921 frame->mask |= 1 << (regno - GP_REG_FIRST);
7924 /* If this function calls eh_return, we must also save and restore the
7925 EH data registers. */
7926 if (current_function_calls_eh_return)
7927 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; i++)
7930 frame->mask |= 1 << (EH_RETURN_DATA_REGNO (i) - GP_REG_FIRST);
7933 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
7934 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
7935 save all later registers too. */
7936 if (GENERATE_MIPS16E_SAVE_RESTORE)
7938 mips16e_mask_registers (&frame->mask, mips16e_s2_s8_regs,
7939 ARRAY_SIZE (mips16e_s2_s8_regs), &frame->num_gp);
7940 mips16e_mask_registers (&frame->mask, mips16e_a0_a3_regs,
7941 ARRAY_SIZE (mips16e_a0_a3_regs), &frame->num_gp);
7944 /* Move above the GPR save area. */
7945 if (frame->num_gp > 0)
7947 offset += MIPS_STACK_ALIGN (frame->num_gp * UNITS_PER_WORD);
7948 frame->gp_sp_offset = offset - UNITS_PER_WORD;
7951 /* Find out which FPRs we need to save. This loop must iterate over
7952 the same space as its companion in mips_for_each_saved_reg. */
7953 if (TARGET_HARD_FLOAT)
7954 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno += MAX_FPRS_PER_FMT)
7955 if (mips_save_reg_p (regno))
7957 frame->num_fp += MAX_FPRS_PER_FMT;
7958 frame->fmask |= ~(~0 << MAX_FPRS_PER_FMT) << (regno - FP_REG_FIRST);
7961 /* Move above the FPR save area. */
7962 if (frame->num_fp > 0)
7964 offset += MIPS_STACK_ALIGN (frame->num_fp * UNITS_PER_FPREG);
7965 frame->fp_sp_offset = offset - UNITS_PER_HWFPVALUE;
7968 /* Move above the callee-allocated varargs save area. */
7969 offset += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
7970 frame->arg_pointer_offset = offset;
7972 /* Move above the callee-allocated area for pretend stack arguments. */
7973 offset += current_function_pretend_args_size;
7974 frame->total_size = offset;
7976 /* Work out the offsets of the save areas from the top of the frame. */
7977 if (frame->gp_sp_offset > 0)
7978 frame->gp_save_offset = frame->gp_sp_offset - offset;
7979 if (frame->fp_sp_offset > 0)
7980 frame->fp_save_offset = frame->fp_sp_offset - offset;
7982 /* MIPS16 code offsets the frame pointer by the size of the outgoing
7983 arguments. This tends to increase the chances of using unextended
7984 instructions for local variables and incoming arguments. */
7986 frame->hard_frame_pointer_offset = frame->args_size;
7989 /* Return the style of GP load sequence that is being used for the
7990 current function. */
7992 enum mips_loadgp_style
7993 mips_current_loadgp_style (void)
7995 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
8001 if (TARGET_ABSOLUTE_ABICALLS)
8002 return LOADGP_ABSOLUTE;
8004 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
8007 /* Implement FRAME_POINTER_REQUIRED. */
8010 mips_frame_pointer_required (void)
8012 /* If the function contains dynamic stack allocations, we need to
8013 use the frame pointer to access the static parts of the frame. */
8014 if (current_function_calls_alloca)
8017 /* In MIPS16 mode, we need a frame pointer for a large frame; otherwise,
8018 reload may be unable to compute the address of a local variable,
8019 since there is no way to add a large constant to the stack pointer
8020 without using a second temporary register. */
8023 mips_compute_frame_info ();
8024 if (!SMALL_OPERAND (cfun->machine->frame.total_size))
8031 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
8032 pointer or argument pointer. TO is either the stack pointer or
8033 hard frame pointer. */
8036 mips_initial_elimination_offset (int from, int to)
8038 HOST_WIDE_INT offset;
8040 mips_compute_frame_info ();
8042 /* Set OFFSET to the offset from the soft frame pointer, which is also
8043 the offset from the end-of-prologue stack pointer. */
8046 case FRAME_POINTER_REGNUM:
8050 case ARG_POINTER_REGNUM:
8051 offset = cfun->machine->frame.arg_pointer_offset;
8058 if (to == HARD_FRAME_POINTER_REGNUM)
8059 offset -= cfun->machine->frame.hard_frame_pointer_offset;
8064 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
8065 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
8068 mips_extra_live_on_entry (bitmap regs)
8070 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
8071 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
8074 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
8075 back to a previous frame. */
8078 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
8083 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
8086 /* Emit code to change the current function's return address to
8087 ADDRESS. SCRATCH is available as a scratch register, if needed.
8088 ADDRESS and SCRATCH are both word-mode GPRs. */
8091 mips_set_return_address (rtx address, rtx scratch)
8095 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
8096 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
8097 cfun->machine->frame.gp_sp_offset);
8099 mips_emit_move (gen_rtx_MEM (GET_MODE (address), slot_address), address);
8102 /* Restore $gp from its save slot. Valid only when using o32 or
8106 mips_restore_gp (void)
8110 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
8112 address = mips_add_offset (pic_offset_table_rtx,
8113 frame_pointer_needed
8114 ? hard_frame_pointer_rtx
8115 : stack_pointer_rtx,
8116 current_function_outgoing_args_size);
8117 slot = gen_rtx_MEM (Pmode, address);
8119 mips_emit_move (pic_offset_table_rtx, slot);
8120 if (!TARGET_EXPLICIT_RELOCS)
8121 emit_insn (gen_blockage ());
8124 /* A function to save or store a register. The first argument is the
8125 register and the second is the stack slot. */
8126 typedef void (*mips_save_restore_fn) (rtx, rtx);
8128 /* Use FN to save or restore register REGNO. MODE is the register's
8129 mode and OFFSET is the offset of its save slot from the current
8133 mips_save_restore_reg (enum machine_mode mode, int regno,
8134 HOST_WIDE_INT offset, mips_save_restore_fn fn)
8138 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
8140 fn (gen_rtx_REG (mode, regno), mem);
8144 /* Call FN for each register that is saved by the current function.
8145 SP_OFFSET is the offset of the current stack pointer from the start
8149 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
8151 enum machine_mode fpr_mode;
8152 HOST_WIDE_INT offset;
8155 /* Save registers starting from high to low. The debuggers prefer at least
8156 the return register be stored at func+4, and also it allows us not to
8157 need a nop in the epilogue if at least one register is reloaded in
8158 addition to return address. */
8159 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
8160 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
8161 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
8163 mips_save_restore_reg (gpr_mode, regno, offset, fn);
8164 offset -= GET_MODE_SIZE (gpr_mode);
8167 /* This loop must iterate over the same space as its companion in
8168 mips_compute_frame_info. */
8169 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
8170 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
8171 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
8172 regno >= FP_REG_FIRST;
8173 regno -= MAX_FPRS_PER_FMT)
8174 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
8176 mips_save_restore_reg (fpr_mode, regno, offset, fn);
8177 offset -= GET_MODE_SIZE (fpr_mode);
8181 /* If we're generating n32 or n64 abicalls, and the current function
8182 does not use $28 as its global pointer, emit a cplocal directive.
8183 Use pic_offset_table_rtx as the argument to the directive. */
8186 mips_output_cplocal (void)
8188 if (!TARGET_EXPLICIT_RELOCS
8189 && cfun->machine->global_pointer > 0
8190 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
8191 output_asm_insn (".cplocal %+", 0);
8194 /* Set up the stack and frame (if desired) for the function. */
8197 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8200 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
8202 #ifdef SDB_DEBUGGING_INFO
8203 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
8204 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
8207 /* In mips16 mode, we may need to generate a 32 bit to handle
8208 floating point arguments. The linker will arrange for any 32-bit
8209 functions to call this stub, which will then jump to the 16-bit
8212 && TARGET_HARD_FLOAT_ABI
8213 && current_function_args_info.fp_code != 0)
8214 build_mips16_function_stub ();
8216 /* Select the mips16 mode for this function. */
8218 fprintf (file, "\t.set\tmips16\n");
8220 fprintf (file, "\t.set\tnomips16\n");
8222 if (!FUNCTION_NAME_ALREADY_DECLARED)
8224 /* Get the function name the same way that toplev.c does before calling
8225 assemble_start_function. This is needed so that the name used here
8226 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8227 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8229 if (!flag_inhibit_size_directive)
8231 fputs ("\t.ent\t", file);
8232 assemble_name (file, fnname);
8236 assemble_name (file, fnname);
8237 fputs (":\n", file);
8240 /* Stop mips_file_end from treating this function as external. */
8241 if (TARGET_IRIX && mips_abi == ABI_32)
8242 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
8244 if (!flag_inhibit_size_directive)
8246 /* .frame FRAMEREG, FRAMESIZE, RETREG */
8248 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
8249 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
8250 ", args= " HOST_WIDE_INT_PRINT_DEC
8251 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
8252 (reg_names[(frame_pointer_needed)
8253 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
8254 (frame_pointer_needed
8255 ? tsize - cfun->machine->frame.hard_frame_pointer_offset
8257 reg_names[GP_REG_FIRST + 31],
8258 cfun->machine->frame.var_size,
8259 cfun->machine->frame.num_gp,
8260 cfun->machine->frame.num_fp,
8261 cfun->machine->frame.args_size,
8262 cfun->machine->frame.cprestore_size);
8264 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
8265 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8266 cfun->machine->frame.mask,
8267 cfun->machine->frame.gp_save_offset);
8268 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
8269 cfun->machine->frame.fmask,
8270 cfun->machine->frame.fp_save_offset);
8273 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
8274 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
8277 if (mips_current_loadgp_style () == LOADGP_OLDABI)
8279 /* Handle the initialization of $gp for SVR4 PIC. */
8280 if (!cfun->machine->all_noreorder_p)
8281 output_asm_insn ("%(.cpload\t%^%)", 0);
8283 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
8285 else if (cfun->machine->all_noreorder_p)
8286 output_asm_insn ("%(%<", 0);
8288 /* Tell the assembler which register we're using as the global
8289 pointer. This is needed for thunks, since they can use either
8290 explicit relocs or assembler macros. */
8291 mips_output_cplocal ();
8294 /* Do any necessary cleanup after a function to restore stack, frame,
8297 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
8300 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
8301 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
8303 /* Reinstate the normal $gp. */
8304 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
8305 mips_output_cplocal ();
8307 if (cfun->machine->all_noreorder_p)
8309 /* Avoid using %>%) since it adds excess whitespace. */
8310 output_asm_insn (".set\tmacro", 0);
8311 output_asm_insn (".set\treorder", 0);
8312 set_noreorder = set_nomacro = 0;
8315 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
8319 /* Get the function name the same way that toplev.c does before calling
8320 assemble_start_function. This is needed so that the name used here
8321 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
8322 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8323 fputs ("\t.end\t", file);
8324 assemble_name (file, fnname);
8329 /* Save register REG to MEM. Make the instruction frame-related. */
8332 mips_save_reg (rtx reg, rtx mem)
8334 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
8338 if (mips_split_64bit_move_p (mem, reg))
8339 mips_split_doubleword_move (mem, reg);
8341 mips_emit_move (mem, reg);
8343 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
8344 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
8345 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
8350 && REGNO (reg) != GP_REG_FIRST + 31
8351 && !M16_REG_P (REGNO (reg)))
8353 /* Save a non-mips16 register by moving it through a temporary.
8354 We don't need to do this for $31 since there's a special
8355 instruction for it. */
8356 mips_emit_move (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
8357 mips_emit_move (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
8360 mips_emit_move (mem, reg);
8362 mips_set_frame_expr (mips_frame_set (mem, reg));
8366 /* The __gnu_local_gp symbol. */
8368 static GTY(()) rtx mips_gnu_local_gp;
8370 /* If we're generating n32 or n64 abicalls, emit instructions
8371 to set up the global pointer. */
8374 mips_emit_loadgp (void)
8376 rtx addr, offset, incoming_address, base, index;
8378 switch (mips_current_loadgp_style ())
8380 case LOADGP_ABSOLUTE:
8381 if (mips_gnu_local_gp == NULL)
8383 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
8384 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
8386 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
8390 addr = XEXP (DECL_RTL (current_function_decl), 0);
8391 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
8392 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8393 emit_insn (gen_loadgp_newabi (offset, incoming_address));
8394 if (!TARGET_EXPLICIT_RELOCS)
8395 emit_insn (gen_loadgp_blockage ());
8399 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
8400 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
8401 emit_insn (gen_loadgp_rtp (base, index));
8402 if (!TARGET_EXPLICIT_RELOCS)
8403 emit_insn (gen_loadgp_blockage ());
8411 /* Expand the prologue into a bunch of separate insns. */
8414 mips_expand_prologue (void)
8420 if (cfun->machine->global_pointer > 0)
8421 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8423 size = cfun->machine->frame.total_size;
8425 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
8426 bytes beforehand; this is enough to cover the register save area
8427 without going out of range. */
8428 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8430 HOST_WIDE_INT step1;
8432 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
8434 if (GENERATE_MIPS16E_SAVE_RESTORE)
8436 HOST_WIDE_INT offset;
8437 unsigned int mask, regno;
8439 /* Try to merge argument stores into the save instruction. */
8440 nargs = mips16e_collect_argument_saves ();
8442 /* Build the save instruction. */
8443 mask = cfun->machine->frame.mask;
8444 insn = mips16e_build_save_restore (false, &mask, &offset,
8446 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8449 /* Check if we need to save other registers. */
8450 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8451 if (BITSET_P (mask, regno - GP_REG_FIRST))
8453 offset -= GET_MODE_SIZE (gpr_mode);
8454 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
8459 insn = gen_add3_insn (stack_pointer_rtx,
8462 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8464 mips_for_each_saved_reg (size, mips_save_reg);
8468 /* Allocate the rest of the frame. */
8471 if (SMALL_OPERAND (-size))
8472 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
8474 GEN_INT (-size)))) = 1;
8477 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
8480 /* There are no instructions to add or subtract registers
8481 from the stack pointer, so use the frame pointer as a
8482 temporary. We should always be using a frame pointer
8483 in this case anyway. */
8484 gcc_assert (frame_pointer_needed);
8485 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8486 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
8487 hard_frame_pointer_rtx,
8488 MIPS_PROLOGUE_TEMP (Pmode)));
8489 mips_emit_move (stack_pointer_rtx, hard_frame_pointer_rtx);
8492 emit_insn (gen_sub3_insn (stack_pointer_rtx,
8494 MIPS_PROLOGUE_TEMP (Pmode)));
8496 /* Describe the combined effect of the previous instructions. */
8498 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
8499 plus_constant (stack_pointer_rtx, -size)));
8503 /* Set up the frame pointer, if we're using one. */
8504 if (frame_pointer_needed)
8506 HOST_WIDE_INT offset;
8508 offset = cfun->machine->frame.hard_frame_pointer_offset;
8511 insn = mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8512 RTX_FRAME_RELATED_P (insn) = 1;
8514 else if (SMALL_OPERAND (offset))
8516 insn = gen_add3_insn (hard_frame_pointer_rtx,
8517 stack_pointer_rtx, GEN_INT (offset));
8518 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
8522 mips_emit_move (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (offset));
8523 mips_emit_move (hard_frame_pointer_rtx, stack_pointer_rtx);
8524 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
8525 hard_frame_pointer_rtx,
8526 MIPS_PROLOGUE_TEMP (Pmode)));
8528 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
8529 plus_constant (stack_pointer_rtx, offset)));
8533 mips_emit_loadgp ();
8535 /* If generating o32/o64 abicalls, save $gp on the stack. */
8536 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
8537 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
8539 /* If we are profiling, make sure no instructions are scheduled before
8540 the call to mcount. */
8542 if (current_function_profile)
8543 emit_insn (gen_blockage ());
8546 /* Emit instructions to restore register REG from slot MEM. */
8549 mips_restore_reg (rtx reg, rtx mem)
8551 /* There's no mips16 instruction to load $31 directly. Load into
8552 $7 instead and adjust the return insn appropriately. */
8553 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
8554 reg = gen_rtx_REG (GET_MODE (reg), 7);
8556 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
8558 /* Can't restore directly; move through a temporary. */
8559 mips_emit_move (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
8560 mips_emit_move (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
8563 mips_emit_move (reg, mem);
8567 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
8568 if this epilogue precedes a sibling call, false if it is for a normal
8569 "epilogue" pattern. */
8572 mips_expand_epilogue (int sibcall_p)
8574 HOST_WIDE_INT step1, step2;
8577 if (!sibcall_p && mips_can_use_return_insn ())
8579 emit_jump_insn (gen_return ());
8583 /* In mips16 mode, if the return value should go into a floating-point
8584 register, we need to call a helper routine to copy it over. */
8585 if (mips16_cfun_returns_in_fpr_p ())
8586 mips16_copy_fpr_return_value ();
8588 /* Split the frame into two. STEP1 is the amount of stack we should
8589 deallocate before restoring the registers. STEP2 is the amount we
8590 should deallocate afterwards.
8592 Start off by assuming that no registers need to be restored. */
8593 step1 = cfun->machine->frame.total_size;
8596 /* Work out which register holds the frame address. */
8597 if (!frame_pointer_needed)
8598 base = stack_pointer_rtx;
8601 base = hard_frame_pointer_rtx;
8602 step1 -= cfun->machine->frame.hard_frame_pointer_offset;
8605 /* If we need to restore registers, deallocate as much stack as
8606 possible in the second step without going out of range. */
8607 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
8609 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
8613 /* Set TARGET to BASE + STEP1. */
8619 /* Get an rtx for STEP1 that we can add to BASE. */
8620 adjust = GEN_INT (step1);
8621 if (!SMALL_OPERAND (step1))
8623 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), adjust);
8624 adjust = MIPS_EPILOGUE_TEMP (Pmode);
8627 /* Normal mode code can copy the result straight into $sp. */
8629 target = stack_pointer_rtx;
8631 emit_insn (gen_add3_insn (target, base, adjust));
8634 /* Copy TARGET into the stack pointer. */
8635 if (target != stack_pointer_rtx)
8636 mips_emit_move (stack_pointer_rtx, target);
8638 /* If we're using addressing macros, $gp is implicitly used by all
8639 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
8641 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
8642 emit_insn (gen_blockage ());
8644 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
8646 unsigned int regno, mask;
8647 HOST_WIDE_INT offset;
8650 /* Generate the restore instruction. */
8651 mask = cfun->machine->frame.mask;
8652 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
8654 /* Restore any other registers manually. */
8655 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
8656 if (BITSET_P (mask, regno - GP_REG_FIRST))
8658 offset -= GET_MODE_SIZE (gpr_mode);
8659 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
8662 /* Restore the remaining registers and deallocate the final bit
8664 emit_insn (restore);
8668 /* Restore the registers. */
8669 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
8672 /* Deallocate the final bit of the frame. */
8674 emit_insn (gen_add3_insn (stack_pointer_rtx,
8679 /* Add in the __builtin_eh_return stack adjustment. We need to
8680 use a temporary in mips16 code. */
8681 if (current_function_calls_eh_return)
8685 mips_emit_move (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
8686 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
8687 MIPS_EPILOGUE_TEMP (Pmode),
8688 EH_RETURN_STACKADJ_RTX));
8689 mips_emit_move (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
8692 emit_insn (gen_add3_insn (stack_pointer_rtx,
8694 EH_RETURN_STACKADJ_RTX));
8699 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8700 path will restore the return address into $7 rather than $31. */
8702 && !GENERATE_MIPS16E_SAVE_RESTORE
8703 && (cfun->machine->frame.mask & RA_MASK) != 0)
8704 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8705 GP_REG_FIRST + 7)));
8707 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8708 GP_REG_FIRST + 31)));
8712 /* Return nonzero if this function is known to have a null epilogue.
8713 This allows the optimizer to omit jumps to jumps if no stack
8717 mips_can_use_return_insn (void)
8719 if (! reload_completed)
8722 if (current_function_profile)
8725 /* In mips16 mode, a function that returns a floating point value
8726 needs to arrange to copy the return value into the floating point
8728 if (mips16_cfun_returns_in_fpr_p ())
8731 return cfun->machine->frame.total_size == 0;
8734 /* Return true if register REGNO can store a value of mode MODE.
8735 The result of this function is cached in mips_hard_regno_mode_ok. */
8738 mips_hard_regno_mode_ok_p (unsigned int regno, enum machine_mode mode)
8741 enum mode_class class;
8743 if (mode == CCV2mode)
8746 && (regno - ST_REG_FIRST) % 2 == 0);
8748 if (mode == CCV4mode)
8751 && (regno - ST_REG_FIRST) % 4 == 0);
8756 return regno == FPSW_REGNUM;
8758 return (ST_REG_P (regno)
8760 || FP_REG_P (regno));
8763 size = GET_MODE_SIZE (mode);
8764 class = GET_MODE_CLASS (mode);
8766 if (GP_REG_P (regno))
8767 return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
8769 if (FP_REG_P (regno)
8770 && (((regno - FP_REG_FIRST) % MAX_FPRS_PER_FMT) == 0
8771 || (MIN_FPRS_PER_FMT == 1 && size <= UNITS_PER_FPREG)))
8773 /* Allow TFmode for CCmode reloads. */
8774 if (mode == TFmode && ISA_HAS_8CC)
8777 if (class == MODE_FLOAT
8778 || class == MODE_COMPLEX_FLOAT
8779 || class == MODE_VECTOR_FLOAT)
8780 return size <= UNITS_PER_FPVALUE;
8782 /* Allow integer modes that fit into a single register. We need
8783 to put integers into FPRs when using instructions like CVT
8784 and TRUNC. There's no point allowing sizes smaller than a word,
8785 because the FPU has no appropriate load/store instructions. */
8786 if (class == MODE_INT)
8787 return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
8790 if (ACC_REG_P (regno)
8791 && (INTEGRAL_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode)))
8793 if (size <= UNITS_PER_WORD)
8796 if (size <= UNITS_PER_WORD * 2)
8797 return (DSP_ACC_REG_P (regno)
8798 ? ((regno - DSP_ACC_REG_FIRST) & 1) == 0
8799 : regno == MD_REG_FIRST);
8802 if (ALL_COP_REG_P (regno))
8803 return class == MODE_INT && size <= UNITS_PER_WORD;
8808 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
8809 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
8810 they only hold condition code modes, and CCmode is always considered to
8811 be 4 bytes wide. All other registers are word sized. */
8814 mips_hard_regno_nregs (int regno, enum machine_mode mode)
8816 if (ST_REG_P (regno))
8817 return ((GET_MODE_SIZE (mode) + 3) / 4);
8818 else if (! FP_REG_P (regno))
8819 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
8821 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
8824 /* Implement CLASS_MAX_NREGS.
8826 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8828 - ST_REGS are always hold CCmode values, and CCmode values are
8829 considered to be 4 bytes wide.
8831 All other register classes are covered by UNITS_PER_WORD. Note that
8832 this is true even for unions of integer and float registers when the
8833 latter are smaller than the former. The only supported combination
8834 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8835 words but 32-bit float registers. A word-based calculation is correct
8836 in that case since -msingle-float disallows multi-FPR values. */
8839 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8840 enum machine_mode mode)
8842 if (class == ST_REGS)
8843 return (GET_MODE_SIZE (mode) + 3) / 4;
8844 else if (class == FP_REGS)
8845 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8847 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
8850 /* Return true if registers of class CLASS cannot change from mode FROM
8854 mips_cannot_change_mode_class (enum machine_mode from ATTRIBUTE_UNUSED,
8855 enum machine_mode to ATTRIBUTE_UNUSED,
8856 enum reg_class class)
8858 /* There are several problems with changing the modes of values
8859 in floating-point registers:
8861 - When a multi-word value is stored in paired floating-point
8862 registers, the first register always holds the low word.
8863 We therefore can't allow FPRs to change between single-word
8864 and multi-word modes on big-endian targets.
8866 - GCC assumes that each word of a multiword register can be accessed
8867 individually using SUBREGs. This is not true for floating-point
8868 registers if they are bigger than a word.
8870 - Loading a 32-bit value into a 64-bit floating-point register
8871 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8872 We can't allow FPRs to change from SImode to to a wider mode on
8875 - If the FPU has already interpreted a value in one format, we must
8876 not ask it to treat the value as having a different format.
8878 We therefore only allow changes between 4-byte and smaller integer
8879 values, all of which have the "W" format as far as the FPU is
8881 return (reg_classes_intersect_p (FP_REGS, class)
8882 && (GET_MODE_CLASS (from) != MODE_INT
8883 || GET_MODE_CLASS (to) != MODE_INT
8884 || GET_MODE_SIZE (from) > 4
8885 || GET_MODE_SIZE (to) > 4));
8888 /* Return true if moves in mode MODE can use the FPU's mov.fmt instruction. */
8891 mips_mode_ok_for_mov_fmt_p (enum machine_mode mode)
8896 return TARGET_HARD_FLOAT;
8899 return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
8902 return TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT;
8909 /* Implement PREFERRED_RELOAD_CLASS. */
8912 mips_preferred_reload_class (rtx x, enum reg_class class)
8914 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
8917 if (reg_class_subset_p (FP_REGS, class)
8918 && mips_mode_ok_for_mov_fmt_p (GET_MODE (x)))
8921 if (reg_class_subset_p (GR_REGS, class))
8924 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8930 /* Return a number assessing the cost of moving a register in class
8931 FROM to class TO. The classes are expressed using the enumeration
8932 values such as `GENERAL_REGS'. A value of 2 is the default; other
8933 values are interpreted relative to that.
8935 It is not required that the cost always equal 2 when FROM is the
8936 same as TO; on some machines it is expensive to move between
8937 registers if they are not general registers.
8939 If reload sees an insn consisting of a single `set' between two
8940 hard registers, and if `REGISTER_MOVE_COST' applied to their
8941 classes returns a value of 2, reload does not check to ensure that
8942 the constraints of the insn are met. Setting a cost of other than
8943 2 will allow reload to verify that the constraints are met. You
8944 should do this if the `movM' pattern's constraints do not allow
8947 ??? We make the cost of moving from HI/LO into general
8948 registers the same as for one of moving general registers to
8949 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8950 pseudo to HI/LO. This might hurt optimizations though, it
8951 isn't clear if it is wise. And it might not work in all cases. We
8952 could solve the DImode LO reg problem by using a multiply, just
8953 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8954 problem by using divide instructions. divu puts the remainder in
8955 the HI reg, so doing a divide by -1 will move the value in the HI
8956 reg for all values except -1. We could handle that case by using a
8957 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8958 a compare/branch to test the input value to see which instruction
8959 we need to use. This gets pretty messy, but it is feasible. */
8962 mips_register_move_cost (enum machine_mode mode,
8963 enum reg_class to, enum reg_class from)
8967 if (reg_class_subset_p (from, GENERAL_REGS)
8968 && reg_class_subset_p (to, GENERAL_REGS))
8970 if (reg_class_subset_p (from, M16_REGS)
8971 || reg_class_subset_p (to, M16_REGS))
8977 else if (reg_class_subset_p (from, GENERAL_REGS))
8979 if (reg_class_subset_p (to, GENERAL_REGS))
8981 if (reg_class_subset_p (to, FP_REGS))
8983 if (reg_class_subset_p (to, ALL_COP_AND_GR_REGS))
8985 if (reg_class_subset_p (to, ACC_REGS))
8988 else if (reg_class_subset_p (to, GENERAL_REGS))
8990 if (reg_class_subset_p (from, FP_REGS))
8992 if (reg_class_subset_p (from, ST_REGS))
8993 /* LUI followed by MOVF. */
8995 if (reg_class_subset_p (from, ALL_COP_AND_GR_REGS))
8997 if (reg_class_subset_p (from, ACC_REGS))
9000 else if (reg_class_subset_p (from, FP_REGS))
9002 if (reg_class_subset_p (to, FP_REGS)
9003 && mips_mode_ok_for_mov_fmt_p (mode))
9005 if (reg_class_subset_p (to, ST_REGS))
9006 /* An expensive sequence. */
9013 /* This function returns the register class required for a secondary
9014 register when copying between one of the registers in CLASS, and X,
9015 using MODE. If IN_P is nonzero, the copy is going from X to the
9016 register, otherwise the register is the source. A return value of
9017 NO_REGS means that no secondary register is required. */
9020 mips_secondary_reload_class (enum reg_class class,
9021 enum machine_mode mode, rtx x, int in_p)
9025 /* If X is a constant that cannot be loaded into $25, it must be loaded
9026 into some other GPR. No other register class allows a direct move. */
9027 if (mips_dangerous_for_la25_p (x))
9028 return reg_class_subset_p (class, LEA_REGS) ? NO_REGS : LEA_REGS;
9030 regno = true_regnum (x);
9033 /* In MIPS16 mode, every move must involve a member of M16_REGS. */
9034 if (!reg_class_subset_p (class, M16_REGS) && !M16_REG_P (regno))
9037 /* We can't really copy to HI or LO at all in MIPS16 mode. */
9038 if (in_p ? reg_classes_intersect_p (class, ACC_REGS) : ACC_REG_P (regno))
9044 /* Copying from accumulator registers to anywhere other than a general
9045 register requires a temporary general register. */
9046 if (reg_class_subset_p (class, ACC_REGS))
9047 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9048 if (ACC_REG_P (regno))
9049 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9051 /* We can only copy a value to a condition code register from a
9052 floating point register, and even then we require a scratch
9053 floating point register. We can only copy a value out of a
9054 condition code register into a general register. */
9055 if (reg_class_subset_p (class, ST_REGS))
9059 return GP_REG_P (regno) ? NO_REGS : GR_REGS;
9061 if (ST_REG_P (regno))
9065 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9068 if (reg_class_subset_p (class, FP_REGS))
9071 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8))
9072 /* In this case we can use lwc1, swc1, ldc1 or sdc1. We'll use
9073 pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported. */
9076 if (GP_REG_P (regno) || x == CONST0_RTX (mode))
9077 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
9080 if (CONSTANT_P (x) && !targetm.cannot_force_const_mem (x))
9081 /* We can force the constant to memory and use lwc1
9082 and ldc1. As above, we will use pairs of lwc1s if
9083 ldc1 is not supported. */
9086 if (FP_REG_P (regno) && mips_mode_ok_for_mov_fmt_p (mode))
9087 /* In this case we can use mov.fmt. */
9090 /* Otherwise, we need to reload through an integer register. */
9093 if (FP_REG_P (regno))
9094 return reg_class_subset_p (class, GR_REGS) ? NO_REGS : GR_REGS;
9099 /* SImode values are represented as sign-extended to DImode. */
9102 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
9104 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
9111 mips_valid_pointer_mode (enum machine_mode mode)
9113 return (mode == SImode || (TARGET_64BIT && mode == DImode));
9116 /* Target hook for vector_mode_supported_p. */
9119 mips_vector_mode_supported_p (enum machine_mode mode)
9124 return TARGET_PAIRED_SINGLE_FLOAT;
9141 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
9144 mips_scalar_mode_supported_p (enum machine_mode mode)
9146 if (ALL_FIXED_POINT_MODE_P (mode)
9147 && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
9150 return default_scalar_mode_supported_p (mode);
9152 /* This function does three things:
9154 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9155 - Register the mips16 hardware floating point stubs.
9156 - Register the gofast functions if selected using --enable-gofast. */
9158 #include "config/gofast.h"
9161 mips_init_libfuncs (void)
9163 if (TARGET_FIX_VR4120)
9165 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9166 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9169 if (TARGET_MIPS16 && TARGET_HARD_FLOAT_ABI)
9171 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9172 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9173 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9174 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9176 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9177 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9178 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9179 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9180 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9181 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9182 set_optab_libfunc (unord_optab, SFmode, "__mips16_unordsf2");
9184 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9185 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9186 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__mips16_floatunsisf");
9188 if (TARGET_DOUBLE_FLOAT)
9190 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9191 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9192 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9193 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9195 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9196 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9197 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9198 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9199 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9200 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9201 set_optab_libfunc (unord_optab, DFmode, "__mips16_unorddf2");
9203 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9204 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9206 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9207 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9208 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__mips16_floatunsidf");
9212 gofast_maybe_init_libfuncs ();
9215 /* Return the length of INSN. LENGTH is the initial length computed by
9216 attributes in the machine-description file. */
9219 mips_adjust_insn_length (rtx insn, int length)
9221 /* A unconditional jump has an unfilled delay slot if it is not part
9222 of a sequence. A conditional jump normally has a delay slot, but
9223 does not on MIPS16. */
9224 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9227 /* See how many nops might be needed to avoid hardware hazards. */
9228 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9229 switch (get_attr_hazard (insn))
9243 /* All MIPS16 instructions are a measly two bytes. */
9251 /* Return an asm sequence to start a noat block and load the address
9252 of a label into $1. */
9255 mips_output_load_label (void)
9257 if (TARGET_EXPLICIT_RELOCS)
9261 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9264 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9267 if (ISA_HAS_LOAD_DELAY)
9268 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9269 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9273 if (Pmode == DImode)
9274 return "%[dla\t%@,%0";
9276 return "%[la\t%@,%0";
9280 /* Return the assembly code for INSN, which has the operands given by
9281 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9282 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9283 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9284 version of BRANCH_IF_TRUE. */
9287 mips_output_conditional_branch (rtx insn, rtx *operands,
9288 const char *branch_if_true,
9289 const char *branch_if_false)
9291 unsigned int length;
9292 rtx taken, not_taken;
9294 length = get_attr_length (insn);
9297 /* Just a simple conditional branch. */
9298 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9299 return branch_if_true;
9302 /* Generate a reversed branch around a direct jump. This fallback does
9303 not use branch-likely instructions. */
9304 mips_branch_likely = false;
9305 not_taken = gen_label_rtx ();
9306 taken = operands[1];
9308 /* Generate the reversed branch to NOT_TAKEN. */
9309 operands[1] = not_taken;
9310 output_asm_insn (branch_if_false, operands);
9312 /* If INSN has a delay slot, we must provide delay slots for both the
9313 branch to NOT_TAKEN and the conditional jump. We must also ensure
9314 that INSN's delay slot is executed in the appropriate cases. */
9317 /* This first delay slot will always be executed, so use INSN's
9318 delay slot if is not annulled. */
9319 if (!INSN_ANNULLED_BRANCH_P (insn))
9321 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9322 asm_out_file, optimize, 1, NULL);
9323 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9326 output_asm_insn ("nop", 0);
9327 fprintf (asm_out_file, "\n");
9330 /* Output the unconditional branch to TAKEN. */
9332 output_asm_insn ("j\t%0%/", &taken);
9335 output_asm_insn (mips_output_load_label (), &taken);
9336 output_asm_insn ("jr\t%@%]%/", 0);
9339 /* Now deal with its delay slot; see above. */
9342 /* This delay slot will only be executed if the branch is taken.
9343 Use INSN's delay slot if is annulled. */
9344 if (INSN_ANNULLED_BRANCH_P (insn))
9346 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9347 asm_out_file, optimize, 1, NULL);
9348 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9351 output_asm_insn ("nop", 0);
9352 fprintf (asm_out_file, "\n");
9355 /* Output NOT_TAKEN. */
9356 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9357 CODE_LABEL_NUMBER (not_taken));
9361 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9362 if some ordered condition is true. The condition is given by
9363 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9364 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9365 its second is always zero. */
9368 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9370 const char *branch[2];
9372 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9373 Make BRANCH[0] branch on the inverse condition. */
9374 switch (GET_CODE (operands[0]))
9376 /* These cases are equivalent to comparisons against zero. */
9378 inverted_p = !inverted_p;
9381 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9382 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9385 /* These cases are always true or always false. */
9387 inverted_p = !inverted_p;
9390 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9391 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9395 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9396 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9399 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9402 /* Used to output div or ddiv instruction DIVISION, which has the operands
9403 given by OPERANDS. Add in a divide-by-zero check if needed.
9405 When working around R4000 and R4400 errata, we need to make sure that
9406 the division is not immediately followed by a shift[1][2]. We also
9407 need to stop the division from being put into a branch delay slot[3].
9408 The easiest way to avoid both problems is to add a nop after the
9409 division. When a divide-by-zero check is needed, this nop can be
9410 used to fill the branch delay slot.
9412 [1] If a double-word or a variable shift executes immediately
9413 after starting an integer division, the shift may give an
9414 incorrect result. See quotations of errata #16 and #28 from
9415 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9416 in mips.md for details.
9418 [2] A similar bug to [1] exists for all revisions of the
9419 R4000 and the R4400 when run in an MC configuration.
9420 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9422 "19. In this following sequence:
9424 ddiv (or ddivu or div or divu)
9425 dsll32 (or dsrl32, dsra32)
9427 if an MPT stall occurs, while the divide is slipping the cpu
9428 pipeline, then the following double shift would end up with an
9431 Workaround: The compiler needs to avoid generating any
9432 sequence with divide followed by extended double shift."
9434 This erratum is also present in "MIPS R4400MC Errata, Processor
9435 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9436 & 3.0" as errata #10 and #4, respectively.
9438 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9439 (also valid for MIPS R4000MC processors):
9441 "52. R4000SC: This bug does not apply for the R4000PC.
9443 There are two flavors of this bug:
9445 1) If the instruction just after divide takes an RF exception
9446 (tlb-refill, tlb-invalid) and gets an instruction cache
9447 miss (both primary and secondary) and the line which is
9448 currently in secondary cache at this index had the first
9449 data word, where the bits 5..2 are set, then R4000 would
9450 get a wrong result for the div.
9455 ------------------- # end-of page. -tlb-refill
9460 ------------------- # end-of page. -tlb-invalid
9463 2) If the divide is in the taken branch delay slot, where the
9464 target takes RF exception and gets an I-cache miss for the
9465 exception vector or where I-cache miss occurs for the
9466 target address, under the above mentioned scenarios, the
9467 div would get wrong results.
9470 j r2 # to next page mapped or unmapped
9471 div r8,r9 # this bug would be there as long
9472 # as there is an ICache miss and
9473 nop # the "data pattern" is present
9476 beq r0, r0, NextPage # to Next page
9480 This bug is present for div, divu, ddiv, and ddivu
9483 Workaround: For item 1), OS could make sure that the next page
9484 after the divide instruction is also mapped. For item 2), the
9485 compiler could make sure that the divide instruction is not in
9486 the branch delay slot."
9488 These processors have PRId values of 0x00004220 and 0x00004300 for
9489 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9492 mips_output_division (const char *division, rtx *operands)
9497 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9499 output_asm_insn (s, operands);
9502 if (TARGET_CHECK_ZERO_DIV)
9506 output_asm_insn (s, operands);
9507 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9509 else if (GENERATE_DIVIDE_TRAPS)
9511 output_asm_insn (s, operands);
9516 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9517 output_asm_insn (s, operands);
9518 s = "break\t7%)\n1:";
9524 /* Return true if INSN is a multiply-add or multiply-subtract
9525 instruction and PREV assigns to the accumulator operand. */
9528 mips_linked_madd_p (rtx prev, rtx insn)
9532 x = single_set (insn);
9538 if (GET_CODE (x) == PLUS
9539 && GET_CODE (XEXP (x, 0)) == MULT
9540 && reg_set_p (XEXP (x, 1), prev))
9543 if (GET_CODE (x) == MINUS
9544 && GET_CODE (XEXP (x, 1)) == MULT
9545 && reg_set_p (XEXP (x, 0), prev))
9551 /* Implements a store data bypass check. We need this because the cprestore
9552 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
9553 default routine to abort. We just return false for that case. */
9554 /* ??? Should try to give a better result here than assuming false. */
9557 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9559 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9562 return ! store_data_bypass_p (out_insn, in_insn);
9565 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9566 dependencies have no cost, except on the 20Kc where output-dependence
9567 is treated like input-dependence. */
9570 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9571 rtx dep ATTRIBUTE_UNUSED, int cost)
9573 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
9576 if (REG_NOTE_KIND (link) != 0)
9581 /* Return the number of instructions that can be issued per cycle. */
9584 mips_issue_rate (void)
9588 case PROCESSOR_74KC:
9589 case PROCESSOR_74KF2_1:
9590 case PROCESSOR_74KF1_1:
9591 case PROCESSOR_74KF3_2:
9592 /* The 74k is not strictly quad-issue cpu, but can be seen as one
9593 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
9594 but in reality only a maximum of 3 insns can be issued as the
9595 floating point load/stores also require a slot in the AGEN pipe. */
9598 case PROCESSOR_20KC:
9599 case PROCESSOR_R4130:
9600 case PROCESSOR_R5400:
9601 case PROCESSOR_R5500:
9602 case PROCESSOR_R7000:
9603 case PROCESSOR_R9000:
9607 case PROCESSOR_SB1A:
9608 /* This is actually 4, but we get better performance if we claim 3.
9609 This is partly because of unwanted speculative code motion with the
9610 larger number, and partly because in most common cases we can't
9611 reach the theoretical max of 4. */
9619 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9620 be as wide as the scheduling freedom in the DFA. */
9623 mips_multipass_dfa_lookahead (void)
9625 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9632 /* Remove the instruction at index LOWER from ready queue READY and
9633 reinsert it in front of the instruction at index HIGHER. LOWER must
9637 mips_promote_ready (rtx *ready, int lower, int higher)
9642 new_head = ready[lower];
9643 for (i = lower; i < higher; i++)
9644 ready[i] = ready[i + 1];
9645 ready[i] = new_head;
9648 /* If the priority of the instruction at POS2 in the ready queue READY
9649 is within LIMIT units of that of the instruction at POS1, swap the
9650 instructions if POS2 is not already less than POS1. */
9653 mips_maybe_swap_ready (rtx *ready, int pos1, int pos2, int limit)
9656 && INSN_PRIORITY (ready[pos1]) + limit >= INSN_PRIORITY (ready[pos2]))
9660 ready[pos1] = ready[pos2];
9665 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9666 that may clobber hi or lo. */
9668 static rtx mips_macc_chains_last_hilo;
9670 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9671 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9674 mips_macc_chains_record (rtx insn)
9676 if (get_attr_may_clobber_hilo (insn))
9677 mips_macc_chains_last_hilo = insn;
9680 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9681 has NREADY elements, looking for a multiply-add or multiply-subtract
9682 instruction that is cumulative with mips_macc_chains_last_hilo.
9683 If there is one, promote it ahead of anything else that might
9684 clobber hi or lo. */
9687 mips_macc_chains_reorder (rtx *ready, int nready)
9691 if (mips_macc_chains_last_hilo != 0)
9692 for (i = nready - 1; i >= 0; i--)
9693 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9695 for (j = nready - 1; j > i; j--)
9696 if (recog_memoized (ready[j]) >= 0
9697 && get_attr_may_clobber_hilo (ready[j]))
9699 mips_promote_ready (ready, i, j);
9706 /* The last instruction to be scheduled. */
9708 static rtx vr4130_last_insn;
9710 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9711 points to an rtx that is initially an instruction. Nullify the rtx
9712 if the instruction uses the value of register X. */
9715 vr4130_true_reg_dependence_p_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
9717 rtx *insn_ptr = data;
9720 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9724 /* Return true if there is true register dependence between vr4130_last_insn
9728 vr4130_true_reg_dependence_p (rtx insn)
9730 note_stores (PATTERN (vr4130_last_insn),
9731 vr4130_true_reg_dependence_p_1, &insn);
9735 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9736 the ready queue and that INSN2 is the instruction after it, return
9737 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9738 in which INSN1 and INSN2 can probably issue in parallel, but for
9739 which (INSN2, INSN1) should be less sensitive to instruction
9740 alignment than (INSN1, INSN2). See 4130.md for more details. */
9743 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9745 sd_iterator_def sd_it;
9748 /* Check for the following case:
9750 1) there is some other instruction X with an anti dependence on INSN1;
9751 2) X has a higher priority than INSN2; and
9752 3) X is an arithmetic instruction (and thus has no unit restrictions).
9754 If INSN1 is the last instruction blocking X, it would better to
9755 choose (INSN1, X) over (INSN2, INSN1). */
9756 FOR_EACH_DEP (insn1, SD_LIST_FORW, sd_it, dep)
9757 if (DEP_TYPE (dep) == REG_DEP_ANTI
9758 && INSN_PRIORITY (DEP_CON (dep)) > INSN_PRIORITY (insn2)
9759 && recog_memoized (DEP_CON (dep)) >= 0
9760 && get_attr_vr4130_class (DEP_CON (dep)) == VR4130_CLASS_ALU)
9763 if (vr4130_last_insn != 0
9764 && recog_memoized (insn1) >= 0
9765 && recog_memoized (insn2) >= 0)
9767 /* See whether INSN1 and INSN2 use different execution units,
9768 or if they are both ALU-type instructions. If so, they can
9769 probably execute in parallel. */
9770 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9771 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9772 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9774 /* If only one of the instructions has a dependence on
9775 vr4130_last_insn, prefer to schedule the other one first. */
9776 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9777 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9781 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9782 is not an ALU-type instruction and if INSN1 uses the same
9783 execution unit. (Note that if this condition holds, we already
9784 know that INSN2 uses a different execution unit.) */
9785 if (class1 != VR4130_CLASS_ALU
9786 && recog_memoized (vr4130_last_insn) >= 0
9787 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9794 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9795 queue with at least two instructions. Swap the first two if
9796 vr4130_swap_insns_p says that it could be worthwhile. */
9799 vr4130_reorder (rtx *ready, int nready)
9801 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9802 mips_promote_ready (ready, nready - 2, nready - 1);
9805 /* Record whether last 74k AGEN instruction was a load or store. */
9807 static enum attr_type mips_last_74k_agen_insn = TYPE_UNKNOWN;
9809 /* Initialize mips_last_74k_agen_insn from INSN. A null argument
9810 resets to TYPE_UNKNOWN state. */
9813 mips_74k_agen_init (rtx insn)
9815 if (!insn || !NONJUMP_INSN_P (insn))
9816 mips_last_74k_agen_insn = TYPE_UNKNOWN;
9817 else if (USEFUL_INSN_P (insn))
9819 enum attr_type type = get_attr_type (insn);
9820 if (type == TYPE_LOAD || type == TYPE_STORE)
9821 mips_last_74k_agen_insn = type;
9825 /* A TUNE_74K helper function. The 74K AGEN pipeline likes multiple
9826 loads to be grouped together, and multiple stores to be grouped
9827 together. Swap things around in the ready queue to make this happen. */
9830 mips_74k_agen_reorder (rtx *ready, int nready)
9833 int store_pos, load_pos;
9838 for (i = nready - 1; i >= 0; i--)
9840 rtx insn = ready[i];
9841 if (USEFUL_INSN_P (insn))
9842 switch (get_attr_type (insn))
9845 if (store_pos == -1)
9859 if (load_pos == -1 || store_pos == -1)
9862 switch (mips_last_74k_agen_insn)
9865 /* Prefer to schedule loads since they have a higher latency. */
9867 /* Swap loads to the front of the queue. */
9868 mips_maybe_swap_ready (ready, load_pos, store_pos, 4);
9871 /* Swap stores to the front of the queue. */
9872 mips_maybe_swap_ready (ready, store_pos, load_pos, 4);
9879 /* Implement TARGET_SCHED_INIT. */
9882 mips_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9883 int max_ready ATTRIBUTE_UNUSED)
9885 mips_macc_chains_last_hilo = 0;
9886 vr4130_last_insn = 0;
9887 mips_74k_agen_init (NULL_RTX);
9890 /* Implement TARGET_SCHED_REORDER and TARG_SCHED_REORDER2. */
9893 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9894 rtx *ready, int *nreadyp, int cycle ATTRIBUTE_UNUSED)
9896 if (!reload_completed
9899 mips_macc_chains_reorder (ready, *nreadyp);
9900 if (reload_completed
9902 && !TARGET_VR4130_ALIGN
9904 vr4130_reorder (ready, *nreadyp);
9906 mips_74k_agen_reorder (ready, *nreadyp);
9907 return mips_issue_rate ();
9910 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9913 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9917 mips_74k_agen_init (insn);
9918 switch (GET_CODE (PATTERN (insn)))
9922 /* Don't count USEs and CLOBBERs against the issue rate. */
9927 if (!reload_completed && TUNE_MACC_CHAINS)
9928 mips_macc_chains_record (insn);
9929 vr4130_last_insn = insn;
9935 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9936 return the first operand of the associated "pref" or "prefx" insn. */
9939 mips_prefetch_cookie (rtx write, rtx locality)
9941 /* store_streamed / load_streamed. */
9942 if (INTVAL (locality) <= 0)
9943 return GEN_INT (INTVAL (write) + 4);
9946 if (INTVAL (locality) <= 2)
9949 /* store_retained / load_retained. */
9950 return GEN_INT (INTVAL (write) + 6);
9953 /* MIPS builtin function support. */
9955 struct builtin_description
9957 /* The code of the main .md file instruction. See mips_builtin_type
9958 for more information. */
9959 enum insn_code icode;
9961 /* The floating-point comparison code to use with ICODE, if any. */
9962 enum mips_fp_condition cond;
9964 /* The name of the builtin function. */
9967 /* Specifies how the function should be expanded. */
9968 enum mips_builtin_type builtin_type;
9970 /* The function's prototype. */
9971 enum mips_function_type function_type;
9973 /* The target flags required for this function. */
9977 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9978 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9979 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9980 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9981 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
9983 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9985 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9986 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9987 "__builtin_mips_" #INSN "_" #COND "_s", \
9988 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9989 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9990 "__builtin_mips_" #INSN "_" #COND "_d", \
9991 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9993 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9994 The lower and upper forms require TARGET_FLAGS while the any and all
9995 forms require MASK_MIPS3D. */
9996 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9997 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9998 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9999 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10000 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10001 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
10002 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10003 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10004 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
10005 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
10006 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10007 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
10008 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
10010 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10011 require MASK_MIPS3D. */
10012 #define CMP_4S_BUILTINS(INSN, COND) \
10013 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10014 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
10015 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10017 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10018 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
10019 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10022 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10023 instruction requires TARGET_FLAGS. */
10024 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
10025 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10026 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
10027 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10029 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10030 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
10031 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10034 /* Define all the builtins related to c.cond.fmt condition COND. */
10035 #define CMP_BUILTINS(COND) \
10036 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10037 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
10038 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
10039 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10040 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
10041 CMP_4S_BUILTINS (c, COND), \
10042 CMP_4S_BUILTINS (cabs, COND)
10044 static const struct builtin_description mips_bdesc[] =
10046 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10047 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10048 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10049 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10050 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10051 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10052 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10053 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10055 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10056 MASK_PAIRED_SINGLE_FLOAT),
10057 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10058 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10059 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10060 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10062 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10063 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10064 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10065 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10066 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10067 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10069 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10070 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10071 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10072 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10073 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10074 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10076 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10079 /* Builtin functions for the SB-1 processor. */
10081 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10083 static const struct builtin_description sb1_bdesc[] =
10085 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10088 /* Builtin functions for DSP ASE. */
10090 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10091 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10092 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10093 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10094 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10096 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10097 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10098 builtin_description fields. */
10099 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10100 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10101 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10103 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10104 branch instruction. TARGET_FLAGS is a builtin_description field. */
10105 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10106 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10107 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10109 static const struct builtin_description dsp_bdesc[] =
10111 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10112 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10113 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10114 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10115 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10116 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10117 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10118 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10119 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10120 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10121 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10122 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10123 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10124 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10125 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10126 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10127 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10128 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10129 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10130 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10131 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10132 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10133 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10134 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10135 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10136 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10137 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10138 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10139 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10140 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10141 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10142 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10143 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10144 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10145 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10146 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10147 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10148 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10149 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10150 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10151 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10152 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10153 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10154 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10155 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10156 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10157 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10158 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10159 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10160 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10161 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10162 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10163 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10164 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10165 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10166 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10167 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10168 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10169 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10170 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10171 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10172 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10173 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10174 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_POINTER_SI, MASK_DSP),
10175 BPOSGE_BUILTIN (32, MASK_DSP),
10177 /* The following are for the MIPS DSP ASE REV 2. */
10178 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
10179 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10180 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10181 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10182 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10183 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10184 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10185 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10186 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10187 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10188 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10189 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10190 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10191 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10192 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10193 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10194 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10195 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10196 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10197 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10198 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10199 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
10200 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10201 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10202 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10203 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10204 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10205 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10206 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10207 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10208 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10209 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10210 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10211 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
10214 static const struct builtin_description dsp_32only_bdesc[] =
10216 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10217 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10218 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10219 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10220 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10221 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10222 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10223 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10224 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10225 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10226 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10227 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10228 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10229 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10230 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10231 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10232 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10233 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10234 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10235 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10236 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10238 /* The following are for the MIPS DSP ASE REV 2. */
10239 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10240 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10241 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10242 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10243 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10244 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10245 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10246 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
10247 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
10248 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10249 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10250 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10251 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10252 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10253 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
10256 /* This helps provide a mapping from builtin function codes to bdesc
10261 /* The builtin function table that this entry describes. */
10262 const struct builtin_description *bdesc;
10264 /* The number of entries in the builtin function table. */
10267 /* The target processor that supports these builtin functions.
10268 PROCESSOR_MAX means we enable them for all processors. */
10269 enum processor_type proc;
10271 /* If the target has these flags, this builtin function table
10272 will not be supported. */
10273 int unsupported_target_flags;
10276 static const struct bdesc_map bdesc_arrays[] =
10278 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
10279 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
10280 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
10281 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
10285 /* MODE is a vector mode whose elements have type TYPE. Return the type
10286 of the vector itself. */
10289 mips_builtin_vector_type (tree type, enum machine_mode mode)
10291 static tree types[(int) MAX_MACHINE_MODE];
10293 if (types[(int) mode] == NULL_TREE)
10294 types[(int) mode] = build_vector_type_for_mode (type, mode);
10295 return types[(int) mode];
10298 /* Source-level argument types. */
10299 #define MIPS_ATYPE_VOID void_type_node
10300 #define MIPS_ATYPE_INT integer_type_node
10301 #define MIPS_ATYPE_POINTER ptr_type_node
10303 /* Standard mode-based argument types. */
10304 #define MIPS_ATYPE_SI intSI_type_node
10305 #define MIPS_ATYPE_USI unsigned_intSI_type_node
10306 #define MIPS_ATYPE_DI intDI_type_node
10307 #define MIPS_ATYPE_SF float_type_node
10308 #define MIPS_ATYPE_DF double_type_node
10310 /* Vector argument types. */
10311 #define MIPS_ATYPE_V2SF mips_builtin_vector_type (float_type_node, V2SFmode)
10312 #define MIPS_ATYPE_V2HI mips_builtin_vector_type (intHI_type_node, V2HImode)
10313 #define MIPS_ATYPE_V4QI mips_builtin_vector_type (intQI_type_node, V4QImode)
10315 /* MIPS_FTYPE_ATYPESN takes N MIPS_FTYPES-like type codes and lists
10316 their associated MIPS_ATYPEs. */
10317 #define MIPS_FTYPE_ATYPES1(A, B) \
10318 MIPS_ATYPE_##A, MIPS_ATYPE_##B
10320 #define MIPS_FTYPE_ATYPES2(A, B, C) \
10321 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C
10323 #define MIPS_FTYPE_ATYPES3(A, B, C, D) \
10324 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D
10326 #define MIPS_FTYPE_ATYPES4(A, B, C, D, E) \
10327 MIPS_ATYPE_##A, MIPS_ATYPE_##B, MIPS_ATYPE_##C, MIPS_ATYPE_##D, \
10330 /* Return the function type associated with function prototype TYPE. */
10333 mips_build_function_type (enum mips_function_type type)
10335 static tree types[(int) MIPS_MAX_FTYPE_MAX];
10337 if (types[(int) type] == NULL_TREE)
10340 #define DEF_MIPS_FTYPE(NUM, ARGS) \
10341 case MIPS_FTYPE_NAME##NUM ARGS: \
10342 types[(int) type] \
10343 = build_function_type_list (MIPS_FTYPE_ATYPES##NUM ARGS, \
10346 #include "config/mips/mips-ftypes.def"
10347 #undef DEF_MIPS_FTYPE
10349 gcc_unreachable ();
10352 return types[(int) type];
10355 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
10358 mips_init_builtins (void)
10360 const struct builtin_description *d;
10361 const struct bdesc_map *m;
10362 unsigned int offset;
10364 /* Iterate through all of the bdesc arrays, initializing all of the
10365 builtin functions. */
10368 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10370 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10371 && (m->unsupported_target_flags & target_flags) == 0)
10372 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10373 if ((d->target_flags & target_flags) == d->target_flags)
10374 add_builtin_function (d->name,
10375 mips_build_function_type (d->function_type),
10376 d - m->bdesc + offset,
10377 BUILT_IN_MD, NULL, NULL);
10382 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
10383 suitable for input operand OP of instruction ICODE. Return the value. */
10386 mips_prepare_builtin_arg (enum insn_code icode,
10387 unsigned int op, tree exp, unsigned int argnum)
10390 enum machine_mode mode;
10392 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
10393 mode = insn_data[icode].operand[op].mode;
10394 if (!insn_data[icode].operand[op].predicate (value, mode))
10396 value = copy_to_mode_reg (mode, value);
10397 /* Check the predicate again. */
10398 if (!insn_data[icode].operand[op].predicate (value, mode))
10400 error ("invalid argument to builtin function");
10408 /* Return an rtx suitable for output operand OP of instruction ICODE.
10409 If TARGET is non-null, try to use it where possible. */
10412 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10414 enum machine_mode mode;
10416 mode = insn_data[icode].operand[op].mode;
10417 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10418 target = gen_reg_rtx (mode);
10423 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10424 .md pattern and CALL is the function expr with arguments. TARGET,
10425 if nonnull, suggests a good place to put the result.
10426 HAS_TARGET indicates the function must return something. */
10429 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
10432 rtx ops[MAX_RECOG_OPERANDS];
10438 /* We save target to ops[0]. */
10439 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10443 /* We need to test if the arglist is not zero. Some instructions have extra
10444 clobber registers. */
10445 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
10446 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10451 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10455 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10459 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10463 gcc_unreachable ();
10468 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10469 function (TYPE says which). EXP is the tree for the function
10470 function, ICODE is the instruction that should be used to compare
10471 the first two arguments, and COND is the condition it should test.
10472 TARGET, if nonnull, suggests a good place to put the result. */
10475 mips_expand_builtin_movtf (enum mips_builtin_type type,
10476 enum insn_code icode, enum mips_fp_condition cond,
10477 rtx target, tree exp)
10479 rtx cmp_result, op0, op1;
10481 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10482 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
10483 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
10484 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10486 icode = CODE_FOR_mips_cond_move_tf_ps;
10487 target = mips_prepare_builtin_target (icode, 0, target);
10488 if (type == MIPS_BUILTIN_MOVT)
10490 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
10491 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
10495 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
10496 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
10498 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10502 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10503 into TARGET otherwise. Return TARGET. */
10506 mips_builtin_branch_and_move (rtx condition, rtx target,
10507 rtx value_if_true, rtx value_if_false)
10509 rtx true_label, done_label;
10511 true_label = gen_label_rtx ();
10512 done_label = gen_label_rtx ();
10514 /* First assume that CONDITION is false. */
10515 mips_emit_move (target, value_if_false);
10517 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10518 emit_jump_insn (gen_condjump (condition, true_label));
10519 emit_jump_insn (gen_jump (done_label));
10522 /* Fix TARGET if CONDITION is true. */
10523 emit_label (true_label);
10524 mips_emit_move (target, value_if_true);
10526 emit_label (done_label);
10530 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10531 of the comparison instruction and COND is the condition it should test.
10532 EXP is the function call and arguments and TARGET, if nonnull,
10533 suggests a good place to put the boolean result. */
10536 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10537 enum insn_code icode, enum mips_fp_condition cond,
10538 rtx target, tree exp)
10540 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10544 if (target == 0 || GET_MODE (target) != SImode)
10545 target = gen_reg_rtx (SImode);
10547 /* Prepare the operands to the comparison. */
10548 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10549 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
10550 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10552 switch (insn_data[icode].n_operands)
10555 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10559 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10560 ops[3], ops[4], GEN_INT (cond)));
10564 gcc_unreachable ();
10567 /* If the comparison sets more than one register, we define the result
10568 to be 0 if all registers are false and -1 if all registers are true.
10569 The value of the complete result is indeterminate otherwise. */
10570 switch (builtin_type)
10572 case MIPS_BUILTIN_CMP_ALL:
10573 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10574 return mips_builtin_branch_and_move (condition, target,
10575 const0_rtx, const1_rtx);
10577 case MIPS_BUILTIN_CMP_UPPER:
10578 case MIPS_BUILTIN_CMP_LOWER:
10579 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10580 condition = gen_single_cc (cmp_result, offset);
10581 return mips_builtin_branch_and_move (condition, target,
10582 const1_rtx, const0_rtx);
10585 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10586 return mips_builtin_branch_and_move (condition, target,
10587 const1_rtx, const0_rtx);
10591 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10592 suggests a good place to put the boolean result. */
10595 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10597 rtx condition, cmp_result;
10600 if (target == 0 || GET_MODE (target) != SImode)
10601 target = gen_reg_rtx (SImode);
10603 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10605 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10610 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10611 return mips_builtin_branch_and_move (condition, target,
10612 const1_rtx, const0_rtx);
10615 /* EXP is a CALL_EXPR that calls the function described by BDESC.
10616 Expand the call and return an rtx for its return value.
10617 TARGET, if nonnull, suggests a good place to put this value. */
10620 mips_expand_builtin_1 (const struct builtin_description *bdesc,
10621 tree exp, rtx target)
10623 switch (bdesc->builtin_type)
10625 case MIPS_BUILTIN_DIRECT:
10626 return mips_expand_builtin_direct (bdesc->icode, target, exp, true);
10628 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10629 return mips_expand_builtin_direct (bdesc->icode, target, exp, false);
10631 case MIPS_BUILTIN_MOVT:
10632 case MIPS_BUILTIN_MOVF:
10633 return mips_expand_builtin_movtf (bdesc->builtin_type, bdesc->icode,
10634 bdesc->cond, target, exp);
10636 case MIPS_BUILTIN_CMP_ANY:
10637 case MIPS_BUILTIN_CMP_ALL:
10638 case MIPS_BUILTIN_CMP_UPPER:
10639 case MIPS_BUILTIN_CMP_LOWER:
10640 case MIPS_BUILTIN_CMP_SINGLE:
10641 return mips_expand_builtin_compare (bdesc->builtin_type, bdesc->icode,
10642 bdesc->cond, target, exp);
10644 case MIPS_BUILTIN_BPOSGE32:
10645 return mips_expand_builtin_bposge (bdesc->builtin_type, target);
10647 gcc_unreachable ();
10650 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10653 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10654 enum machine_mode mode ATTRIBUTE_UNUSED,
10655 int ignore ATTRIBUTE_UNUSED)
10658 unsigned int fcode;
10659 const struct bdesc_map *m;
10661 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10662 fcode = DECL_FUNCTION_CODE (fndecl);
10666 error ("built-in function %qs not supported for MIPS16",
10667 IDENTIFIER_POINTER (DECL_NAME (fndecl)));
10671 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10673 if (fcode < m->size)
10674 return mips_expand_builtin_1 (m->bdesc + fcode, exp, target);
10677 gcc_unreachable ();
10680 /* An entry in the mips16 constant pool. VALUE is the pool constant,
10681 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
10683 struct mips16_constant {
10684 struct mips16_constant *next;
10687 enum machine_mode mode;
10690 /* Information about an incomplete mips16 constant pool. FIRST is the
10691 first constant, HIGHEST_ADDRESS is the highest address that the first
10692 byte of the pool can have, and INSN_ADDRESS is the current instruction
10695 struct mips16_constant_pool {
10696 struct mips16_constant *first;
10697 int highest_address;
10701 /* Add constant VALUE to POOL and return its label. MODE is the
10702 value's mode (used for CONST_INTs, etc.). */
10705 add_constant (struct mips16_constant_pool *pool,
10706 rtx value, enum machine_mode mode)
10708 struct mips16_constant **p, *c;
10709 bool first_of_size_p;
10711 /* See whether the constant is already in the pool. If so, return the
10712 existing label, otherwise leave P pointing to the place where the
10713 constant should be added.
10715 Keep the pool sorted in increasing order of mode size so that we can
10716 reduce the number of alignments needed. */
10717 first_of_size_p = true;
10718 for (p = &pool->first; *p != 0; p = &(*p)->next)
10720 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
10721 return (*p)->label;
10722 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
10724 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
10725 first_of_size_p = false;
10728 /* In the worst case, the constant needed by the earliest instruction
10729 will end up at the end of the pool. The entire pool must then be
10730 accessible from that instruction.
10732 When adding the first constant, set the pool's highest address to
10733 the address of the first out-of-range byte. Adjust this address
10734 downwards each time a new constant is added. */
10735 if (pool->first == 0)
10736 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
10737 is the address of the instruction with the lowest two bits clear.
10738 The base PC value for ld has the lowest three bits clear. Assume
10739 the worst case here. */
10740 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
10741 pool->highest_address -= GET_MODE_SIZE (mode);
10742 if (first_of_size_p)
10743 /* Take into account the worst possible padding due to alignment. */
10744 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
10746 /* Create a new entry. */
10747 c = (struct mips16_constant *) xmalloc (sizeof *c);
10750 c->label = gen_label_rtx ();
10757 /* Output constant VALUE after instruction INSN and return the last
10758 instruction emitted. MODE is the mode of the constant. */
10761 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
10763 if (SCALAR_INT_MODE_P (mode)
10764 || ALL_SCALAR_FRACT_MODE_P (mode)
10765 || ALL_SCALAR_ACCUM_MODE_P (mode))
10767 rtx size = GEN_INT (GET_MODE_SIZE (mode));
10768 return emit_insn_after (gen_consttable_int (value, size), insn);
10771 if (SCALAR_FLOAT_MODE_P (mode))
10772 return emit_insn_after (gen_consttable_float (value), insn);
10774 if (VECTOR_MODE_P (mode))
10778 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
10779 insn = dump_constants_1 (GET_MODE_INNER (mode),
10780 CONST_VECTOR_ELT (value, i), insn);
10784 gcc_unreachable ();
10788 /* Dump out the constants in CONSTANTS after INSN. */
10791 dump_constants (struct mips16_constant *constants, rtx insn)
10793 struct mips16_constant *c, *next;
10797 for (c = constants; c != NULL; c = next)
10799 /* If necessary, increase the alignment of PC. */
10800 if (align < GET_MODE_SIZE (c->mode))
10802 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
10803 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
10805 align = GET_MODE_SIZE (c->mode);
10807 insn = emit_label_after (c->label, insn);
10808 insn = dump_constants_1 (c->mode, c->value, insn);
10814 emit_barrier_after (insn);
10817 /* Return the length of instruction INSN. */
10820 mips16_insn_length (rtx insn)
10824 rtx body = PATTERN (insn);
10825 if (GET_CODE (body) == ADDR_VEC)
10826 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
10827 if (GET_CODE (body) == ADDR_DIFF_VEC)
10828 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
10830 return get_attr_length (insn);
10833 /* If *X is a symbolic constant that refers to the constant pool, add
10834 the constant to POOL and rewrite *X to use the constant's label. */
10837 mips16_rewrite_pool_constant (struct mips16_constant_pool *pool, rtx *x)
10839 rtx base, offset, label;
10841 split_const (*x, &base, &offset);
10842 if (GET_CODE (base) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (base))
10844 label = add_constant (pool, get_pool_constant (base),
10845 get_pool_mode (base));
10846 base = gen_rtx_LABEL_REF (Pmode, label);
10847 *x = mips_unspec_address_offset (base, offset, SYMBOL_PC_RELATIVE);
10851 /* This structure is used to communicate with mips16_rewrite_pool_refs.
10852 INSN is the instruction we're rewriting and POOL points to the current
10854 struct mips16_rewrite_pool_refs_info {
10856 struct mips16_constant_pool *pool;
10859 /* Rewrite *X so that constant pool references refer to the constant's
10860 label instead. DATA points to a mips16_rewrite_pool_refs_info
10864 mips16_rewrite_pool_refs (rtx *x, void *data)
10866 struct mips16_rewrite_pool_refs_info *info = data;
10868 if (force_to_mem_operand (*x, Pmode))
10870 rtx mem = force_const_mem (GET_MODE (*x), *x);
10871 validate_change (info->insn, x, mem, false);
10876 mips16_rewrite_pool_constant (info->pool, &XEXP (*x, 0));
10880 if (TARGET_MIPS16_TEXT_LOADS)
10881 mips16_rewrite_pool_constant (info->pool, x);
10883 return GET_CODE (*x) == CONST ? -1 : 0;
10886 /* Build MIPS16 constant pools. */
10889 mips16_lay_out_constants (void)
10891 struct mips16_constant_pool pool;
10892 struct mips16_rewrite_pool_refs_info info;
10895 if (!TARGET_MIPS16_PCREL_LOADS)
10899 memset (&pool, 0, sizeof (pool));
10900 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
10902 /* Rewrite constant pool references in INSN. */
10907 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &info);
10910 pool.insn_address += mips16_insn_length (insn);
10912 if (pool.first != NULL)
10914 /* If there are no natural barriers between the first user of
10915 the pool and the highest acceptable address, we'll need to
10916 create a new instruction to jump around the constant pool.
10917 In the worst case, this instruction will be 4 bytes long.
10919 If it's too late to do this transformation after INSN,
10920 do it immediately before INSN. */
10921 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
10925 label = gen_label_rtx ();
10927 jump = emit_jump_insn_before (gen_jump (label), insn);
10928 JUMP_LABEL (jump) = label;
10929 LABEL_NUSES (label) = 1;
10930 barrier = emit_barrier_after (jump);
10932 emit_label_after (label, barrier);
10933 pool.insn_address += 4;
10936 /* See whether the constant pool is now out of range of the first
10937 user. If so, output the constants after the previous barrier.
10938 Note that any instructions between BARRIER and INSN (inclusive)
10939 will use negative offsets to refer to the pool. */
10940 if (pool.insn_address > pool.highest_address)
10942 dump_constants (pool.first, barrier);
10946 else if (BARRIER_P (insn))
10950 dump_constants (pool.first, get_last_insn ());
10953 /* A temporary variable used by for_each_rtx callbacks, etc. */
10954 static rtx mips_sim_insn;
10956 /* A structure representing the state of the processor pipeline.
10957 Used by the mips_sim_* family of functions. */
10959 /* The maximum number of instructions that can be issued in a cycle.
10960 (Caches mips_issue_rate.) */
10961 unsigned int issue_rate;
10963 /* The current simulation time. */
10966 /* How many more instructions can be issued in the current cycle. */
10967 unsigned int insns_left;
10969 /* LAST_SET[X].INSN is the last instruction to set register X.
10970 LAST_SET[X].TIME is the time at which that instruction was issued.
10971 INSN is null if no instruction has yet set register X. */
10975 } last_set[FIRST_PSEUDO_REGISTER];
10977 /* The pipeline's current DFA state. */
10981 /* Reset STATE to the initial simulation state. */
10984 mips_sim_reset (struct mips_sim *state)
10987 state->insns_left = state->issue_rate;
10988 memset (&state->last_set, 0, sizeof (state->last_set));
10989 state_reset (state->dfa_state);
10992 /* Initialize STATE before its first use. DFA_STATE points to an
10993 allocated but uninitialized DFA state. */
10996 mips_sim_init (struct mips_sim *state, state_t dfa_state)
10998 state->issue_rate = mips_issue_rate ();
10999 state->dfa_state = dfa_state;
11000 mips_sim_reset (state);
11003 /* Advance STATE by one clock cycle. */
11006 mips_sim_next_cycle (struct mips_sim *state)
11009 state->insns_left = state->issue_rate;
11010 state_transition (state->dfa_state, 0);
11013 /* Advance simulation state STATE until instruction INSN can read
11017 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
11021 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
11022 if (state->last_set[REGNO (reg) + i].insn != 0)
11026 t = state->last_set[REGNO (reg) + i].time;
11027 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
11028 while (state->time < t)
11029 mips_sim_next_cycle (state);
11033 /* A for_each_rtx callback. If *X is a register, advance simulation state
11034 DATA until mips_sim_insn can read the register's value. */
11037 mips_sim_wait_regs_2 (rtx *x, void *data)
11040 mips_sim_wait_reg (data, mips_sim_insn, *x);
11044 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
11047 mips_sim_wait_regs_1 (rtx *x, void *data)
11049 for_each_rtx (x, mips_sim_wait_regs_2, data);
11052 /* Advance simulation state STATE until all of INSN's register
11053 dependencies are satisfied. */
11056 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
11058 mips_sim_insn = insn;
11059 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
11062 /* Advance simulation state STATE until the units required by
11063 instruction INSN are available. */
11066 mips_sim_wait_units (struct mips_sim *state, rtx insn)
11070 tmp_state = alloca (state_size ());
11071 while (state->insns_left == 0
11072 || (memcpy (tmp_state, state->dfa_state, state_size ()),
11073 state_transition (tmp_state, insn) >= 0))
11074 mips_sim_next_cycle (state);
11077 /* Advance simulation state STATE until INSN is ready to issue. */
11080 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
11082 mips_sim_wait_regs (state, insn);
11083 mips_sim_wait_units (state, insn);
11086 /* mips_sim_insn has just set X. Update the LAST_SET array
11087 in simulation state DATA. */
11090 mips_sim_record_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
11092 struct mips_sim *state;
11097 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
11099 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
11100 state->last_set[REGNO (x) + i].time = state->time;
11104 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
11105 can issue immediately (i.e., that mips_sim_wait_insn has already
11109 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
11111 state_transition (state->dfa_state, insn);
11112 state->insns_left--;
11114 mips_sim_insn = insn;
11115 note_stores (PATTERN (insn), mips_sim_record_set, state);
11118 /* Simulate issuing a NOP in state STATE. */
11121 mips_sim_issue_nop (struct mips_sim *state)
11123 if (state->insns_left == 0)
11124 mips_sim_next_cycle (state);
11125 state->insns_left--;
11128 /* Update simulation state STATE so that it's ready to accept the instruction
11129 after INSN. INSN should be part of the main rtl chain, not a member of a
11133 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
11135 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
11137 mips_sim_issue_nop (state);
11139 switch (GET_CODE (SEQ_BEGIN (insn)))
11143 /* We can't predict the processor state after a call or label. */
11144 mips_sim_reset (state);
11148 /* The delay slots of branch likely instructions are only executed
11149 when the branch is taken. Therefore, if the caller has simulated
11150 the delay slot instruction, STATE does not really reflect the state
11151 of the pipeline for the instruction after the delay slot. Also,
11152 branch likely instructions tend to incur a penalty when not taken,
11153 so there will probably be an extra delay between the branch and
11154 the instruction after the delay slot. */
11155 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
11156 mips_sim_reset (state);
11164 /* The VR4130 pipeline issues aligned pairs of instructions together,
11165 but it stalls the second instruction if it depends on the first.
11166 In order to cut down the amount of logic required, this dependence
11167 check is not based on a full instruction decode. Instead, any non-SPECIAL
11168 instruction is assumed to modify the register specified by bits 20-16
11169 (which is usually the "rt" field).
11171 In beq, beql, bne and bnel instructions, the rt field is actually an
11172 input, so we can end up with a false dependence between the branch
11173 and its delay slot. If this situation occurs in instruction INSN,
11174 try to avoid it by swapping rs and rt. */
11177 vr4130_avoid_branch_rt_conflict (rtx insn)
11181 first = SEQ_BEGIN (insn);
11182 second = SEQ_END (insn);
11184 && NONJUMP_INSN_P (second)
11185 && GET_CODE (PATTERN (first)) == SET
11186 && GET_CODE (SET_DEST (PATTERN (first))) == PC
11187 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
11189 /* Check for the right kind of condition. */
11190 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
11191 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
11192 && REG_P (XEXP (cond, 0))
11193 && REG_P (XEXP (cond, 1))
11194 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
11195 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
11197 /* SECOND mentions the rt register but not the rs register. */
11198 rtx tmp = XEXP (cond, 0);
11199 XEXP (cond, 0) = XEXP (cond, 1);
11200 XEXP (cond, 1) = tmp;
11205 /* Implement -mvr4130-align. Go through each basic block and simulate the
11206 processor pipeline. If we find that a pair of instructions could execute
11207 in parallel, and the first of those instruction is not 8-byte aligned,
11208 insert a nop to make it aligned. */
11211 vr4130_align_insns (void)
11213 struct mips_sim state;
11214 rtx insn, subinsn, last, last2, next;
11219 /* LAST is the last instruction before INSN to have a nonzero length.
11220 LAST2 is the last such instruction before LAST. */
11224 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
11227 mips_sim_init (&state, alloca (state_size ()));
11228 for (insn = get_insns (); insn != 0; insn = next)
11230 unsigned int length;
11232 next = NEXT_INSN (insn);
11234 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
11235 This isn't really related to the alignment pass, but we do it on
11236 the fly to avoid a separate instruction walk. */
11237 vr4130_avoid_branch_rt_conflict (insn);
11239 if (USEFUL_INSN_P (insn))
11240 FOR_EACH_SUBINSN (subinsn, insn)
11242 mips_sim_wait_insn (&state, subinsn);
11244 /* If we want this instruction to issue in parallel with the
11245 previous one, make sure that the previous instruction is
11246 aligned. There are several reasons why this isn't worthwhile
11247 when the second instruction is a call:
11249 - Calls are less likely to be performance critical,
11250 - There's a good chance that the delay slot can execute
11251 in parallel with the call.
11252 - The return address would then be unaligned.
11254 In general, if we're going to insert a nop between instructions
11255 X and Y, it's better to insert it immediately after X. That
11256 way, if the nop makes Y aligned, it will also align any labels
11257 between X and Y. */
11258 if (state.insns_left != state.issue_rate
11259 && !CALL_P (subinsn))
11261 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
11263 /* SUBINSN is the first instruction in INSN and INSN is
11264 aligned. We want to align the previous instruction
11265 instead, so insert a nop between LAST2 and LAST.
11267 Note that LAST could be either a single instruction
11268 or a branch with a delay slot. In the latter case,
11269 LAST, like INSN, is already aligned, but the delay
11270 slot must have some extra delay that stops it from
11271 issuing at the same time as the branch. We therefore
11272 insert a nop before the branch in order to align its
11274 emit_insn_after (gen_nop (), last2);
11277 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
11279 /* SUBINSN is the delay slot of INSN, but INSN is
11280 currently unaligned. Insert a nop between
11281 LAST and INSN to align it. */
11282 emit_insn_after (gen_nop (), last);
11286 mips_sim_issue_insn (&state, subinsn);
11288 mips_sim_finish_insn (&state, insn);
11290 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
11291 length = get_attr_length (insn);
11294 /* If the instruction is an asm statement or multi-instruction
11295 mips.md patern, the length is only an estimate. Insert an
11296 8 byte alignment after it so that the following instructions
11297 can be handled correctly. */
11298 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
11299 && (recog_memoized (insn) < 0 || length >= 8))
11301 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
11302 next = NEXT_INSN (next);
11303 mips_sim_next_cycle (&state);
11306 else if (length & 4)
11307 aligned_p = !aligned_p;
11312 /* See whether INSN is an aligned label. */
11313 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
11319 /* Subroutine of mips_reorg. If there is a hazard between INSN
11320 and a previous instruction, avoid it by inserting nops after
11323 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
11324 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
11325 before using the value of that register. *HILO_DELAY counts the
11326 number of instructions since the last hilo hazard (that is,
11327 the number of instructions since the last mflo or mfhi).
11329 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
11330 for the next instruction.
11332 LO_REG is an rtx for the LO register, used in dependence checking. */
11335 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
11336 rtx *delayed_reg, rtx lo_reg)
11339 int nops, ninsns, hazard_set;
11341 if (!INSN_P (insn))
11344 pattern = PATTERN (insn);
11346 /* Do not put the whole function in .set noreorder if it contains
11347 an asm statement. We don't know whether there will be hazards
11348 between the asm statement and the gcc-generated code. */
11349 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
11350 cfun->machine->all_noreorder_p = false;
11352 /* Ignore zero-length instructions (barriers and the like). */
11353 ninsns = get_attr_length (insn) / 4;
11357 /* Work out how many nops are needed. Note that we only care about
11358 registers that are explicitly mentioned in the instruction's pattern.
11359 It doesn't matter that calls use the argument registers or that they
11360 clobber hi and lo. */
11361 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
11362 nops = 2 - *hilo_delay;
11363 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
11368 /* Insert the nops between this instruction and the previous one.
11369 Each new nop takes us further from the last hilo hazard. */
11370 *hilo_delay += nops;
11372 emit_insn_after (gen_hazard_nop (), after);
11374 /* Set up the state for the next instruction. */
11375 *hilo_delay += ninsns;
11377 if (INSN_CODE (insn) >= 0)
11378 switch (get_attr_hazard (insn))
11388 hazard_set = (int) get_attr_hazard_set (insn);
11389 if (hazard_set == 0)
11390 set = single_set (insn);
11393 gcc_assert (GET_CODE (PATTERN (insn)) == PARALLEL);
11394 set = XVECEXP (PATTERN (insn), 0, hazard_set - 1);
11396 gcc_assert (set && GET_CODE (set) == SET);
11397 *delayed_reg = SET_DEST (set);
11403 /* Go through the instruction stream and insert nops where necessary.
11404 See if the whole function can then be put into .set noreorder &
11408 mips_avoid_hazards (void)
11410 rtx insn, last_insn, lo_reg, delayed_reg;
11413 /* Force all instructions to be split into their final form. */
11414 split_all_insns_noflow ();
11416 /* Recalculate instruction lengths without taking nops into account. */
11417 cfun->machine->ignore_hazard_length_p = true;
11418 shorten_branches (get_insns ());
11420 cfun->machine->all_noreorder_p = true;
11422 /* Profiled functions can't be all noreorder because the profiler
11423 support uses assembler macros. */
11424 if (current_function_profile)
11425 cfun->machine->all_noreorder_p = false;
11427 /* Code compiled with -mfix-vr4120 can't be all noreorder because
11428 we rely on the assembler to work around some errata. */
11429 if (TARGET_FIX_VR4120)
11430 cfun->machine->all_noreorder_p = false;
11432 /* The same is true for -mfix-vr4130 if we might generate mflo or
11433 mfhi instructions. Note that we avoid using mflo and mfhi if
11434 the VR4130 macc and dmacc instructions are available instead;
11435 see the *mfhilo_{si,di}_macc patterns. */
11436 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
11437 cfun->machine->all_noreorder_p = false;
11442 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
11444 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
11447 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
11448 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
11449 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
11450 &hilo_delay, &delayed_reg, lo_reg);
11452 mips_avoid_hazard (last_insn, insn, &hilo_delay,
11453 &delayed_reg, lo_reg);
11460 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
11465 mips16_lay_out_constants ();
11466 if (TARGET_EXPLICIT_RELOCS)
11468 if (mips_flag_delayed_branch)
11469 dbr_schedule (get_insns ());
11470 mips_avoid_hazards ();
11471 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
11472 vr4130_align_insns ();
11476 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
11477 in order to avoid duplicating too much logic from elsewhere. */
11480 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
11481 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
11484 rtx this, temp1, temp2, insn, fnaddr;
11485 bool use_sibcall_p;
11487 /* Pretend to be a post-reload pass while generating rtl. */
11488 reload_completed = 1;
11490 /* Mark the end of the (empty) prologue. */
11491 emit_note (NOTE_INSN_PROLOGUE_END);
11493 /* Determine if we can use a sibcall to call FUNCTION directly. */
11494 fnaddr = XEXP (DECL_RTL (function), 0);
11495 use_sibcall_p = (mips_function_ok_for_sibcall (function, NULL)
11496 && const_call_insn_operand (fnaddr, Pmode));
11498 /* Determine if we need to load FNADDR from the GOT. */
11499 if (!use_sibcall_p)
11500 switch (mips_classify_symbol (fnaddr, SYMBOL_CONTEXT_LEA))
11502 case SYMBOL_GOT_PAGE_OFST:
11503 case SYMBOL_GOT_DISP:
11504 /* Pick a global pointer. Use a call-clobbered register if
11505 TARGET_CALL_SAVED_GP. */
11506 cfun->machine->global_pointer =
11507 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
11508 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
11510 /* Set up the global pointer for n32 or n64 abicalls. */
11511 mips_emit_loadgp ();
11518 /* We need two temporary registers in some cases. */
11519 temp1 = gen_rtx_REG (Pmode, 2);
11520 temp2 = gen_rtx_REG (Pmode, 3);
11522 /* Find out which register contains the "this" pointer. */
11523 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
11524 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
11526 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
11528 /* Add DELTA to THIS. */
11531 rtx offset = GEN_INT (delta);
11532 if (!SMALL_OPERAND (delta))
11534 mips_emit_move (temp1, offset);
11537 emit_insn (gen_add3_insn (this, this, offset));
11540 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
11541 if (vcall_offset != 0)
11545 /* Set TEMP1 to *THIS. */
11546 mips_emit_move (temp1, gen_rtx_MEM (Pmode, this));
11548 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
11549 addr = mips_add_offset (temp2, temp1, vcall_offset);
11551 /* Load the offset and add it to THIS. */
11552 mips_emit_move (temp1, gen_rtx_MEM (Pmode, addr));
11553 emit_insn (gen_add3_insn (this, this, temp1));
11556 /* Jump to the target function. Use a sibcall if direct jumps are
11557 allowed, otherwise load the address into a register first. */
11560 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
11561 SIBLING_CALL_P (insn) = 1;
11565 /* This is messy. gas treats "la $25,foo" as part of a call
11566 sequence and may allow a global "foo" to be lazily bound.
11567 The general move patterns therefore reject this combination.
11569 In this context, lazy binding would actually be OK
11570 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
11571 TARGET_CALL_SAVED_GP; see mips_load_call_address.
11572 We must therefore load the address via a temporary
11573 register if mips_dangerous_for_la25_p.
11575 If we jump to the temporary register rather than $25, the assembler
11576 can use the move insn to fill the jump's delay slot. */
11577 if (TARGET_USE_PIC_FN_ADDR_REG
11578 && !mips_dangerous_for_la25_p (fnaddr))
11579 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
11580 mips_load_call_address (temp1, fnaddr, true);
11582 if (TARGET_USE_PIC_FN_ADDR_REG
11583 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
11584 mips_emit_move (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
11585 emit_jump_insn (gen_indirect_jump (temp1));
11588 /* Run just enough of rest_of_compilation. This sequence was
11589 "borrowed" from alpha.c. */
11590 insn = get_insns ();
11591 insn_locators_alloc ();
11592 split_all_insns_noflow ();
11593 mips16_lay_out_constants ();
11594 shorten_branches (insn);
11595 final_start_function (insn, file, 1);
11596 final (insn, file, 1);
11597 final_end_function ();
11599 /* Clean up the vars set above. Note that final_end_function resets
11600 the global pointer for us. */
11601 reload_completed = 0;
11604 static GTY(()) int was_mips16_p = -1;
11606 /* Set up the target-dependent global state so that it matches the
11607 current function's ISA mode. */
11610 mips_set_mips16_mode (int mips16_p)
11612 if (mips16_p == was_mips16_p)
11615 /* Restore base settings of various flags. */
11616 target_flags = mips_base_target_flags;
11617 flag_delayed_branch = mips_flag_delayed_branch;
11618 flag_schedule_insns = mips_base_schedule_insns;
11619 flag_reorder_blocks_and_partition = mips_base_reorder_blocks_and_partition;
11620 flag_move_loop_invariants = mips_base_move_loop_invariants;
11621 align_loops = mips_base_align_loops;
11622 align_jumps = mips_base_align_jumps;
11623 align_functions = mips_base_align_functions;
11627 /* Select mips16 instruction set. */
11628 target_flags |= MASK_MIPS16;
11630 /* Don't run the scheduler before reload, since it tends to
11631 increase register pressure. */
11632 flag_schedule_insns = 0;
11634 /* Don't do hot/cold partitioning. The constant layout code expects
11635 the whole function to be in a single section. */
11636 flag_reorder_blocks_and_partition = 0;
11638 /* Don't move loop invariants, because it tends to increase
11639 register pressure. It also introduces an extra move in cases
11640 where the constant is the first operand in a two-operand binary
11641 instruction, or when it forms a register argument to a functon
11643 flag_move_loop_invariants = 0;
11645 /* Silently disable -mexplicit-relocs since it doesn't apply
11646 to mips16 code. Even so, it would overly pedantic to warn
11647 about "-mips16 -mexplicit-relocs", especially given that
11648 we use a %gprel() operator. */
11649 target_flags &= ~MASK_EXPLICIT_RELOCS;
11651 /* Experiments suggest we get the best overall results from using
11652 the range of an unextended lw or sw. Code that makes heavy use
11653 of byte or short accesses can do better with ranges of 0...31
11654 and 0...63 respectively, but most code is sensitive to the range
11655 of lw and sw instead. */
11656 targetm.min_anchor_offset = 0;
11657 targetm.max_anchor_offset = 127;
11659 if (flag_pic || TARGET_ABICALLS)
11660 sorry ("MIPS16 PIC");
11664 /* Reset to select base non-mips16 ISA. */
11665 target_flags &= ~MASK_MIPS16;
11667 /* When using explicit relocs, we call dbr_schedule from within
11669 if (TARGET_EXPLICIT_RELOCS)
11670 flag_delayed_branch = 0;
11672 /* Provide default values for align_* for 64-bit targets. */
11675 if (align_loops == 0)
11677 if (align_jumps == 0)
11679 if (align_functions == 0)
11680 align_functions = 8;
11683 targetm.min_anchor_offset = -32768;
11684 targetm.max_anchor_offset = 32767;
11687 /* (Re)initialize mips target internals for new ISA. */
11688 mips_init_split_addresses ();
11689 mips_init_relocs ();
11691 if (was_mips16_p >= 0)
11692 /* Reinitialize target-dependent state. */
11695 was_mips16_p = TARGET_MIPS16;
11698 /* Implement TARGET_SET_CURRENT_FUNCTION. Decide whether the current
11699 function should use the MIPS16 ISA and switch modes accordingly. */
11702 mips_set_current_function (tree fndecl)
11704 mips_set_mips16_mode (mips_use_mips16_mode_p (fndecl));
11707 /* Allocate a chunk of memory for per-function machine-dependent data. */
11708 static struct machine_function *
11709 mips_init_machine_status (void)
11711 return ((struct machine_function *)
11712 ggc_alloc_cleared (sizeof (struct machine_function)));
11715 /* Return the processor associated with the given ISA level, or null
11716 if the ISA isn't valid. */
11718 static const struct mips_cpu_info *
11719 mips_cpu_info_from_isa (int isa)
11723 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
11724 if (mips_cpu_info_table[i].isa == isa)
11725 return mips_cpu_info_table + i;
11730 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
11731 with a final "000" replaced by "k". Ignore case.
11733 Note: this function is shared between GCC and GAS. */
11736 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
11738 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
11739 given++, canonical++;
11741 return ((*given == 0 && *canonical == 0)
11742 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
11746 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
11747 CPU name. We've traditionally allowed a lot of variation here.
11749 Note: this function is shared between GCC and GAS. */
11752 mips_matching_cpu_name_p (const char *canonical, const char *given)
11754 /* First see if the name matches exactly, or with a final "000"
11755 turned into "k". */
11756 if (mips_strict_matching_cpu_name_p (canonical, given))
11759 /* If not, try comparing based on numerical designation alone.
11760 See if GIVEN is an unadorned number, or 'r' followed by a number. */
11761 if (TOLOWER (*given) == 'r')
11763 if (!ISDIGIT (*given))
11766 /* Skip over some well-known prefixes in the canonical name,
11767 hoping to find a number there too. */
11768 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
11770 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
11772 else if (TOLOWER (canonical[0]) == 'r')
11775 return mips_strict_matching_cpu_name_p (canonical, given);
11779 /* Return the mips_cpu_info entry for the processor or ISA given
11780 by CPU_STRING. Return null if the string isn't recognized.
11782 A similar function exists in GAS. */
11784 static const struct mips_cpu_info *
11785 mips_parse_cpu (const char *cpu_string)
11790 /* In the past, we allowed upper-case CPU names, but it doesn't
11791 work well with the multilib machinery. */
11792 for (s = cpu_string; *s != 0; s++)
11795 warning (0, "the cpu name must be lower case");
11799 /* 'from-abi' selects the most compatible architecture for the given
11800 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
11801 EABIs, we have to decide whether we're using the 32-bit or 64-bit
11802 version. Look first at the -mgp options, if given, otherwise base
11803 the choice on MASK_64BIT in TARGET_DEFAULT. */
11804 if (strcasecmp (cpu_string, "from-abi") == 0)
11805 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
11806 : ABI_NEEDS_64BIT_REGS ? 3
11807 : (TARGET_64BIT ? 3 : 1));
11809 /* 'default' has traditionally been a no-op. Probably not very useful. */
11810 if (strcasecmp (cpu_string, "default") == 0)
11813 for (i = 0; i < ARRAY_SIZE (mips_cpu_info_table); i++)
11814 if (mips_matching_cpu_name_p (mips_cpu_info_table[i].name, cpu_string))
11815 return mips_cpu_info_table + i;
11821 /* Set up globals to generate code for the ISA or processor
11822 described by INFO. */
11825 mips_set_architecture (const struct mips_cpu_info *info)
11829 mips_arch_info = info;
11830 mips_arch = info->cpu;
11831 mips_isa = info->isa;
11836 /* Likewise for tuning. */
11839 mips_set_tune (const struct mips_cpu_info *info)
11843 mips_tune_info = info;
11844 mips_tune = info->cpu;
11848 /* Implement TARGET_HANDLE_OPTION. */
11851 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
11856 if (strcmp (arg, "32") == 0)
11858 else if (strcmp (arg, "o64") == 0)
11859 mips_abi = ABI_O64;
11860 else if (strcmp (arg, "n32") == 0)
11861 mips_abi = ABI_N32;
11862 else if (strcmp (arg, "64") == 0)
11864 else if (strcmp (arg, "eabi") == 0)
11865 mips_abi = ABI_EABI;
11872 return mips_parse_cpu (arg) != 0;
11875 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
11876 return mips_isa_info != 0;
11878 case OPT_mno_flush_func:
11879 mips_cache_flush_func = NULL;
11882 case OPT_mcode_readable_:
11883 if (strcmp (arg, "yes") == 0)
11884 mips_code_readable = CODE_READABLE_YES;
11885 else if (strcmp (arg, "pcrel") == 0)
11886 mips_code_readable = CODE_READABLE_PCREL;
11887 else if (strcmp (arg, "no") == 0)
11888 mips_code_readable = CODE_READABLE_NO;
11898 /* Set up the threshold for data to go into the small data area, instead
11899 of the normal data area, and detect any conflicts in the switches. */
11902 override_options (void)
11904 int i, start, regno;
11905 enum machine_mode mode;
11907 #ifdef SUBTARGET_OVERRIDE_OPTIONS
11908 SUBTARGET_OVERRIDE_OPTIONS;
11911 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
11913 /* The following code determines the architecture and register size.
11914 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
11915 The GAS and GCC code should be kept in sync as much as possible. */
11917 if (mips_arch_string != 0)
11918 mips_set_architecture (mips_parse_cpu (mips_arch_string));
11920 if (mips_isa_info != 0)
11922 if (mips_arch_info == 0)
11923 mips_set_architecture (mips_isa_info);
11924 else if (mips_arch_info->isa != mips_isa_info->isa)
11925 error ("-%s conflicts with the other architecture options, "
11926 "which specify a %s processor",
11927 mips_isa_info->name,
11928 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
11931 if (mips_arch_info == 0)
11933 #ifdef MIPS_CPU_STRING_DEFAULT
11934 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
11936 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
11940 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
11941 error ("-march=%s is not compatible with the selected ABI",
11942 mips_arch_info->name);
11944 /* Optimize for mips_arch, unless -mtune selects a different processor. */
11945 if (mips_tune_string != 0)
11946 mips_set_tune (mips_parse_cpu (mips_tune_string));
11948 if (mips_tune_info == 0)
11949 mips_set_tune (mips_arch_info);
11951 /* Set cost structure for the processor. */
11953 mips_cost = &mips_rtx_cost_optimize_size;
11955 mips_cost = &mips_rtx_cost_data[mips_tune];
11957 /* If the user hasn't specified a branch cost, use the processor's
11959 if (mips_branch_cost == 0)
11960 mips_branch_cost = mips_cost->branch_cost;
11962 if ((target_flags_explicit & MASK_64BIT) != 0)
11964 /* The user specified the size of the integer registers. Make sure
11965 it agrees with the ABI and ISA. */
11966 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
11967 error ("-mgp64 used with a 32-bit processor");
11968 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
11969 error ("-mgp32 used with a 64-bit ABI");
11970 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
11971 error ("-mgp64 used with a 32-bit ABI");
11975 /* Infer the integer register size from the ABI and processor.
11976 Restrict ourselves to 32-bit registers if that's all the
11977 processor has, or if the ABI cannot handle 64-bit registers. */
11978 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
11979 target_flags &= ~MASK_64BIT;
11981 target_flags |= MASK_64BIT;
11984 if ((target_flags_explicit & MASK_FLOAT64) != 0)
11986 /* Really, -mfp32 and -mfp64 are ornamental options. There's
11987 only one right answer here. */
11988 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
11989 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
11990 else if (!TARGET_64BIT && TARGET_FLOAT64
11991 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
11992 error ("-mgp32 and -mfp64 can only be combined if the target"
11993 " supports the mfhc1 and mthc1 instructions");
11994 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
11995 error ("unsupported combination: %s", "-mfp64 -msingle-float");
11999 /* -msingle-float selects 32-bit float registers. Otherwise the
12000 float registers should be the same size as the integer ones. */
12001 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
12002 target_flags |= MASK_FLOAT64;
12004 target_flags &= ~MASK_FLOAT64;
12007 /* End of code shared with GAS. */
12009 if ((target_flags_explicit & MASK_LONG64) == 0)
12011 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
12012 target_flags |= MASK_LONG64;
12014 target_flags &= ~MASK_LONG64;
12017 if (!TARGET_OLDABI)
12018 flag_pcc_struct_return = 0;
12020 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
12022 /* If neither -mbranch-likely nor -mno-branch-likely was given
12023 on the command line, set MASK_BRANCHLIKELY based on the target
12024 architecture and tuning flags. Annulled delay slots are a
12025 size win, so we only consider the processor-specific tuning
12026 for !optimize_size. */
12027 if (ISA_HAS_BRANCHLIKELY
12029 || (mips_tune_info->tune_flags & PTF_AVOID_BRANCHLIKELY) == 0))
12030 target_flags |= MASK_BRANCHLIKELY;
12032 target_flags &= ~MASK_BRANCHLIKELY;
12034 else if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
12035 warning (0, "the %qs architecture does not support branch-likely"
12036 " instructions", mips_arch_info->name);
12038 /* The effect of -mabicalls isn't defined for the EABI. */
12039 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
12041 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
12042 target_flags &= ~MASK_ABICALLS;
12045 /* MIPS16 cannot generate PIC yet. */
12046 if (TARGET_MIPS16 && (flag_pic || TARGET_ABICALLS))
12048 sorry ("MIPS16 PIC");
12049 target_flags &= ~MASK_ABICALLS;
12050 flag_pic = flag_pie = flag_shlib = 0;
12053 if (TARGET_ABICALLS)
12054 /* We need to set flag_pic for executables as well as DSOs
12055 because we may reference symbols that are not defined in
12056 the final executable. (MIPS does not use things like
12057 copy relocs, for example.)
12059 Also, there is a body of code that uses __PIC__ to distinguish
12060 between -mabicalls and -mno-abicalls code. */
12063 /* -mvr4130-align is a "speed over size" optimization: it usually produces
12064 faster code, but at the expense of more nops. Enable it at -O3 and
12066 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
12067 target_flags |= MASK_VR4130_ALIGN;
12069 /* Prefer a call to memcpy over inline code when optimizing for size,
12070 though see MOVE_RATIO in mips.h. */
12071 if (optimize_size && (target_flags_explicit & MASK_MEMCPY) == 0)
12072 target_flags |= MASK_MEMCPY;
12074 /* If we have a nonzero small-data limit, check that the -mgpopt
12075 setting is consistent with the other target flags. */
12076 if (mips_section_threshold > 0)
12080 if (!TARGET_MIPS16 && !TARGET_EXPLICIT_RELOCS)
12081 error ("%<-mno-gpopt%> needs %<-mexplicit-relocs%>");
12083 TARGET_LOCAL_SDATA = false;
12084 TARGET_EXTERN_SDATA = false;
12088 if (TARGET_VXWORKS_RTP)
12089 warning (0, "cannot use small-data accesses for %qs", "-mrtp");
12091 if (TARGET_ABICALLS)
12092 warning (0, "cannot use small-data accesses for %qs",
12097 #ifdef MIPS_TFMODE_FORMAT
12098 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
12101 /* Make sure that the user didn't turn off paired single support when
12102 MIPS-3D support is requested. */
12103 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
12104 && !TARGET_PAIRED_SINGLE_FLOAT)
12105 error ("-mips3d requires -mpaired-single");
12107 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
12109 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
12111 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
12112 and TARGET_HARD_FLOAT_ABI are both true. */
12113 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT_ABI))
12114 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
12116 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
12118 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
12119 error ("-mips3d/-mpaired-single must be used with -mips64");
12121 /* If TARGET_DSPR2, enable MASK_DSP. */
12123 target_flags |= MASK_DSP;
12125 mips_init_print_operand_punct ();
12127 /* Set up array to map GCC register number to debug register number.
12128 Ignore the special purpose register numbers. */
12130 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12132 mips_dbx_regno[i] = INVALID_REGNUM;
12133 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
12134 mips_dwarf_regno[i] = i;
12136 mips_dwarf_regno[i] = INVALID_REGNUM;
12139 start = GP_DBX_FIRST - GP_REG_FIRST;
12140 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
12141 mips_dbx_regno[i] = i + start;
12143 start = FP_DBX_FIRST - FP_REG_FIRST;
12144 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
12145 mips_dbx_regno[i] = i + start;
12147 /* HI and LO debug registers use big-endian ordering. */
12148 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
12149 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
12150 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
12151 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
12152 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
12154 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
12155 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
12158 /* Set up mips_hard_regno_mode_ok. */
12159 for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
12160 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
12161 mips_hard_regno_mode_ok[(int)mode][regno]
12162 = mips_hard_regno_mode_ok_p (regno, mode);
12164 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
12165 initialized yet, so we can't use that here. */
12166 gpr_mode = TARGET_64BIT ? DImode : SImode;
12168 /* Function to allocate machine-dependent function status. */
12169 init_machine_status = &mips_init_machine_status;
12171 /* Default to working around R4000 errata only if the processor
12172 was selected explicitly. */
12173 if ((target_flags_explicit & MASK_FIX_R4000) == 0
12174 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
12175 target_flags |= MASK_FIX_R4000;
12177 /* Default to working around R4400 errata only if the processor
12178 was selected explicitly. */
12179 if ((target_flags_explicit & MASK_FIX_R4400) == 0
12180 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
12181 target_flags |= MASK_FIX_R4400;
12183 /* Save base state of options. */
12184 mips_base_mips16 = TARGET_MIPS16;
12185 mips_base_target_flags = target_flags;
12186 mips_flag_delayed_branch = flag_delayed_branch;
12187 mips_base_schedule_insns = flag_schedule_insns;
12188 mips_base_reorder_blocks_and_partition = flag_reorder_blocks_and_partition;
12189 mips_base_move_loop_invariants = flag_move_loop_invariants;
12190 mips_base_align_loops = align_loops;
12191 mips_base_align_jumps = align_jumps;
12192 mips_base_align_functions = align_functions;
12194 /* Now select the mips16 or 32-bit instruction set, as requested. */
12195 mips_set_mips16_mode (mips_base_mips16);
12198 /* Swap the register information for registers I and I + 1, which
12199 currently have the wrong endianness. Note that the registers'
12200 fixedness and call-clobberedness might have been set on the
12204 mips_swap_registers (unsigned int i)
12209 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
12210 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
12212 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
12213 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
12214 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
12215 SWAP_STRING (reg_names[i], reg_names[i + 1]);
12221 /* Implement CONDITIONAL_REGISTER_USAGE. */
12224 mips_conditional_register_usage (void)
12230 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
12231 fixed_regs[regno] = call_used_regs[regno] = 1;
12233 if (!TARGET_HARD_FLOAT)
12237 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
12238 fixed_regs[regno] = call_used_regs[regno] = 1;
12239 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12240 fixed_regs[regno] = call_used_regs[regno] = 1;
12242 else if (! ISA_HAS_8CC)
12246 /* We only have a single condition code register. We
12247 implement this by hiding all the condition code registers,
12248 and generating RTL that refers directly to ST_REG_FIRST. */
12249 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
12250 fixed_regs[regno] = call_used_regs[regno] = 1;
12252 /* In mips16 mode, we permit the $t temporary registers to be used
12253 for reload. We prohibit the unused $s registers, since they
12254 are caller saved, and saving them via a mips16 register would
12255 probably waste more time than just reloading the value. */
12258 fixed_regs[18] = call_used_regs[18] = 1;
12259 fixed_regs[19] = call_used_regs[19] = 1;
12260 fixed_regs[20] = call_used_regs[20] = 1;
12261 fixed_regs[21] = call_used_regs[21] = 1;
12262 fixed_regs[22] = call_used_regs[22] = 1;
12263 fixed_regs[23] = call_used_regs[23] = 1;
12264 fixed_regs[26] = call_used_regs[26] = 1;
12265 fixed_regs[27] = call_used_regs[27] = 1;
12266 fixed_regs[30] = call_used_regs[30] = 1;
12268 /* fp20-23 are now caller saved. */
12269 if (mips_abi == ABI_64)
12272 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
12273 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12275 /* Odd registers from fp21 to fp31 are now caller saved. */
12276 if (mips_abi == ABI_N32)
12279 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
12280 call_really_used_regs[regno] = call_used_regs[regno] = 1;
12282 /* Make sure that double-register accumulator values are correctly
12283 ordered for the current endianness. */
12284 if (TARGET_LITTLE_ENDIAN)
12287 mips_swap_registers (MD_REG_FIRST);
12288 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
12289 mips_swap_registers (regno);
12293 /* On the mips16, we want to allocate $24 (T_REG) before other
12294 registers for instructions for which it is possible. This helps
12295 avoid shuffling registers around in order to set up for an xor,
12296 encouraging the compiler to use a cmp instead. */
12299 mips_order_regs_for_local_alloc (void)
12303 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
12304 reg_alloc_order[i] = i;
12308 /* It really doesn't matter where we put register 0, since it is
12309 a fixed register anyhow. */
12310 reg_alloc_order[0] = 24;
12311 reg_alloc_order[24] = 0;
12315 /* Initialize the GCC target structure. */
12316 #undef TARGET_ASM_ALIGNED_HI_OP
12317 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
12318 #undef TARGET_ASM_ALIGNED_SI_OP
12319 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
12320 #undef TARGET_ASM_ALIGNED_DI_OP
12321 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
12323 #undef TARGET_ASM_FUNCTION_PROLOGUE
12324 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
12325 #undef TARGET_ASM_FUNCTION_EPILOGUE
12326 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
12327 #undef TARGET_ASM_SELECT_RTX_SECTION
12328 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
12329 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
12330 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
12332 #undef TARGET_SCHED_INIT
12333 #define TARGET_SCHED_INIT mips_sched_init
12334 #undef TARGET_SCHED_REORDER
12335 #define TARGET_SCHED_REORDER mips_sched_reorder
12336 #undef TARGET_SCHED_REORDER2
12337 #define TARGET_SCHED_REORDER2 mips_sched_reorder
12338 #undef TARGET_SCHED_VARIABLE_ISSUE
12339 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
12340 #undef TARGET_SCHED_ADJUST_COST
12341 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
12342 #undef TARGET_SCHED_ISSUE_RATE
12343 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
12344 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
12345 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
12346 mips_multipass_dfa_lookahead
12348 #undef TARGET_DEFAULT_TARGET_FLAGS
12349 #define TARGET_DEFAULT_TARGET_FLAGS \
12351 | TARGET_CPU_DEFAULT \
12352 | TARGET_ENDIAN_DEFAULT \
12353 | TARGET_FP_EXCEPTIONS_DEFAULT \
12354 | MASK_CHECK_ZERO_DIV \
12356 #undef TARGET_HANDLE_OPTION
12357 #define TARGET_HANDLE_OPTION mips_handle_option
12359 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
12360 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
12362 #undef TARGET_INSERT_ATTRIBUTES
12363 #define TARGET_INSERT_ATTRIBUTES mips_insert_attributes
12364 #undef TARGET_MERGE_DECL_ATTRIBUTES
12365 #define TARGET_MERGE_DECL_ATTRIBUTES mips_merge_decl_attributes
12366 #undef TARGET_SET_CURRENT_FUNCTION
12367 #define TARGET_SET_CURRENT_FUNCTION mips_set_current_function
12369 #undef TARGET_VALID_POINTER_MODE
12370 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
12371 #undef TARGET_RTX_COSTS
12372 #define TARGET_RTX_COSTS mips_rtx_costs
12373 #undef TARGET_ADDRESS_COST
12374 #define TARGET_ADDRESS_COST mips_address_cost
12376 #undef TARGET_IN_SMALL_DATA_P
12377 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
12379 #undef TARGET_MACHINE_DEPENDENT_REORG
12380 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
12382 #undef TARGET_ASM_FILE_START
12383 #define TARGET_ASM_FILE_START mips_file_start
12384 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
12385 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
12387 #undef TARGET_INIT_LIBFUNCS
12388 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
12390 #undef TARGET_BUILD_BUILTIN_VA_LIST
12391 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
12392 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
12393 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
12395 #undef TARGET_PROMOTE_FUNCTION_ARGS
12396 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
12397 #undef TARGET_PROMOTE_FUNCTION_RETURN
12398 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
12399 #undef TARGET_PROMOTE_PROTOTYPES
12400 #define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
12402 #undef TARGET_RETURN_IN_MEMORY
12403 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
12404 #undef TARGET_RETURN_IN_MSB
12405 #define TARGET_RETURN_IN_MSB mips_return_in_msb
12407 #undef TARGET_ASM_OUTPUT_MI_THUNK
12408 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
12409 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
12410 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
12412 #undef TARGET_SETUP_INCOMING_VARARGS
12413 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
12414 #undef TARGET_STRICT_ARGUMENT_NAMING
12415 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
12416 #undef TARGET_MUST_PASS_IN_STACK
12417 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
12418 #undef TARGET_PASS_BY_REFERENCE
12419 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
12420 #undef TARGET_CALLEE_COPIES
12421 #define TARGET_CALLEE_COPIES mips_callee_copies
12422 #undef TARGET_ARG_PARTIAL_BYTES
12423 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
12425 #undef TARGET_MODE_REP_EXTENDED
12426 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
12428 #undef TARGET_VECTOR_MODE_SUPPORTED_P
12429 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
12431 #undef TARGET_SCALAR_MODE_SUPPORTED_P
12432 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
12434 #undef TARGET_INIT_BUILTINS
12435 #define TARGET_INIT_BUILTINS mips_init_builtins
12436 #undef TARGET_EXPAND_BUILTIN
12437 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
12439 #undef TARGET_HAVE_TLS
12440 #define TARGET_HAVE_TLS HAVE_AS_TLS
12442 #undef TARGET_CANNOT_FORCE_CONST_MEM
12443 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
12445 #undef TARGET_ENCODE_SECTION_INFO
12446 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
12448 #undef TARGET_ATTRIBUTE_TABLE
12449 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
12450 /* All our function attributes are related to how out-of-line copies should
12451 be compiled or called. They don't in themselves prevent inlining. */
12452 #undef TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P
12453 #define TARGET_FUNCTION_ATTRIBUTE_INLINABLE_P hook_bool_const_tree_true
12455 #undef TARGET_EXTRA_LIVE_ON_ENTRY
12456 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
12458 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
12459 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
12460 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
12461 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
12463 #undef TARGET_COMP_TYPE_ATTRIBUTES
12464 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
12466 #ifdef HAVE_AS_DTPRELWORD
12467 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
12468 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
12470 #undef TARGET_DWARF_REGISTER_SPAN
12471 #define TARGET_DWARF_REGISTER_SPAN mips_dwarf_register_span
12473 struct gcc_target targetm = TARGET_INITIALIZER;
12475 #include "gt-mips.h"