1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
61 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
62 #define UNSPEC_ADDRESS_P(X) \
63 (GET_CODE (X) == UNSPEC \
64 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
65 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
71 /* Extract the symbol type from UNSPEC wrapper X. */
72 #define UNSPEC_ADDRESS_TYPE(X) \
73 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
75 /* The maximum distance between the top of the stack frame and the
76 value $sp has when we save & restore registers.
78 Use a maximum gap of 0x100 in the mips16 case. We can then use
79 unextended instructions to save and restore registers, and to
80 allocate and deallocate the top part of the frame.
82 The value in the !mips16 case must be a SMALL_OPERAND and must
83 preserve the maximum stack alignment. */
84 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
86 /* True if INSN is a mips.md pattern or asm statement. */
87 #define USEFUL_INSN_P(INSN) \
89 && GET_CODE (PATTERN (INSN)) != USE \
90 && GET_CODE (PATTERN (INSN)) != CLOBBER \
91 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
92 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
94 /* If INSN is a delayed branch sequence, return the first instruction
95 in the sequence, otherwise return INSN itself. */
96 #define SEQ_BEGIN(INSN) \
97 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
98 ? XVECEXP (PATTERN (INSN), 0, 0) \
101 /* Likewise for the last instruction in a delayed branch sequence. */
102 #define SEQ_END(INSN) \
103 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
104 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
107 /* Execute the following loop body with SUBINSN set to each instruction
108 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
109 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
110 for ((SUBINSN) = SEQ_BEGIN (INSN); \
111 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
112 (SUBINSN) = NEXT_INSN (SUBINSN))
114 /* Classifies an address.
117 A natural register + offset address. The register satisfies
118 mips_valid_base_register_p and the offset is a const_arith_operand.
121 A LO_SUM rtx. The first operand is a valid base register and
122 the second operand is a symbolic address.
125 A signed 16-bit constant address.
128 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
129 enum mips_address_type {
136 /* Classifies the prototype of a builtin function. */
137 enum mips_function_type
139 MIPS_V2SF_FTYPE_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
142 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
143 MIPS_V2SF_FTYPE_SF_SF,
144 MIPS_INT_FTYPE_V2SF_V2SF,
145 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
146 MIPS_INT_FTYPE_SF_SF,
147 MIPS_INT_FTYPE_DF_DF,
154 /* For MIPS DSP ASE */
156 MIPS_DI_FTYPE_DI_SI_SI,
157 MIPS_DI_FTYPE_DI_V2HI_V2HI,
158 MIPS_DI_FTYPE_DI_V4QI_V4QI,
160 MIPS_SI_FTYPE_PTR_SI,
164 MIPS_SI_FTYPE_V2HI_V2HI,
166 MIPS_SI_FTYPE_V4QI_V4QI,
169 MIPS_V2HI_FTYPE_SI_SI,
170 MIPS_V2HI_FTYPE_V2HI,
171 MIPS_V2HI_FTYPE_V2HI_SI,
172 MIPS_V2HI_FTYPE_V2HI_V2HI,
173 MIPS_V2HI_FTYPE_V4QI,
174 MIPS_V2HI_FTYPE_V4QI_V2HI,
176 MIPS_V4QI_FTYPE_V2HI_V2HI,
177 MIPS_V4QI_FTYPE_V4QI_SI,
178 MIPS_V4QI_FTYPE_V4QI_V4QI,
179 MIPS_VOID_FTYPE_SI_SI,
180 MIPS_VOID_FTYPE_V2HI_V2HI,
181 MIPS_VOID_FTYPE_V4QI_V4QI,
183 /* For MIPS DSP REV 2 ASE. */
184 MIPS_V4QI_FTYPE_V4QI,
185 MIPS_SI_FTYPE_SI_SI_SI,
186 MIPS_DI_FTYPE_DI_USI_USI,
188 MIPS_DI_FTYPE_USI_USI,
189 MIPS_V2HI_FTYPE_SI_SI_SI,
195 /* Specifies how a builtin function should be converted into rtl. */
196 enum mips_builtin_type
198 /* The builtin corresponds directly to an .md pattern. The return
199 value is mapped to operand 0 and the arguments are mapped to
200 operands 1 and above. */
203 /* The builtin corresponds directly to an .md pattern. There is no return
204 value and the arguments are mapped to operands 0 and above. */
205 MIPS_BUILTIN_DIRECT_NO_TARGET,
207 /* The builtin corresponds to a comparison instruction followed by
208 a mips_cond_move_tf_ps pattern. The first two arguments are the
209 values to compare and the second two arguments are the vector
210 operands for the movt.ps or movf.ps instruction (in assembly order). */
214 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
215 of this instruction is the result of the comparison, which has mode
216 CCV2 or CCV4. The function arguments are mapped to operands 1 and
217 above. The function's return value is an SImode boolean that is
218 true under the following conditions:
220 MIPS_BUILTIN_CMP_ANY: one of the registers is true
221 MIPS_BUILTIN_CMP_ALL: all of the registers are true
222 MIPS_BUILTIN_CMP_LOWER: the first register is true
223 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
224 MIPS_BUILTIN_CMP_ANY,
225 MIPS_BUILTIN_CMP_ALL,
226 MIPS_BUILTIN_CMP_UPPER,
227 MIPS_BUILTIN_CMP_LOWER,
229 /* As above, but the instruction only sets a single $fcc register. */
230 MIPS_BUILTIN_CMP_SINGLE,
232 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
233 MIPS_BUILTIN_BPOSGE32
236 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
237 #define MIPS_FP_CONDITIONS(MACRO) \
255 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
256 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
257 enum mips_fp_condition {
258 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
261 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
262 #define STRINGIFY(X) #X
263 static const char *const mips_fp_conditions[] = {
264 MIPS_FP_CONDITIONS (STRINGIFY)
267 /* A function to save or store a register. The first argument is the
268 register and the second is the stack slot. */
269 typedef void (*mips_save_restore_fn) (rtx, rtx);
271 struct mips16_constant;
272 struct mips_arg_info;
273 struct mips_address_info;
274 struct mips_integer_op;
277 static enum mips_symbol_type mips_classify_symbol (rtx);
278 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
279 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
280 static bool mips_classify_address (struct mips_address_info *, rtx,
281 enum machine_mode, int);
282 static bool mips_cannot_force_const_mem (rtx);
283 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
284 static int mips_symbol_insns (enum mips_symbol_type);
285 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
286 static rtx mips_force_temporary (rtx, rtx);
287 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
288 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
289 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
290 static unsigned int mips_build_lower (struct mips_integer_op *,
291 unsigned HOST_WIDE_INT);
292 static unsigned int mips_build_integer (struct mips_integer_op *,
293 unsigned HOST_WIDE_INT);
294 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
295 static int m16_check_op (rtx, int, int, int);
296 static bool mips_rtx_costs (rtx, int, int, int *);
297 static int mips_address_cost (rtx);
298 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
299 static void mips_load_call_address (rtx, rtx, int);
300 static bool mips_function_ok_for_sibcall (tree, tree);
301 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
302 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
303 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
304 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
305 tree, int, struct mips_arg_info *);
306 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
307 static void mips_set_architecture (const struct mips_cpu_info *);
308 static void mips_set_tune (const struct mips_cpu_info *);
309 static bool mips_handle_option (size_t, const char *, int);
310 static struct machine_function *mips_init_machine_status (void);
311 static void print_operand_reloc (FILE *, rtx, const char **);
312 static void mips_file_start (void);
313 static bool mips_rewrite_small_data_p (rtx);
314 static int mips_small_data_pattern_1 (rtx *, void *);
315 static int mips_rewrite_small_data_1 (rtx *, void *);
316 static bool mips_function_has_gp_insn (void);
317 static unsigned int mips_global_pointer (void);
318 static bool mips_save_reg_p (unsigned int);
319 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
320 mips_save_restore_fn);
321 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
322 static void mips_output_cplocal (void);
323 static void mips_emit_loadgp (void);
324 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
325 static void mips_set_frame_expr (rtx);
326 static rtx mips_frame_set (rtx, rtx);
327 static void mips_save_reg (rtx, rtx);
328 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
329 static void mips_restore_reg (rtx, rtx);
330 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
331 HOST_WIDE_INT, tree);
332 static int symbolic_expression_p (rtx);
333 static section *mips_select_rtx_section (enum machine_mode, rtx,
334 unsigned HOST_WIDE_INT);
335 static section *mips_function_rodata_section (tree);
336 static bool mips_in_small_data_p (tree);
337 static bool mips_use_anchors_for_symbol_p (rtx);
338 static int mips_fpr_return_fields (tree, tree *);
339 static bool mips_return_in_msb (tree);
340 static rtx mips_return_fpr_pair (enum machine_mode mode,
341 enum machine_mode mode1, HOST_WIDE_INT,
342 enum machine_mode mode2, HOST_WIDE_INT);
343 static rtx mips16_gp_pseudo_reg (void);
344 static void mips16_fp_args (FILE *, int, int);
345 static void build_mips16_function_stub (FILE *);
346 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
347 static void dump_constants (struct mips16_constant *, rtx);
348 static int mips16_insn_length (rtx);
349 static int mips16_rewrite_pool_refs (rtx *, void *);
350 static void mips16_lay_out_constants (void);
351 static void mips_sim_reset (struct mips_sim *);
352 static void mips_sim_init (struct mips_sim *, state_t);
353 static void mips_sim_next_cycle (struct mips_sim *);
354 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
355 static int mips_sim_wait_regs_2 (rtx *, void *);
356 static void mips_sim_wait_regs_1 (rtx *, void *);
357 static void mips_sim_wait_regs (struct mips_sim *, rtx);
358 static void mips_sim_wait_units (struct mips_sim *, rtx);
359 static void mips_sim_wait_insn (struct mips_sim *, rtx);
360 static void mips_sim_record_set (rtx, rtx, void *);
361 static void mips_sim_issue_insn (struct mips_sim *, rtx);
362 static void mips_sim_issue_nop (struct mips_sim *);
363 static void mips_sim_finish_insn (struct mips_sim *, rtx);
364 static void vr4130_avoid_branch_rt_conflict (rtx);
365 static void vr4130_align_insns (void);
366 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
367 static void mips_avoid_hazards (void);
368 static void mips_reorg (void);
369 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
370 static bool mips_matching_cpu_name_p (const char *, const char *);
371 static const struct mips_cpu_info *mips_parse_cpu (const char *);
372 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
373 static bool mips_return_in_memory (tree, tree);
374 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
375 static void mips_macc_chains_record (rtx);
376 static void mips_macc_chains_reorder (rtx *, int);
377 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
378 static bool vr4130_true_reg_dependence_p (rtx);
379 static bool vr4130_swap_insns_p (rtx, rtx);
380 static void vr4130_reorder (rtx *, int);
381 static void mips_promote_ready (rtx *, int, int);
382 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
383 static int mips_variable_issue (FILE *, int, rtx, int);
384 static int mips_adjust_cost (rtx, rtx, rtx, int);
385 static int mips_issue_rate (void);
386 static int mips_multipass_dfa_lookahead (void);
387 static void mips_init_libfuncs (void);
388 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
390 static tree mips_build_builtin_va_list (void);
391 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
392 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
394 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
396 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
398 static bool mips_valid_pointer_mode (enum machine_mode);
399 static bool mips_vector_mode_supported_p (enum machine_mode);
400 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
401 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
402 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
403 static void mips_init_builtins (void);
404 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
405 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
406 enum insn_code, enum mips_fp_condition,
408 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
409 enum insn_code, enum mips_fp_condition,
411 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
412 static void mips_encode_section_info (tree, rtx, int);
413 static void mips_extra_live_on_entry (bitmap);
414 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
416 /* Structure to be filled in by compute_frame_size with register
417 save masks, and offsets for the current function. */
419 struct mips_frame_info GTY(())
421 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
422 HOST_WIDE_INT var_size; /* # bytes that variables take up */
423 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
424 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
425 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
426 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
427 unsigned int mask; /* mask of saved gp registers */
428 unsigned int fmask; /* mask of saved fp registers */
429 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
430 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
431 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
432 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
433 bool initialized; /* true if frame size already calculated */
434 int num_gp; /* number of gp registers saved */
435 int num_fp; /* number of fp registers saved */
438 struct machine_function GTY(()) {
439 /* Pseudo-reg holding the value of $28 in a mips16 function which
440 refers to GP relative global variables. */
441 rtx mips16_gp_pseudo_rtx;
443 /* The number of extra stack bytes taken up by register varargs.
444 This area is allocated by the callee at the very top of the frame. */
447 /* Current frame information, calculated by compute_frame_size. */
448 struct mips_frame_info frame;
450 /* The register to use as the global pointer within this function. */
451 unsigned int global_pointer;
453 /* True if mips_adjust_insn_length should ignore an instruction's
455 bool ignore_hazard_length_p;
457 /* True if the whole function is suitable for .set noreorder and
459 bool all_noreorder_p;
461 /* True if the function is known to have an instruction that needs $gp. */
465 /* Information about a single argument. */
468 /* True if the argument is passed in a floating-point register, or
469 would have been if we hadn't run out of registers. */
472 /* The number of words passed in registers, rounded up. */
473 unsigned int reg_words;
475 /* For EABI, the offset of the first register from GP_ARG_FIRST or
476 FP_ARG_FIRST. For other ABIs, the offset of the first register from
477 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
478 comment for details).
480 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
482 unsigned int reg_offset;
484 /* The number of words that must be passed on the stack, rounded up. */
485 unsigned int stack_words;
487 /* The offset from the start of the stack overflow area of the argument's
488 first stack word. Only meaningful when STACK_WORDS is nonzero. */
489 unsigned int stack_offset;
493 /* Information about an address described by mips_address_type.
499 REG is the base register and OFFSET is the constant offset.
502 REG is the register that contains the high part of the address,
503 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
504 is the type of OFFSET's symbol.
507 SYMBOL_TYPE is the type of symbol being referenced. */
509 struct mips_address_info
511 enum mips_address_type type;
514 enum mips_symbol_type symbol_type;
518 /* One stage in a constant building sequence. These sequences have
522 A = A CODE[1] VALUE[1]
523 A = A CODE[2] VALUE[2]
526 where A is an accumulator, each CODE[i] is a binary rtl operation
527 and each VALUE[i] is a constant integer. */
528 struct mips_integer_op {
530 unsigned HOST_WIDE_INT value;
534 /* The largest number of operations needed to load an integer constant.
535 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
536 When the lowest bit is clear, we can try, but reject a sequence with
537 an extra SLL at the end. */
538 #define MIPS_MAX_INTEGER_OPS 7
541 /* Global variables for machine-dependent things. */
543 /* Threshold for data being put into the small data/bss area, instead
544 of the normal data area. */
545 int mips_section_threshold = -1;
547 /* Count the number of .file directives, so that .loc is up to date. */
548 int num_source_filenames = 0;
550 /* Count the number of sdb related labels are generated (to find block
551 start and end boundaries). */
552 int sdb_label_count = 0;
554 /* Next label # for each statement for Silicon Graphics IRIS systems. */
557 /* Name of the file containing the current function. */
558 const char *current_function_file = "";
560 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
566 /* The next branch instruction is a branch likely, not branch normal. */
567 int mips_branch_likely;
569 /* The operands passed to the last cmpMM expander. */
572 /* The target cpu for code generation. */
573 enum processor_type mips_arch;
574 const struct mips_cpu_info *mips_arch_info;
576 /* The target cpu for optimization and scheduling. */
577 enum processor_type mips_tune;
578 const struct mips_cpu_info *mips_tune_info;
580 /* Which instruction set architecture to use. */
583 /* Which ABI to use. */
584 int mips_abi = MIPS_ABI_DEFAULT;
586 /* Cost information to use. */
587 const struct mips_rtx_cost_data *mips_cost;
589 /* Whether we are generating mips16 hard float code. In mips16 mode
590 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
591 -msoft-float was not specified by the user, which means that we
592 should arrange to call mips32 hard floating point code. */
593 int mips16_hard_float;
595 /* The architecture selected by -mipsN. */
596 static const struct mips_cpu_info *mips_isa_info;
598 /* If TRUE, we split addresses into their high and low parts in the RTL. */
599 int mips_split_addresses;
601 /* Mode used for saving/restoring general purpose registers. */
602 static enum machine_mode gpr_mode;
604 /* Array giving truth value on whether or not a given hard register
605 can support a given mode. */
606 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
608 /* List of all MIPS punctuation characters used by print_operand. */
609 char mips_print_operand_punct[256];
611 /* Map GCC register number to debugger register number. */
612 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
614 /* A copy of the original flag_delayed_branch: see override_options. */
615 static int mips_flag_delayed_branch;
617 static GTY (()) int mips_output_filename_first_time = 1;
619 /* mips_split_p[X] is true if symbols of type X can be split by
620 mips_split_symbol(). */
621 bool mips_split_p[NUM_SYMBOL_TYPES];
623 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
624 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
625 if they are matched by a special .md file pattern. */
626 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
628 /* Likewise for HIGHs. */
629 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
631 /* Map hard register number to register class */
632 const enum reg_class mips_regno_to_class[] =
634 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
635 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
636 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
637 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
638 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
639 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
640 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
641 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
642 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
643 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
644 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
645 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
646 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
647 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
648 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
649 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
650 HI_REG, LO_REG, NO_REGS, ST_REGS,
651 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
652 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
653 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
654 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
655 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
656 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
657 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
658 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
659 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
660 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
661 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
662 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
663 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
664 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
665 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
666 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
667 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
668 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
669 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
670 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
671 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
672 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
673 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
674 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
675 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
676 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
677 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
678 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
679 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
680 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
683 /* Table of machine dependent attributes. */
684 const struct attribute_spec mips_attribute_table[] =
686 { "long_call", 0, 0, false, true, true, NULL },
687 { NULL, 0, 0, false, false, false, NULL }
690 /* A table describing all the processors gcc knows about. Names are
691 matched in the order listed. The first mention of an ISA level is
692 taken as the canonical name for that ISA.
694 To ease comparison, please keep this table in the same order as
695 gas's mips_cpu_info_table[]. */
696 const struct mips_cpu_info mips_cpu_info_table[] = {
697 /* Entries for generic ISAs */
698 { "mips1", PROCESSOR_R3000, 1 },
699 { "mips2", PROCESSOR_R6000, 2 },
700 { "mips3", PROCESSOR_R4000, 3 },
701 { "mips4", PROCESSOR_R8000, 4 },
702 { "mips32", PROCESSOR_4KC, 32 },
703 { "mips32r2", PROCESSOR_M4K, 33 },
704 { "mips64", PROCESSOR_5KC, 64 },
707 { "r3000", PROCESSOR_R3000, 1 },
708 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
709 { "r3900", PROCESSOR_R3900, 1 },
712 { "r6000", PROCESSOR_R6000, 2 },
715 { "r4000", PROCESSOR_R4000, 3 },
716 { "vr4100", PROCESSOR_R4100, 3 },
717 { "vr4111", PROCESSOR_R4111, 3 },
718 { "vr4120", PROCESSOR_R4120, 3 },
719 { "vr4130", PROCESSOR_R4130, 3 },
720 { "vr4300", PROCESSOR_R4300, 3 },
721 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
722 { "r4600", PROCESSOR_R4600, 3 },
723 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
724 { "r4650", PROCESSOR_R4650, 3 },
727 { "r8000", PROCESSOR_R8000, 4 },
728 { "vr5000", PROCESSOR_R5000, 4 },
729 { "vr5400", PROCESSOR_R5400, 4 },
730 { "vr5500", PROCESSOR_R5500, 4 },
731 { "rm7000", PROCESSOR_R7000, 4 },
732 { "rm9000", PROCESSOR_R9000, 4 },
735 { "4kc", PROCESSOR_4KC, 32 },
736 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
737 { "4kp", PROCESSOR_4KP, 32 },
739 /* MIPS32 Release 2 */
740 { "m4k", PROCESSOR_M4K, 33 },
741 { "4kec", PROCESSOR_4KC, 33 },
742 { "4kem", PROCESSOR_4KC, 33 },
743 { "4kep", PROCESSOR_4KP, 33 },
744 { "24kc", PROCESSOR_24KC, 33 }, /* 24K no FPU */
745 { "24kf", PROCESSOR_24KF, 33 }, /* 24K 1:2 FPU */
746 { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
747 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
748 { "24kef", PROCESSOR_24KF, 33 },
749 { "24kex", PROCESSOR_24KX, 33 },
750 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
751 { "34kf", PROCESSOR_24KF, 33 },
752 { "34kx", PROCESSOR_24KX, 33 },
755 { "5kc", PROCESSOR_5KC, 64 },
756 { "5kf", PROCESSOR_5KF, 64 },
757 { "20kc", PROCESSOR_20KC, 64 },
758 { "sb1", PROCESSOR_SB1, 64 },
759 { "sb1a", PROCESSOR_SB1A, 64 },
760 { "sr71000", PROCESSOR_SR71000, 64 },
766 /* Default costs. If these are used for a processor we should look
767 up the actual costs. */
768 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
769 COSTS_N_INSNS (7), /* fp_mult_sf */ \
770 COSTS_N_INSNS (8), /* fp_mult_df */ \
771 COSTS_N_INSNS (23), /* fp_div_sf */ \
772 COSTS_N_INSNS (36), /* fp_div_df */ \
773 COSTS_N_INSNS (10), /* int_mult_si */ \
774 COSTS_N_INSNS (10), /* int_mult_di */ \
775 COSTS_N_INSNS (69), /* int_div_si */ \
776 COSTS_N_INSNS (69), /* int_div_di */ \
777 2, /* branch_cost */ \
778 4 /* memory_latency */
780 /* Need to replace these with the costs of calling the appropriate
782 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
783 COSTS_N_INSNS (256), /* fp_mult_sf */ \
784 COSTS_N_INSNS (256), /* fp_mult_df */ \
785 COSTS_N_INSNS (256), /* fp_div_sf */ \
786 COSTS_N_INSNS (256) /* fp_div_df */
788 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
790 COSTS_N_INSNS (1), /* fp_add */
791 COSTS_N_INSNS (1), /* fp_mult_sf */
792 COSTS_N_INSNS (1), /* fp_mult_df */
793 COSTS_N_INSNS (1), /* fp_div_sf */
794 COSTS_N_INSNS (1), /* fp_div_df */
795 COSTS_N_INSNS (1), /* int_mult_si */
796 COSTS_N_INSNS (1), /* int_mult_di */
797 COSTS_N_INSNS (1), /* int_div_si */
798 COSTS_N_INSNS (1), /* int_div_di */
800 4 /* memory_latency */
803 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
806 COSTS_N_INSNS (2), /* fp_add */
807 COSTS_N_INSNS (4), /* fp_mult_sf */
808 COSTS_N_INSNS (5), /* fp_mult_df */
809 COSTS_N_INSNS (12), /* fp_div_sf */
810 COSTS_N_INSNS (19), /* fp_div_df */
811 COSTS_N_INSNS (12), /* int_mult_si */
812 COSTS_N_INSNS (12), /* int_mult_di */
813 COSTS_N_INSNS (35), /* int_div_si */
814 COSTS_N_INSNS (35), /* int_div_di */
816 4 /* memory_latency */
821 COSTS_N_INSNS (6), /* int_mult_si */
822 COSTS_N_INSNS (6), /* int_mult_di */
823 COSTS_N_INSNS (36), /* int_div_si */
824 COSTS_N_INSNS (36), /* int_div_di */
826 4 /* memory_latency */
830 COSTS_N_INSNS (36), /* int_mult_si */
831 COSTS_N_INSNS (36), /* int_mult_di */
832 COSTS_N_INSNS (37), /* int_div_si */
833 COSTS_N_INSNS (37), /* int_div_di */
835 4 /* memory_latency */
839 COSTS_N_INSNS (4), /* int_mult_si */
840 COSTS_N_INSNS (11), /* int_mult_di */
841 COSTS_N_INSNS (36), /* int_div_si */
842 COSTS_N_INSNS (68), /* int_div_di */
844 4 /* memory_latency */
847 COSTS_N_INSNS (4), /* fp_add */
848 COSTS_N_INSNS (4), /* fp_mult_sf */
849 COSTS_N_INSNS (5), /* fp_mult_df */
850 COSTS_N_INSNS (17), /* fp_div_sf */
851 COSTS_N_INSNS (32), /* fp_div_df */
852 COSTS_N_INSNS (4), /* int_mult_si */
853 COSTS_N_INSNS (11), /* int_mult_di */
854 COSTS_N_INSNS (36), /* int_div_si */
855 COSTS_N_INSNS (68), /* int_div_di */
857 4 /* memory_latency */
864 COSTS_N_INSNS (5), /* int_mult_si */
865 COSTS_N_INSNS (5), /* int_mult_di */
866 COSTS_N_INSNS (41), /* int_div_si */
867 COSTS_N_INSNS (41), /* int_div_di */
869 4 /* memory_latency */
872 COSTS_N_INSNS (8), /* fp_add */
873 COSTS_N_INSNS (8), /* fp_mult_sf */
874 COSTS_N_INSNS (10), /* fp_mult_df */
875 COSTS_N_INSNS (34), /* fp_div_sf */
876 COSTS_N_INSNS (64), /* fp_div_df */
877 COSTS_N_INSNS (5), /* int_mult_si */
878 COSTS_N_INSNS (5), /* int_mult_di */
879 COSTS_N_INSNS (41), /* int_div_si */
880 COSTS_N_INSNS (41), /* int_div_di */
882 4 /* memory_latency */
885 COSTS_N_INSNS (4), /* fp_add */
886 COSTS_N_INSNS (4), /* fp_mult_sf */
887 COSTS_N_INSNS (5), /* fp_mult_df */
888 COSTS_N_INSNS (17), /* fp_div_sf */
889 COSTS_N_INSNS (32), /* fp_div_df */
890 COSTS_N_INSNS (5), /* int_mult_si */
891 COSTS_N_INSNS (5), /* int_mult_di */
892 COSTS_N_INSNS (41), /* int_div_si */
893 COSTS_N_INSNS (41), /* int_div_di */
895 4 /* memory_latency */
901 COSTS_N_INSNS (2), /* fp_add */
902 COSTS_N_INSNS (4), /* fp_mult_sf */
903 COSTS_N_INSNS (5), /* fp_mult_df */
904 COSTS_N_INSNS (12), /* fp_div_sf */
905 COSTS_N_INSNS (19), /* fp_div_df */
906 COSTS_N_INSNS (2), /* int_mult_si */
907 COSTS_N_INSNS (2), /* int_mult_di */
908 COSTS_N_INSNS (35), /* int_div_si */
909 COSTS_N_INSNS (35), /* int_div_di */
911 4 /* memory_latency */
914 COSTS_N_INSNS (3), /* fp_add */
915 COSTS_N_INSNS (5), /* fp_mult_sf */
916 COSTS_N_INSNS (6), /* fp_mult_df */
917 COSTS_N_INSNS (15), /* fp_div_sf */
918 COSTS_N_INSNS (16), /* fp_div_df */
919 COSTS_N_INSNS (17), /* int_mult_si */
920 COSTS_N_INSNS (17), /* int_mult_di */
921 COSTS_N_INSNS (38), /* int_div_si */
922 COSTS_N_INSNS (38), /* int_div_di */
924 6 /* memory_latency */
927 COSTS_N_INSNS (6), /* fp_add */
928 COSTS_N_INSNS (7), /* fp_mult_sf */
929 COSTS_N_INSNS (8), /* fp_mult_df */
930 COSTS_N_INSNS (23), /* fp_div_sf */
931 COSTS_N_INSNS (36), /* fp_div_df */
932 COSTS_N_INSNS (10), /* int_mult_si */
933 COSTS_N_INSNS (10), /* int_mult_di */
934 COSTS_N_INSNS (69), /* int_div_si */
935 COSTS_N_INSNS (69), /* int_div_di */
937 6 /* memory_latency */
949 /* The only costs that appear to be updated here are
950 integer multiplication. */
952 COSTS_N_INSNS (4), /* int_mult_si */
953 COSTS_N_INSNS (6), /* int_mult_di */
954 COSTS_N_INSNS (69), /* int_div_si */
955 COSTS_N_INSNS (69), /* int_div_di */
957 4 /* memory_latency */
969 COSTS_N_INSNS (6), /* fp_add */
970 COSTS_N_INSNS (4), /* fp_mult_sf */
971 COSTS_N_INSNS (5), /* fp_mult_df */
972 COSTS_N_INSNS (23), /* fp_div_sf */
973 COSTS_N_INSNS (36), /* fp_div_df */
974 COSTS_N_INSNS (5), /* int_mult_si */
975 COSTS_N_INSNS (5), /* int_mult_di */
976 COSTS_N_INSNS (36), /* int_div_si */
977 COSTS_N_INSNS (36), /* int_div_di */
979 4 /* memory_latency */
982 COSTS_N_INSNS (6), /* fp_add */
983 COSTS_N_INSNS (5), /* fp_mult_sf */
984 COSTS_N_INSNS (6), /* fp_mult_df */
985 COSTS_N_INSNS (30), /* fp_div_sf */
986 COSTS_N_INSNS (59), /* fp_div_df */
987 COSTS_N_INSNS (3), /* int_mult_si */
988 COSTS_N_INSNS (4), /* int_mult_di */
989 COSTS_N_INSNS (42), /* int_div_si */
990 COSTS_N_INSNS (74), /* int_div_di */
992 4 /* memory_latency */
995 COSTS_N_INSNS (6), /* fp_add */
996 COSTS_N_INSNS (5), /* fp_mult_sf */
997 COSTS_N_INSNS (6), /* fp_mult_df */
998 COSTS_N_INSNS (30), /* fp_div_sf */
999 COSTS_N_INSNS (59), /* fp_div_df */
1000 COSTS_N_INSNS (5), /* int_mult_si */
1001 COSTS_N_INSNS (9), /* int_mult_di */
1002 COSTS_N_INSNS (42), /* int_div_si */
1003 COSTS_N_INSNS (74), /* int_div_di */
1004 1, /* branch_cost */
1005 4 /* memory_latency */
1008 /* The only costs that are changed here are
1009 integer multiplication. */
1010 COSTS_N_INSNS (6), /* fp_add */
1011 COSTS_N_INSNS (7), /* fp_mult_sf */
1012 COSTS_N_INSNS (8), /* fp_mult_df */
1013 COSTS_N_INSNS (23), /* fp_div_sf */
1014 COSTS_N_INSNS (36), /* fp_div_df */
1015 COSTS_N_INSNS (5), /* int_mult_si */
1016 COSTS_N_INSNS (9), /* int_mult_di */
1017 COSTS_N_INSNS (69), /* int_div_si */
1018 COSTS_N_INSNS (69), /* int_div_di */
1019 1, /* branch_cost */
1020 4 /* memory_latency */
1026 /* The only costs that are changed here are
1027 integer multiplication. */
1028 COSTS_N_INSNS (6), /* fp_add */
1029 COSTS_N_INSNS (7), /* fp_mult_sf */
1030 COSTS_N_INSNS (8), /* fp_mult_df */
1031 COSTS_N_INSNS (23), /* fp_div_sf */
1032 COSTS_N_INSNS (36), /* fp_div_df */
1033 COSTS_N_INSNS (3), /* int_mult_si */
1034 COSTS_N_INSNS (8), /* int_mult_di */
1035 COSTS_N_INSNS (69), /* int_div_si */
1036 COSTS_N_INSNS (69), /* int_div_di */
1037 1, /* branch_cost */
1038 4 /* memory_latency */
1041 /* These costs are the same as the SB-1A below. */
1042 COSTS_N_INSNS (4), /* fp_add */
1043 COSTS_N_INSNS (4), /* fp_mult_sf */
1044 COSTS_N_INSNS (4), /* fp_mult_df */
1045 COSTS_N_INSNS (24), /* fp_div_sf */
1046 COSTS_N_INSNS (32), /* fp_div_df */
1047 COSTS_N_INSNS (3), /* int_mult_si */
1048 COSTS_N_INSNS (4), /* int_mult_di */
1049 COSTS_N_INSNS (36), /* int_div_si */
1050 COSTS_N_INSNS (68), /* int_div_di */
1051 1, /* branch_cost */
1052 4 /* memory_latency */
1055 /* These costs are the same as the SB-1 above. */
1056 COSTS_N_INSNS (4), /* fp_add */
1057 COSTS_N_INSNS (4), /* fp_mult_sf */
1058 COSTS_N_INSNS (4), /* fp_mult_df */
1059 COSTS_N_INSNS (24), /* fp_div_sf */
1060 COSTS_N_INSNS (32), /* fp_div_df */
1061 COSTS_N_INSNS (3), /* int_mult_si */
1062 COSTS_N_INSNS (4), /* int_mult_di */
1063 COSTS_N_INSNS (36), /* int_div_si */
1064 COSTS_N_INSNS (68), /* int_div_di */
1065 1, /* branch_cost */
1066 4 /* memory_latency */
1074 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1075 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1076 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1079 /* Initialize the GCC target structure. */
1080 #undef TARGET_ASM_ALIGNED_HI_OP
1081 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1082 #undef TARGET_ASM_ALIGNED_SI_OP
1083 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1084 #undef TARGET_ASM_ALIGNED_DI_OP
1085 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1087 #undef TARGET_ASM_FUNCTION_PROLOGUE
1088 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1089 #undef TARGET_ASM_FUNCTION_EPILOGUE
1090 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1091 #undef TARGET_ASM_SELECT_RTX_SECTION
1092 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1093 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1094 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1096 #undef TARGET_SCHED_REORDER
1097 #define TARGET_SCHED_REORDER mips_sched_reorder
1098 #undef TARGET_SCHED_VARIABLE_ISSUE
1099 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1100 #undef TARGET_SCHED_ADJUST_COST
1101 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1102 #undef TARGET_SCHED_ISSUE_RATE
1103 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1104 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1105 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1106 mips_multipass_dfa_lookahead
1108 #undef TARGET_DEFAULT_TARGET_FLAGS
1109 #define TARGET_DEFAULT_TARGET_FLAGS \
1111 | TARGET_CPU_DEFAULT \
1112 | TARGET_ENDIAN_DEFAULT \
1113 | TARGET_FP_EXCEPTIONS_DEFAULT \
1114 | MASK_CHECK_ZERO_DIV \
1116 #undef TARGET_HANDLE_OPTION
1117 #define TARGET_HANDLE_OPTION mips_handle_option
1119 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1120 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1122 #undef TARGET_VALID_POINTER_MODE
1123 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1124 #undef TARGET_RTX_COSTS
1125 #define TARGET_RTX_COSTS mips_rtx_costs
1126 #undef TARGET_ADDRESS_COST
1127 #define TARGET_ADDRESS_COST mips_address_cost
1129 #undef TARGET_IN_SMALL_DATA_P
1130 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1132 #undef TARGET_MACHINE_DEPENDENT_REORG
1133 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1135 #undef TARGET_ASM_FILE_START
1136 #define TARGET_ASM_FILE_START mips_file_start
1137 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1138 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1140 #undef TARGET_INIT_LIBFUNCS
1141 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1143 #undef TARGET_BUILD_BUILTIN_VA_LIST
1144 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1145 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1146 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1148 #undef TARGET_PROMOTE_FUNCTION_ARGS
1149 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1150 #undef TARGET_PROMOTE_FUNCTION_RETURN
1151 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1152 #undef TARGET_PROMOTE_PROTOTYPES
1153 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1155 #undef TARGET_RETURN_IN_MEMORY
1156 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1157 #undef TARGET_RETURN_IN_MSB
1158 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1160 #undef TARGET_ASM_OUTPUT_MI_THUNK
1161 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1162 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1163 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1165 #undef TARGET_SETUP_INCOMING_VARARGS
1166 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1167 #undef TARGET_STRICT_ARGUMENT_NAMING
1168 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1169 #undef TARGET_MUST_PASS_IN_STACK
1170 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1171 #undef TARGET_PASS_BY_REFERENCE
1172 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1173 #undef TARGET_CALLEE_COPIES
1174 #define TARGET_CALLEE_COPIES mips_callee_copies
1175 #undef TARGET_ARG_PARTIAL_BYTES
1176 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1178 #undef TARGET_MODE_REP_EXTENDED
1179 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1181 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1182 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1184 #undef TARGET_INIT_BUILTINS
1185 #define TARGET_INIT_BUILTINS mips_init_builtins
1186 #undef TARGET_EXPAND_BUILTIN
1187 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1189 #undef TARGET_HAVE_TLS
1190 #define TARGET_HAVE_TLS HAVE_AS_TLS
1192 #undef TARGET_CANNOT_FORCE_CONST_MEM
1193 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1195 #undef TARGET_ENCODE_SECTION_INFO
1196 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1198 #undef TARGET_ATTRIBUTE_TABLE
1199 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1201 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1202 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1204 #undef TARGET_MIN_ANCHOR_OFFSET
1205 #define TARGET_MIN_ANCHOR_OFFSET -32768
1206 #undef TARGET_MAX_ANCHOR_OFFSET
1207 #define TARGET_MAX_ANCHOR_OFFSET 32767
1208 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1209 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1210 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1211 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1213 struct gcc_target targetm = TARGET_INITIALIZER;
1215 /* Return true if SYMBOL_REF X is associated with a global symbol
1216 (in the STB_GLOBAL sense). */
1219 mips_global_symbol_p (rtx x)
1223 decl = SYMBOL_REF_DECL (x);
1225 return !SYMBOL_REF_LOCAL_P (x);
1227 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1228 or weak symbols. Relocations in the object file will be against
1229 the target symbol, so it's that symbol's binding that matters here. */
1230 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1233 /* Return true if SYMBOL_REF X binds locally. */
1236 mips_symbol_binds_local_p (rtx x)
1238 return (SYMBOL_REF_DECL (x)
1239 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1240 : SYMBOL_REF_LOCAL_P (x));
1243 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
1245 static enum mips_symbol_type
1246 mips_classify_symbol (rtx x)
1248 if (GET_CODE (x) == LABEL_REF)
1251 return SYMBOL_CONSTANT_POOL;
1252 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1253 return SYMBOL_GOT_PAGE_OFST;
1254 return SYMBOL_GENERAL;
1257 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1259 if (SYMBOL_REF_TLS_MODEL (x))
1262 if (CONSTANT_POOL_ADDRESS_P (x))
1265 return SYMBOL_CONSTANT_POOL;
1267 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1268 return SYMBOL_SMALL_DATA;
1271 /* Do not use small-data accesses for weak symbols; they may end up
1273 if (SYMBOL_REF_SMALL_P (x)
1274 && !SYMBOL_REF_WEAK (x))
1275 return SYMBOL_SMALL_DATA;
1277 if (TARGET_ABICALLS)
1279 /* Don't use GOT accesses for locally-binding symbols; we can use
1280 %hi and %lo instead. */
1281 if (TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x))
1282 return SYMBOL_GENERAL;
1284 /* There are three cases to consider:
1286 - o32 PIC (either with or without explicit relocs)
1287 - n32/n64 PIC without explicit relocs
1288 - n32/n64 PIC with explicit relocs
1290 In the first case, both local and global accesses will use an
1291 R_MIPS_GOT16 relocation. We must correctly predict which of
1292 the two semantics (local or global) the assembler and linker
1293 will apply. The choice depends on the symbol's binding rather
1294 than its visibility.
1296 In the second case, the assembler will not use R_MIPS_GOT16
1297 relocations, but it chooses between local and global accesses
1298 in the same way as for o32 PIC.
1300 In the third case we have more freedom since both forms of
1301 access will work for any kind of symbol. However, there seems
1302 little point in doing things differently. */
1303 if (mips_global_symbol_p (x))
1304 return SYMBOL_GOT_DISP;
1306 return SYMBOL_GOT_PAGE_OFST;
1309 return SYMBOL_GENERAL;
1312 /* Return true if X is a symbolic constant that can be calculated in
1313 the same way as a bare symbol. If it is, store the type of the
1314 symbol in *SYMBOL_TYPE. */
1317 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
1321 split_const (x, &x, &offset);
1322 if (UNSPEC_ADDRESS_P (x))
1323 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1324 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1326 *symbol_type = mips_classify_symbol (x);
1327 if (*symbol_type == SYMBOL_TLS)
1333 if (offset == const0_rtx)
1336 /* Check whether a nonzero offset is valid for the underlying
1338 switch (*symbol_type)
1340 case SYMBOL_GENERAL:
1341 case SYMBOL_64_HIGH:
1344 /* If the target has 64-bit pointers and the object file only
1345 supports 32-bit symbols, the values of those symbols will be
1346 sign-extended. In this case we can't allow an arbitrary offset
1347 in case the 32-bit value X + OFFSET has a different sign from X. */
1348 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1349 return offset_within_block_p (x, INTVAL (offset));
1351 /* In other cases the relocations can handle any offset. */
1354 case SYMBOL_CONSTANT_POOL:
1355 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1356 In this case, we no longer have access to the underlying constant,
1357 but the original symbol-based access was known to be valid. */
1358 if (GET_CODE (x) == LABEL_REF)
1363 case SYMBOL_SMALL_DATA:
1364 /* Make sure that the offset refers to something within the
1365 same object block. This should guarantee that the final
1366 PC- or GP-relative offset is within the 16-bit limit. */
1367 return offset_within_block_p (x, INTVAL (offset));
1369 case SYMBOL_GOT_PAGE_OFST:
1370 case SYMBOL_GOTOFF_PAGE:
1371 /* If the symbol is global, the GOT entry will contain the symbol's
1372 address, and we will apply a 16-bit offset after loading it.
1373 If the symbol is local, the linker should provide enough local
1374 GOT entries for a 16-bit offset, but larger offsets may lead
1376 return SMALL_INT (offset);
1378 case SYMBOL_GOT_DISP:
1379 case SYMBOL_GOTOFF_DISP:
1380 case SYMBOL_GOTOFF_CALL:
1381 case SYMBOL_GOTOFF_LOADGP:
1386 case SYMBOL_GOTTPREL:
1394 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1397 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1399 if (!HARD_REGISTER_NUM_P (regno))
1403 regno = reg_renumber[regno];
1406 /* These fake registers will be eliminated to either the stack or
1407 hard frame pointer, both of which are usually valid base registers.
1408 Reload deals with the cases where the eliminated form isn't valid. */
1409 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1412 /* In mips16 mode, the stack pointer can only address word and doubleword
1413 values, nothing smaller. There are two problems here:
1415 (a) Instantiating virtual registers can introduce new uses of the
1416 stack pointer. If these virtual registers are valid addresses,
1417 the stack pointer should be too.
1419 (b) Most uses of the stack pointer are not made explicit until
1420 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1421 We don't know until that stage whether we'll be eliminating to the
1422 stack pointer (which needs the restriction) or the hard frame
1423 pointer (which doesn't).
1425 All in all, it seems more consistent to only enforce this restriction
1426 during and after reload. */
1427 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1428 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1430 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1434 /* Return true if X is a valid base register for the given mode.
1435 Allow only hard registers if STRICT. */
1438 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1440 if (!strict && GET_CODE (x) == SUBREG)
1444 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1448 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1449 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1452 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1453 enum machine_mode mode)
1455 switch (symbol_type)
1457 case SYMBOL_GENERAL:
1458 return !TARGET_MIPS16;
1460 case SYMBOL_SMALL_DATA:
1463 case SYMBOL_CONSTANT_POOL:
1464 /* PC-relative addressing is only available for lw and ld. */
1465 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1467 case SYMBOL_GOT_PAGE_OFST:
1470 case SYMBOL_GOT_DISP:
1471 /* The address will have to be loaded from the GOT first. */
1474 case SYMBOL_GOTOFF_PAGE:
1475 case SYMBOL_GOTOFF_DISP:
1476 case SYMBOL_GOTOFF_CALL:
1477 case SYMBOL_GOTOFF_LOADGP:
1482 case SYMBOL_GOTTPREL:
1484 case SYMBOL_64_HIGH:
1493 /* Return true if X is a valid address for machine mode MODE. If it is,
1494 fill in INFO appropriately. STRICT is true if we should only accept
1495 hard base registers. */
1498 mips_classify_address (struct mips_address_info *info, rtx x,
1499 enum machine_mode mode, int strict)
1501 switch (GET_CODE (x))
1505 info->type = ADDRESS_REG;
1507 info->offset = const0_rtx;
1508 return mips_valid_base_register_p (info->reg, mode, strict);
1511 info->type = ADDRESS_REG;
1512 info->reg = XEXP (x, 0);
1513 info->offset = XEXP (x, 1);
1514 return (mips_valid_base_register_p (info->reg, mode, strict)
1515 && const_arith_operand (info->offset, VOIDmode));
1518 info->type = ADDRESS_LO_SUM;
1519 info->reg = XEXP (x, 0);
1520 info->offset = XEXP (x, 1);
1521 return (mips_valid_base_register_p (info->reg, mode, strict)
1522 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1523 && mips_symbolic_address_p (info->symbol_type, mode)
1524 && mips_lo_relocs[info->symbol_type] != 0);
1527 /* Small-integer addresses don't occur very often, but they
1528 are legitimate if $0 is a valid base register. */
1529 info->type = ADDRESS_CONST_INT;
1530 return !TARGET_MIPS16 && SMALL_INT (x);
1535 info->type = ADDRESS_SYMBOLIC;
1536 return (mips_symbolic_constant_p (x, &info->symbol_type)
1537 && mips_symbolic_address_p (info->symbol_type, mode)
1538 && !mips_split_p[info->symbol_type]);
1545 /* Return true if X is a thread-local symbol. */
1548 mips_tls_operand_p (rtx x)
1550 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1553 /* Return true if X can not be forced into a constant pool. */
1556 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1558 return mips_tls_operand_p (*x);
1561 /* Return true if X can not be forced into a constant pool. */
1564 mips_cannot_force_const_mem (rtx x)
1570 /* As an optimization, reject constants that mips_legitimize_move
1573 Suppose we have a multi-instruction sequence that loads constant C
1574 into register R. If R does not get allocated a hard register, and
1575 R is used in an operand that allows both registers and memory
1576 references, reload will consider forcing C into memory and using
1577 one of the instruction's memory alternatives. Returning false
1578 here will force it to use an input reload instead. */
1579 if (GET_CODE (x) == CONST_INT)
1582 split_const (x, &base, &offset);
1583 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1587 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1593 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1594 constant pools, but normal-mode code doesn't need to. */
1597 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1598 rtx x ATTRIBUTE_UNUSED)
1600 return !TARGET_MIPS16;
1603 /* Return the number of instructions needed to load a symbol of the
1604 given type into a register. If valid in an address, the same number
1605 of instructions are needed for loads and stores. Treat extended
1606 mips16 instructions as two instructions. */
1609 mips_symbol_insns (enum mips_symbol_type type)
1613 case SYMBOL_GENERAL:
1614 /* In mips16 code, general symbols must be fetched from the
1619 /* When using 64-bit symbols, we need 5 preparatory instructions,
1622 lui $at,%highest(symbol)
1623 daddiu $at,$at,%higher(symbol)
1625 daddiu $at,$at,%hi(symbol)
1628 The final address is then $at + %lo(symbol). With 32-bit
1629 symbols we just need a preparatory lui. */
1630 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1632 case SYMBOL_SMALL_DATA:
1635 case SYMBOL_CONSTANT_POOL:
1636 /* This case is for mips16 only. Assume we'll need an
1637 extended instruction. */
1640 case SYMBOL_GOT_PAGE_OFST:
1641 case SYMBOL_GOT_DISP:
1642 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1643 the local/global classification is accurate. See override_options
1646 The worst cases are:
1648 (1) For local symbols when generating o32 or o64 code. The assembler
1654 ...and the final address will be $at + %lo(symbol).
1656 (2) For global symbols when -mxgot. The assembler will use:
1658 lui $at,%got_hi(symbol)
1661 ...and the final address will be $at + %got_lo(symbol). */
1664 case SYMBOL_GOTOFF_PAGE:
1665 case SYMBOL_GOTOFF_DISP:
1666 case SYMBOL_GOTOFF_CALL:
1667 case SYMBOL_GOTOFF_LOADGP:
1668 case SYMBOL_64_HIGH:
1674 case SYMBOL_GOTTPREL:
1676 /* Check whether the offset is a 16- or 32-bit value. */
1677 return mips_split_p[type] ? 2 : 1;
1680 /* We don't treat a bare TLS symbol as a constant. */
1686 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1689 mips_stack_address_p (rtx x, enum machine_mode mode)
1691 struct mips_address_info addr;
1693 return (mips_classify_address (&addr, x, mode, false)
1694 && addr.type == ADDRESS_REG
1695 && addr.reg == stack_pointer_rtx);
1698 /* Return true if a value at OFFSET bytes from BASE can be accessed
1699 using an unextended mips16 instruction. MODE is the mode of the
1702 Usually the offset in an unextended instruction is a 5-bit field.
1703 The offset is unsigned and shifted left once for HIs, twice
1704 for SIs, and so on. An exception is SImode accesses off the
1705 stack pointer, which have an 8-bit immediate field. */
1708 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1711 && GET_CODE (offset) == CONST_INT
1712 && INTVAL (offset) >= 0
1713 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1715 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1716 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1717 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1723 /* Return the number of instructions needed to load or store a value
1724 of mode MODE at X. Return 0 if X isn't valid for MODE.
1726 For mips16 code, count extended instructions as two instructions. */
1729 mips_address_insns (rtx x, enum machine_mode mode)
1731 struct mips_address_info addr;
1734 if (mode == BLKmode)
1735 /* BLKmode is used for single unaligned loads and stores. */
1738 /* Each word of a multi-word value will be accessed individually. */
1739 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1741 if (mips_classify_address (&addr, x, mode, false))
1746 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1750 case ADDRESS_LO_SUM:
1751 return (TARGET_MIPS16 ? factor * 2 : factor);
1753 case ADDRESS_CONST_INT:
1756 case ADDRESS_SYMBOLIC:
1757 return factor * mips_symbol_insns (addr.symbol_type);
1763 /* Likewise for constant X. */
1766 mips_const_insns (rtx x)
1768 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1769 enum mips_symbol_type symbol_type;
1772 switch (GET_CODE (x))
1776 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1777 || !mips_split_p[symbol_type])
1784 /* Unsigned 8-bit constants can be loaded using an unextended
1785 LI instruction. Unsigned 16-bit constants can be loaded
1786 using an extended LI. Negative constants must be loaded
1787 using LI and then negated. */
1788 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1789 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1790 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1791 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1794 return mips_build_integer (codes, INTVAL (x));
1798 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1804 /* See if we can refer to X directly. */
1805 if (mips_symbolic_constant_p (x, &symbol_type))
1806 return mips_symbol_insns (symbol_type);
1808 /* Otherwise try splitting the constant into a base and offset.
1809 16-bit offsets can be added using an extra addiu. Larger offsets
1810 must be calculated separately and then added to the base. */
1811 split_const (x, &x, &offset);
1814 int n = mips_const_insns (x);
1817 if (SMALL_INT (offset))
1820 return n + 1 + mips_build_integer (codes, INTVAL (offset));
1827 return mips_symbol_insns (mips_classify_symbol (x));
1835 /* Return the number of instructions needed for memory reference X.
1836 Count extended mips16 instructions as two instructions. */
1839 mips_fetch_insns (rtx x)
1841 gcc_assert (MEM_P (x));
1842 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1846 /* Return the number of instructions needed for an integer division. */
1849 mips_idiv_insns (void)
1854 if (TARGET_CHECK_ZERO_DIV)
1856 if (GENERATE_DIVIDE_TRAPS)
1862 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1867 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1868 returns a nonzero value if X is a legitimate address for a memory
1869 operand of the indicated MODE. STRICT is nonzero if this function
1870 is called during reload. */
1873 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1875 struct mips_address_info addr;
1877 return mips_classify_address (&addr, x, mode, strict);
1881 /* Copy VALUE to a register and return that register. If new psuedos
1882 are allowed, copy it into a new register, otherwise use DEST. */
1885 mips_force_temporary (rtx dest, rtx value)
1887 if (!no_new_pseudos)
1888 return force_reg (Pmode, value);
1891 emit_move_insn (copy_rtx (dest), value);
1897 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1898 and is used to load the high part into a register. */
1901 mips_split_symbol (rtx temp, rtx addr)
1906 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1907 else if (no_new_pseudos)
1909 emit_insn (gen_load_const_gp (copy_rtx (temp)));
1913 high = mips16_gp_pseudo_reg ();
1914 return gen_rtx_LO_SUM (Pmode, high, addr);
1918 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1919 type SYMBOL_TYPE. */
1922 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1926 split_const (address, &base, &offset);
1927 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1928 UNSPEC_ADDRESS_FIRST + symbol_type);
1929 if (offset != const0_rtx)
1930 base = gen_rtx_PLUS (Pmode, base, offset);
1931 return gen_rtx_CONST (Pmode, base);
1935 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1936 high part to BASE and return the result. Just return BASE otherwise.
1937 TEMP is available as a temporary register if needed.
1939 The returned expression can be used as the first operand to a LO_SUM. */
1942 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1943 enum mips_symbol_type symbol_type)
1945 if (mips_split_p[symbol_type])
1947 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1948 addr = mips_force_temporary (temp, addr);
1949 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1955 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1956 mips_force_temporary; it is only needed when OFFSET is not a
1960 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1962 if (!SMALL_OPERAND (offset))
1967 /* Load the full offset into a register so that we can use
1968 an unextended instruction for the address itself. */
1969 high = GEN_INT (offset);
1974 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1975 high = GEN_INT (CONST_HIGH_PART (offset));
1976 offset = CONST_LOW_PART (offset);
1978 high = mips_force_temporary (temp, high);
1979 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1981 return plus_constant (reg, offset);
1984 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
1985 referencing, and TYPE is the symbol type to use (either global
1986 dynamic or local dynamic). V0 is an RTX for the return value
1987 location. The entire insn sequence is returned. */
1989 static GTY(()) rtx mips_tls_symbol;
1992 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
1994 rtx insn, loc, tga, a0;
1996 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
1998 if (!mips_tls_symbol)
1999 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2001 loc = mips_unspec_address (sym, type);
2005 emit_insn (gen_rtx_SET (Pmode, a0,
2006 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2007 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2008 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2009 CONST_OR_PURE_CALL_P (insn) = 1;
2010 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2011 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2012 insn = get_insns ();
2019 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2020 return value will be a valid address and move_operand (either a REG
2024 mips_legitimize_tls_address (rtx loc)
2026 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2027 enum tls_model model;
2029 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2030 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2032 model = SYMBOL_REF_TLS_MODEL (loc);
2033 /* Only TARGET_ABICALLS code can have more than one module; other
2034 code must be be static and should not use a GOT. All TLS models
2035 reduce to local exec in this situation. */
2036 if (!TARGET_ABICALLS)
2037 model = TLS_MODEL_LOCAL_EXEC;
2041 case TLS_MODEL_GLOBAL_DYNAMIC:
2042 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2043 dest = gen_reg_rtx (Pmode);
2044 emit_libcall_block (insn, dest, v0, loc);
2047 case TLS_MODEL_LOCAL_DYNAMIC:
2048 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2049 tmp1 = gen_reg_rtx (Pmode);
2051 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2052 share the LDM result with other LD model accesses. */
2053 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2055 emit_libcall_block (insn, tmp1, v0, eqv);
2057 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2058 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2059 mips_unspec_address (loc, SYMBOL_DTPREL));
2062 case TLS_MODEL_INITIAL_EXEC:
2063 tmp1 = gen_reg_rtx (Pmode);
2064 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2065 if (Pmode == DImode)
2067 emit_insn (gen_tls_get_tp_di (v1));
2068 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2072 emit_insn (gen_tls_get_tp_si (v1));
2073 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2075 dest = gen_reg_rtx (Pmode);
2076 emit_insn (gen_add3_insn (dest, tmp1, v1));
2079 case TLS_MODEL_LOCAL_EXEC:
2080 if (Pmode == DImode)
2081 emit_insn (gen_tls_get_tp_di (v1));
2083 emit_insn (gen_tls_get_tp_si (v1));
2085 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2086 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2087 mips_unspec_address (loc, SYMBOL_TPREL));
2097 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2098 be legitimized in a way that the generic machinery might not expect,
2099 put the new address in *XLOC and return true. MODE is the mode of
2100 the memory being accessed. */
2103 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2105 enum mips_symbol_type symbol_type;
2107 if (mips_tls_operand_p (*xloc))
2109 *xloc = mips_legitimize_tls_address (*xloc);
2113 /* See if the address can split into a high part and a LO_SUM. */
2114 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2115 && mips_symbolic_address_p (symbol_type, mode)
2116 && mips_split_p[symbol_type])
2118 *xloc = mips_split_symbol (0, *xloc);
2122 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2124 /* Handle REG + CONSTANT using mips_add_offset. */
2127 reg = XEXP (*xloc, 0);
2128 if (!mips_valid_base_register_p (reg, mode, 0))
2129 reg = copy_to_mode_reg (Pmode, reg);
2130 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2138 /* Subroutine of mips_build_integer (with the same interface).
2139 Assume that the final action in the sequence should be a left shift. */
2142 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2144 unsigned int i, shift;
2146 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2147 since signed numbers are easier to load than unsigned ones. */
2149 while ((value & 1) == 0)
2150 value /= 2, shift++;
2152 i = mips_build_integer (codes, value);
2153 codes[i].code = ASHIFT;
2154 codes[i].value = shift;
2159 /* As for mips_build_shift, but assume that the final action will be
2160 an IOR or PLUS operation. */
2163 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2165 unsigned HOST_WIDE_INT high;
2168 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2169 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2171 /* The constant is too complex to load with a simple lui/ori pair
2172 so our goal is to clear as many trailing zeros as possible.
2173 In this case, we know bit 16 is set and that the low 16 bits
2174 form a negative number. If we subtract that number from VALUE,
2175 we will clear at least the lowest 17 bits, maybe more. */
2176 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2177 codes[i].code = PLUS;
2178 codes[i].value = CONST_LOW_PART (value);
2182 i = mips_build_integer (codes, high);
2183 codes[i].code = IOR;
2184 codes[i].value = value & 0xffff;
2190 /* Fill CODES with a sequence of rtl operations to load VALUE.
2191 Return the number of operations needed. */
2194 mips_build_integer (struct mips_integer_op *codes,
2195 unsigned HOST_WIDE_INT value)
2197 if (SMALL_OPERAND (value)
2198 || SMALL_OPERAND_UNSIGNED (value)
2199 || LUI_OPERAND (value))
2201 /* The value can be loaded with a single instruction. */
2202 codes[0].code = UNKNOWN;
2203 codes[0].value = value;
2206 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2208 /* Either the constant is a simple LUI/ORI combination or its
2209 lowest bit is set. We don't want to shift in this case. */
2210 return mips_build_lower (codes, value);
2212 else if ((value & 0xffff) == 0)
2214 /* The constant will need at least three actions. The lowest
2215 16 bits are clear, so the final action will be a shift. */
2216 return mips_build_shift (codes, value);
2220 /* The final action could be a shift, add or inclusive OR.
2221 Rather than use a complex condition to select the best
2222 approach, try both mips_build_shift and mips_build_lower
2223 and pick the one that gives the shortest sequence.
2224 Note that this case is only used once per constant. */
2225 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2226 unsigned int cost, alt_cost;
2228 cost = mips_build_shift (codes, value);
2229 alt_cost = mips_build_lower (alt_codes, value);
2230 if (alt_cost < cost)
2232 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2240 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2243 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2245 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2246 enum machine_mode mode;
2247 unsigned int i, cost;
2250 mode = GET_MODE (dest);
2251 cost = mips_build_integer (codes, value);
2253 /* Apply each binary operation to X. Invariant: X is a legitimate
2254 source operand for a SET pattern. */
2255 x = GEN_INT (codes[0].value);
2256 for (i = 1; i < cost; i++)
2260 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2264 x = force_reg (mode, x);
2265 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2268 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2272 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2273 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2277 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2281 /* Split moves of big integers into smaller pieces. */
2282 if (splittable_const_int_operand (src, mode))
2284 mips_move_integer (dest, dest, INTVAL (src));
2288 /* Split moves of symbolic constants into high/low pairs. */
2289 if (splittable_symbolic_operand (src, mode))
2291 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
2295 if (mips_tls_operand_p (src))
2297 emit_move_insn (dest, mips_legitimize_tls_address (src));
2301 /* If we have (const (plus symbol offset)), load the symbol first
2302 and then add in the offset. This is usually better than forcing
2303 the constant into memory, at least in non-mips16 code. */
2304 split_const (src, &base, &offset);
2306 && offset != const0_rtx
2307 && (!no_new_pseudos || SMALL_INT (offset)))
2309 base = mips_force_temporary (dest, base);
2310 emit_move_insn (dest, mips_add_offset (0, base, INTVAL (offset)));
2314 src = force_const_mem (mode, src);
2316 /* When using explicit relocs, constant pool references are sometimes
2317 not legitimate addresses. */
2318 if (!memory_operand (src, VOIDmode))
2319 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2320 emit_move_insn (dest, src);
2324 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2325 sequence that is valid. */
2328 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2330 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2332 emit_move_insn (dest, force_reg (mode, src));
2336 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2337 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2338 && REG_P (src) && MD_REG_P (REGNO (src))
2339 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2341 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2342 if (GET_MODE_SIZE (mode) <= 4)
2343 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2344 gen_rtx_REG (SImode, REGNO (src)),
2345 gen_rtx_REG (SImode, other_regno)));
2347 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2348 gen_rtx_REG (DImode, REGNO (src)),
2349 gen_rtx_REG (DImode, other_regno)));
2353 /* We need to deal with constants that would be legitimate
2354 immediate_operands but not legitimate move_operands. */
2355 if (CONSTANT_P (src) && !move_operand (src, mode))
2357 mips_legitimize_const_move (mode, dest, src);
2358 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2364 /* We need a lot of little routines to check constant values on the
2365 mips16. These are used to figure out how long the instruction will
2366 be. It would be much better to do this using constraints, but
2367 there aren't nearly enough letters available. */
2370 m16_check_op (rtx op, int low, int high, int mask)
2372 return (GET_CODE (op) == CONST_INT
2373 && INTVAL (op) >= low
2374 && INTVAL (op) <= high
2375 && (INTVAL (op) & mask) == 0);
2379 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2381 return m16_check_op (op, 0x1, 0x8, 0);
2385 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2387 return m16_check_op (op, - 0x8, 0x7, 0);
2391 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2393 return m16_check_op (op, - 0x7, 0x8, 0);
2397 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2399 return m16_check_op (op, - 0x10, 0xf, 0);
2403 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2405 return m16_check_op (op, - 0xf, 0x10, 0);
2409 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2411 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2415 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2417 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2421 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2423 return m16_check_op (op, - 0x80, 0x7f, 0);
2427 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2429 return m16_check_op (op, - 0x7f, 0x80, 0);
2433 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2435 return m16_check_op (op, 0x0, 0xff, 0);
2439 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2441 return m16_check_op (op, - 0xff, 0x0, 0);
2445 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2447 return m16_check_op (op, - 0x1, 0xfe, 0);
2451 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2453 return m16_check_op (op, 0x0, 0xff << 2, 3);
2457 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2459 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2463 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2465 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2469 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2471 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2475 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2477 enum machine_mode mode = GET_MODE (x);
2478 bool float_mode_p = FLOAT_MODE_P (mode);
2485 /* A number between 1 and 8 inclusive is efficient for a shift.
2486 Otherwise, we will need an extended instruction. */
2487 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2488 || (outer_code) == LSHIFTRT)
2490 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2493 *total = COSTS_N_INSNS (1);
2497 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2498 if ((outer_code) == XOR
2499 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2505 /* We may be able to use slt or sltu for a comparison with a
2506 signed 16-bit value. (The boundary conditions aren't quite
2507 right, but this is just a heuristic anyhow.) */
2508 if (((outer_code) == LT || (outer_code) == LE
2509 || (outer_code) == GE || (outer_code) == GT
2510 || (outer_code) == LTU || (outer_code) == LEU
2511 || (outer_code) == GEU || (outer_code) == GTU)
2512 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2518 /* Equality comparisons with 0 are cheap. */
2519 if (((outer_code) == EQ || (outer_code) == NE)
2526 /* Constants in the range 0...255 can be loaded with an unextended
2527 instruction. They are therefore as cheap as a register move.
2529 Given the choice between "li R1,0...255" and "move R1,R2"
2530 (where R2 is a known constant), it is usually better to use "li",
2531 since we do not want to unnecessarily extend the lifetime
2533 if (outer_code == SET
2535 && INTVAL (x) < 256)
2543 /* These can be used anywhere. */
2548 /* Otherwise fall through to the handling below because
2549 we'll need to construct the constant. */
2555 if (LEGITIMATE_CONSTANT_P (x))
2557 *total = COSTS_N_INSNS (1);
2562 /* The value will need to be fetched from the constant pool. */
2563 *total = CONSTANT_POOL_COST;
2569 /* If the address is legitimate, return the number of
2570 instructions it needs, otherwise use the default handling. */
2571 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2574 *total = COSTS_N_INSNS (n + 1);
2581 *total = COSTS_N_INSNS (6);
2585 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2591 if (mode == DImode && !TARGET_64BIT)
2593 *total = COSTS_N_INSNS (2);
2601 if (mode == DImode && !TARGET_64BIT)
2603 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2611 *total = COSTS_N_INSNS (1);
2613 *total = COSTS_N_INSNS (4);
2617 *total = COSTS_N_INSNS (1);
2624 *total = mips_cost->fp_add;
2628 else if (mode == DImode && !TARGET_64BIT)
2630 *total = COSTS_N_INSNS (4);
2636 if (mode == DImode && !TARGET_64BIT)
2638 *total = COSTS_N_INSNS (4);
2645 *total = mips_cost->fp_mult_sf;
2647 else if (mode == DFmode)
2648 *total = mips_cost->fp_mult_df;
2650 else if (mode == SImode)
2651 *total = mips_cost->int_mult_si;
2654 *total = mips_cost->int_mult_di;
2663 *total = mips_cost->fp_div_sf;
2665 *total = mips_cost->fp_div_df;
2674 *total = mips_cost->int_div_di;
2676 *total = mips_cost->int_div_si;
2681 /* A sign extend from SImode to DImode in 64-bit mode is often
2682 zero instructions, because the result can often be used
2683 directly by another instruction; we'll call it one. */
2684 if (TARGET_64BIT && mode == DImode
2685 && GET_MODE (XEXP (x, 0)) == SImode)
2686 *total = COSTS_N_INSNS (1);
2688 *total = COSTS_N_INSNS (2);
2692 if (TARGET_64BIT && mode == DImode
2693 && GET_MODE (XEXP (x, 0)) == SImode)
2694 *total = COSTS_N_INSNS (2);
2696 *total = COSTS_N_INSNS (1);
2700 case UNSIGNED_FLOAT:
2703 case FLOAT_TRUNCATE:
2705 *total = mips_cost->fp_add;
2713 /* Provide the costs of an addressing mode that contains ADDR.
2714 If ADDR is not a valid address, its cost is irrelevant. */
2717 mips_address_cost (rtx addr)
2719 return mips_address_insns (addr, SImode);
2722 /* Return one word of double-word value OP, taking into account the fixed
2723 endianness of certain registers. HIGH_P is true to select the high part,
2724 false to select the low part. */
2727 mips_subword (rtx op, int high_p)
2730 enum machine_mode mode;
2732 mode = GET_MODE (op);
2733 if (mode == VOIDmode)
2736 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2737 byte = UNITS_PER_WORD;
2743 if (FP_REG_P (REGNO (op)))
2744 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2745 if (ACC_HI_REG_P (REGNO (op)))
2746 return gen_rtx_REG (word_mode, high_p ? REGNO (op) : REGNO (op) + 1);
2750 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2752 return simplify_gen_subreg (word_mode, op, mode, byte);
2756 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2759 mips_split_64bit_move_p (rtx dest, rtx src)
2764 /* FP->FP moves can be done in a single instruction. */
2765 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2768 /* Check for floating-point loads and stores. They can be done using
2769 ldc1 and sdc1 on MIPS II and above. */
2772 if (FP_REG_RTX_P (dest) && MEM_P (src))
2774 if (FP_REG_RTX_P (src) && MEM_P (dest))
2781 /* Split a 64-bit move from SRC to DEST assuming that
2782 mips_split_64bit_move_p holds.
2784 Moves into and out of FPRs cause some difficulty here. Such moves
2785 will always be DFmode, since paired FPRs are not allowed to store
2786 DImode values. The most natural representation would be two separate
2787 32-bit moves, such as:
2789 (set (reg:SI $f0) (mem:SI ...))
2790 (set (reg:SI $f1) (mem:SI ...))
2792 However, the second insn is invalid because odd-numbered FPRs are
2793 not allowed to store independent values. Use the patterns load_df_low,
2794 load_df_high and store_df_high instead. */
2797 mips_split_64bit_move (rtx dest, rtx src)
2799 if (FP_REG_RTX_P (dest))
2801 /* Loading an FPR from memory or from GPRs. */
2804 dest = gen_lowpart (DFmode, dest);
2805 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
2806 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
2811 emit_insn (gen_load_df_low (copy_rtx (dest),
2812 mips_subword (src, 0)));
2813 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2817 else if (FP_REG_RTX_P (src))
2819 /* Storing an FPR into memory or GPRs. */
2822 src = gen_lowpart (DFmode, src);
2823 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2824 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
2828 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2829 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2834 /* The operation can be split into two normal moves. Decide in
2835 which order to do them. */
2838 low_dest = mips_subword (dest, 0);
2839 if (REG_P (low_dest)
2840 && reg_overlap_mentioned_p (low_dest, src))
2842 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2843 emit_move_insn (low_dest, mips_subword (src, 0));
2847 emit_move_insn (low_dest, mips_subword (src, 0));
2848 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2853 /* Return the appropriate instructions to move SRC into DEST. Assume
2854 that SRC is operand 1 and DEST is operand 0. */
2857 mips_output_move (rtx dest, rtx src)
2859 enum rtx_code dest_code, src_code;
2862 dest_code = GET_CODE (dest);
2863 src_code = GET_CODE (src);
2864 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2866 if (dbl_p && mips_split_64bit_move_p (dest, src))
2869 if ((src_code == REG && GP_REG_P (REGNO (src)))
2870 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2872 if (dest_code == REG)
2874 if (GP_REG_P (REGNO (dest)))
2875 return "move\t%0,%z1";
2877 if (MD_REG_P (REGNO (dest)))
2880 if (DSP_ACC_REG_P (REGNO (dest)))
2882 static char retval[] = "mt__\t%z1,%q0";
2883 retval[2] = reg_names[REGNO (dest)][4];
2884 retval[3] = reg_names[REGNO (dest)][5];
2888 if (FP_REG_P (REGNO (dest)))
2889 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2891 if (ALL_COP_REG_P (REGNO (dest)))
2893 static char retval[] = "dmtc_\t%z1,%0";
2895 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2896 return (dbl_p ? retval : retval + 1);
2899 if (dest_code == MEM)
2900 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2902 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2904 if (src_code == REG)
2906 if (DSP_ACC_REG_P (REGNO (src)))
2908 static char retval[] = "mf__\t%0,%q1";
2909 retval[2] = reg_names[REGNO (src)][4];
2910 retval[3] = reg_names[REGNO (src)][5];
2914 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2915 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2917 if (FP_REG_P (REGNO (src)))
2918 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2920 if (ALL_COP_REG_P (REGNO (src)))
2922 static char retval[] = "dmfc_\t%0,%1";
2924 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2925 return (dbl_p ? retval : retval + 1);
2929 if (src_code == MEM)
2930 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2932 if (src_code == CONST_INT)
2934 /* Don't use the X format, because that will give out of
2935 range numbers for 64-bit hosts and 32-bit targets. */
2937 return "li\t%0,%1\t\t\t# %X1";
2939 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2942 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2946 if (src_code == HIGH)
2947 return "lui\t%0,%h1";
2949 if (CONST_GP_P (src))
2950 return "move\t%0,%1";
2952 if (symbolic_operand (src, VOIDmode))
2953 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2955 if (src_code == REG && FP_REG_P (REGNO (src)))
2957 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2959 if (GET_MODE (dest) == V2SFmode)
2960 return "mov.ps\t%0,%1";
2962 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2965 if (dest_code == MEM)
2966 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2968 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2970 if (src_code == MEM)
2971 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2973 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2975 static char retval[] = "l_c_\t%0,%1";
2977 retval[1] = (dbl_p ? 'd' : 'w');
2978 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2981 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2983 static char retval[] = "s_c_\t%1,%0";
2985 retval[1] = (dbl_p ? 'd' : 'w');
2986 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2992 /* Restore $gp from its save slot. Valid only when using o32 or
2996 mips_restore_gp (void)
3000 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3002 address = mips_add_offset (pic_offset_table_rtx,
3003 frame_pointer_needed
3004 ? hard_frame_pointer_rtx
3005 : stack_pointer_rtx,
3006 current_function_outgoing_args_size);
3007 slot = gen_rtx_MEM (Pmode, address);
3009 emit_move_insn (pic_offset_table_rtx, slot);
3010 if (!TARGET_EXPLICIT_RELOCS)
3011 emit_insn (gen_blockage ());
3014 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3017 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3019 emit_insn (gen_rtx_SET (VOIDmode, target,
3020 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3023 /* Return true if CMP1 is a suitable second operand for relational
3024 operator CODE. See also the *sCC patterns in mips.md. */
3027 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3033 return reg_or_0_operand (cmp1, VOIDmode);
3037 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3041 return arith_operand (cmp1, VOIDmode);
3044 return sle_operand (cmp1, VOIDmode);
3047 return sleu_operand (cmp1, VOIDmode);
3054 /* Canonicalize LE or LEU comparisons into LT comparisons when
3055 possible to avoid extra instructions or inverting the
3059 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3060 enum machine_mode mode)
3062 HOST_WIDE_INT original, plus_one;
3064 if (GET_CODE (*cmp1) != CONST_INT)
3067 original = INTVAL (*cmp1);
3068 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3073 if (original < plus_one)
3076 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3085 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3098 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3099 result in TARGET. CMP0 and TARGET are register_operands that have
3100 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3101 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3104 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3105 rtx target, rtx cmp0, rtx cmp1)
3107 /* First see if there is a MIPS instruction that can do this operation
3108 with CMP1 in its current form. If not, try to canonicalize the
3109 comparison to LT. If that fails, try doing the same for the
3110 inverse operation. If that also fails, force CMP1 into a register
3112 if (mips_relational_operand_ok_p (code, cmp1))
3113 mips_emit_binary (code, target, cmp0, cmp1);
3114 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3115 mips_emit_binary (code, target, cmp0, cmp1);
3118 enum rtx_code inv_code = reverse_condition (code);
3119 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3121 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3122 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3124 else if (invert_ptr == 0)
3126 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3127 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3128 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3132 *invert_ptr = !*invert_ptr;
3133 mips_emit_binary (inv_code, target, cmp0, cmp1);
3138 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3139 The register will have the same mode as CMP0. */
3142 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3144 if (cmp1 == const0_rtx)
3147 if (uns_arith_operand (cmp1, VOIDmode))
3148 return expand_binop (GET_MODE (cmp0), xor_optab,
3149 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3151 return expand_binop (GET_MODE (cmp0), sub_optab,
3152 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3155 /* Convert *CODE into a code that can be used in a floating-point
3156 scc instruction (c.<cond>.<fmt>). Return true if the values of
3157 the condition code registers will be inverted, with 0 indicating
3158 that the condition holds. */
3161 mips_reverse_fp_cond_p (enum rtx_code *code)
3168 *code = reverse_condition_maybe_unordered (*code);
3176 /* Convert a comparison into something that can be used in a branch or
3177 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3178 being compared and *CODE is the code used to compare them.
3180 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3181 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3182 otherwise any standard branch condition can be used. The standard branch
3185 - EQ/NE between two registers.
3186 - any comparison between a register and zero. */
3189 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3191 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3193 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3195 *op0 = cmp_operands[0];
3196 *op1 = cmp_operands[1];
3198 else if (*code == EQ || *code == NE)
3202 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3207 *op0 = cmp_operands[0];
3208 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3213 /* The comparison needs a separate scc instruction. Store the
3214 result of the scc in *OP0 and compare it against zero. */
3215 bool invert = false;
3216 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3218 mips_emit_int_relational (*code, &invert, *op0,
3219 cmp_operands[0], cmp_operands[1]);
3220 *code = (invert ? EQ : NE);
3225 enum rtx_code cmp_code;
3227 /* Floating-point tests use a separate c.cond.fmt comparison to
3228 set a condition code register. The branch or conditional move
3229 will then compare that register against zero.
3231 Set CMP_CODE to the code of the comparison instruction and
3232 *CODE to the code that the branch or move should use. */
3234 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3236 ? gen_reg_rtx (CCmode)
3237 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3239 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3243 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3244 Store the result in TARGET and return true if successful.
3246 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3249 mips_emit_scc (enum rtx_code code, rtx target)
3251 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3254 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3255 if (code == EQ || code == NE)
3257 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3258 mips_emit_binary (code, target, zie, const0_rtx);
3261 mips_emit_int_relational (code, 0, target,
3262 cmp_operands[0], cmp_operands[1]);
3266 /* Emit the common code for doing conditional branches.
3267 operand[0] is the label to jump to.
3268 The comparison operands are saved away by cmp{si,di,sf,df}. */
3271 gen_conditional_branch (rtx *operands, enum rtx_code code)
3273 rtx op0, op1, condition;
3275 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3276 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3277 emit_jump_insn (gen_condjump (condition, operands[0]));
3282 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3283 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3286 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3287 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3292 reversed_p = mips_reverse_fp_cond_p (&cond);
3293 cmp_result = gen_reg_rtx (CCV2mode);
3294 emit_insn (gen_scc_ps (cmp_result,
3295 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3297 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3300 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3304 /* Emit the common code for conditional moves. OPERANDS is the array
3305 of operands passed to the conditional move define_expand. */
3308 gen_conditional_move (rtx *operands)
3313 code = GET_CODE (operands[1]);
3314 mips_emit_compare (&code, &op0, &op1, true);
3315 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3316 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3317 gen_rtx_fmt_ee (code,
3320 operands[2], operands[3])));
3323 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3324 the conditional_trap expander. */
3327 mips_gen_conditional_trap (rtx *operands)
3330 enum rtx_code cmp_code = GET_CODE (operands[0]);
3331 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3333 /* MIPS conditional trap machine instructions don't have GT or LE
3334 flavors, so we must invert the comparison and convert to LT and
3335 GE, respectively. */
3338 case GT: cmp_code = LT; break;
3339 case LE: cmp_code = GE; break;
3340 case GTU: cmp_code = LTU; break;
3341 case LEU: cmp_code = GEU; break;
3344 if (cmp_code == GET_CODE (operands[0]))
3346 op0 = cmp_operands[0];
3347 op1 = cmp_operands[1];
3351 op0 = cmp_operands[1];
3352 op1 = cmp_operands[0];
3354 op0 = force_reg (mode, op0);
3355 if (!arith_operand (op1, mode))
3356 op1 = force_reg (mode, op1);
3358 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3359 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3363 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3366 mips_ok_for_lazy_binding_p (rtx x)
3368 return (TARGET_USE_GOT
3369 && GET_CODE (x) == SYMBOL_REF
3370 && !mips_symbol_binds_local_p (x));
3373 /* Load function address ADDR into register DEST. SIBCALL_P is true
3374 if the address is needed for a sibling call. */
3377 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3379 /* If we're generating PIC, and this call is to a global function,
3380 try to allow its address to be resolved lazily. This isn't
3381 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3382 to the stub would be our caller's gp, not ours. */
3383 if (TARGET_EXPLICIT_RELOCS
3384 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3385 && mips_ok_for_lazy_binding_p (addr))
3387 rtx high, lo_sum_symbol;
3389 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3390 addr, SYMBOL_GOTOFF_CALL);
3391 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3392 if (Pmode == SImode)
3393 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3395 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3398 emit_move_insn (dest, addr);
3402 /* Expand a call or call_value instruction. RESULT is where the
3403 result will go (null for calls), ADDR is the address of the
3404 function, ARGS_SIZE is the size of the arguments and AUX is
3405 the value passed to us by mips_function_arg. SIBCALL_P is true
3406 if we are expanding a sibling call, false if we're expanding
3410 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3412 rtx orig_addr, pattern, insn;
3415 if (!call_insn_operand (addr, VOIDmode))
3417 addr = gen_reg_rtx (Pmode);
3418 mips_load_call_address (addr, orig_addr, sibcall_p);
3422 && mips16_hard_float
3423 && build_mips16_call_stub (result, addr, args_size,
3424 aux == 0 ? 0 : (int) GET_MODE (aux)))
3428 pattern = (sibcall_p
3429 ? gen_sibcall_internal (addr, args_size)
3430 : gen_call_internal (addr, args_size));
3431 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3435 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3436 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3439 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3440 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3443 pattern = (sibcall_p
3444 ? gen_sibcall_value_internal (result, addr, args_size)
3445 : gen_call_value_internal (result, addr, args_size));
3447 insn = emit_call_insn (pattern);
3449 /* Lazy-binding stubs require $gp to be valid on entry. */
3450 if (mips_ok_for_lazy_binding_p (orig_addr))
3451 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3455 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3458 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3459 tree exp ATTRIBUTE_UNUSED)
3461 return TARGET_SIBCALLS;
3464 /* Emit code to move general operand SRC into condition-code
3465 register DEST. SCRATCH is a scratch TFmode float register.
3472 where FP1 and FP2 are single-precision float registers
3473 taken from SCRATCH. */
3476 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3480 /* Change the source to SFmode. */
3482 src = adjust_address (src, SFmode, 0);
3483 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3484 src = gen_rtx_REG (SFmode, true_regnum (src));
3486 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3487 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3489 emit_move_insn (copy_rtx (fp1), src);
3490 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3491 emit_insn (gen_slt_sf (dest, fp2, fp1));
3494 /* Emit code to change the current function's return address to
3495 ADDRESS. SCRATCH is available as a scratch register, if needed.
3496 ADDRESS and SCRATCH are both word-mode GPRs. */
3499 mips_set_return_address (rtx address, rtx scratch)
3503 compute_frame_size (get_frame_size ());
3504 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3505 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3506 cfun->machine->frame.gp_sp_offset);
3508 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3511 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3512 Assume that the areas do not overlap. */
3515 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3517 HOST_WIDE_INT offset, delta;
3518 unsigned HOST_WIDE_INT bits;
3520 enum machine_mode mode;
3523 /* Work out how many bits to move at a time. If both operands have
3524 half-word alignment, it is usually better to move in half words.
3525 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3526 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3527 Otherwise move word-sized chunks. */
3528 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3529 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3530 bits = BITS_PER_WORD / 2;
3532 bits = BITS_PER_WORD;
3534 mode = mode_for_size (bits, MODE_INT, 0);
3535 delta = bits / BITS_PER_UNIT;
3537 /* Allocate a buffer for the temporary registers. */
3538 regs = alloca (sizeof (rtx) * length / delta);
3540 /* Load as many BITS-sized chunks as possible. Use a normal load if
3541 the source has enough alignment, otherwise use left/right pairs. */
3542 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3544 regs[i] = gen_reg_rtx (mode);
3545 if (MEM_ALIGN (src) >= bits)
3546 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3549 rtx part = adjust_address (src, BLKmode, offset);
3550 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3555 /* Copy the chunks to the destination. */
3556 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3557 if (MEM_ALIGN (dest) >= bits)
3558 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3561 rtx part = adjust_address (dest, BLKmode, offset);
3562 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3566 /* Mop up any left-over bytes. */
3567 if (offset < length)
3569 src = adjust_address (src, BLKmode, offset);
3570 dest = adjust_address (dest, BLKmode, offset);
3571 move_by_pieces (dest, src, length - offset,
3572 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3576 #define MAX_MOVE_REGS 4
3577 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3580 /* Helper function for doing a loop-based block operation on memory
3581 reference MEM. Each iteration of the loop will operate on LENGTH
3584 Create a new base register for use within the loop and point it to
3585 the start of MEM. Create a new memory reference that uses this
3586 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3589 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3590 rtx *loop_reg, rtx *loop_mem)
3592 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3594 /* Although the new mem does not refer to a known location,
3595 it does keep up to LENGTH bytes of alignment. */
3596 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3597 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3601 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3602 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3603 memory regions do not overlap. */
3606 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3608 rtx label, src_reg, dest_reg, final_src;
3609 HOST_WIDE_INT leftover;
3611 leftover = length % MAX_MOVE_BYTES;
3614 /* Create registers and memory references for use within the loop. */
3615 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3616 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3618 /* Calculate the value that SRC_REG should have after the last iteration
3620 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3623 /* Emit the start of the loop. */
3624 label = gen_label_rtx ();
3627 /* Emit the loop body. */
3628 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3630 /* Move on to the next block. */
3631 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3632 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3634 /* Emit the loop condition. */
3635 if (Pmode == DImode)
3636 emit_insn (gen_cmpdi (src_reg, final_src));
3638 emit_insn (gen_cmpsi (src_reg, final_src));
3639 emit_jump_insn (gen_bne (label));
3641 /* Mop up any left-over bytes. */
3643 mips_block_move_straight (dest, src, leftover);
3646 /* Expand a movmemsi instruction. */
3649 mips_expand_block_move (rtx dest, rtx src, rtx length)
3651 if (GET_CODE (length) == CONST_INT)
3653 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3655 mips_block_move_straight (dest, src, INTVAL (length));
3660 mips_block_move_loop (dest, src, INTVAL (length));
3667 /* Argument support functions. */
3669 /* Initialize CUMULATIVE_ARGS for a function. */
3672 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3673 rtx libname ATTRIBUTE_UNUSED)
3675 static CUMULATIVE_ARGS zero_cum;
3676 tree param, next_param;
3679 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3681 /* Determine if this function has variable arguments. This is
3682 indicated by the last argument being 'void_type_mode' if there
3683 are no variable arguments. The standard MIPS calling sequence
3684 passes all arguments in the general purpose registers in this case. */
3686 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3687 param != 0; param = next_param)
3689 next_param = TREE_CHAIN (param);
3690 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3691 cum->gp_reg_found = 1;
3696 /* Fill INFO with information about a single argument. CUM is the
3697 cumulative state for earlier arguments. MODE is the mode of this
3698 argument and TYPE is its type (if known). NAMED is true if this
3699 is a named (fixed) argument rather than a variable one. */
3702 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3703 tree type, int named, struct mips_arg_info *info)
3705 bool doubleword_aligned_p;
3706 unsigned int num_bytes, num_words, max_regs;
3708 /* Work out the size of the argument. */
3709 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3710 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3712 /* Decide whether it should go in a floating-point register, assuming
3713 one is free. Later code checks for availability.
3715 The checks against UNITS_PER_FPVALUE handle the soft-float and
3716 single-float cases. */
3720 /* The EABI conventions have traditionally been defined in terms
3721 of TYPE_MODE, regardless of the actual type. */
3722 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3723 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3724 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3729 /* Only leading floating-point scalars are passed in
3730 floating-point registers. We also handle vector floats the same
3731 say, which is OK because they are not covered by the standard ABI. */
3732 info->fpr_p = (!cum->gp_reg_found
3733 && cum->arg_number < 2
3734 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3735 || VECTOR_FLOAT_TYPE_P (type))
3736 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3737 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3738 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3743 /* Scalar and complex floating-point types are passed in
3744 floating-point registers. */
3745 info->fpr_p = (named
3746 && (type == 0 || FLOAT_TYPE_P (type))
3747 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3748 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3749 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3750 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3752 /* ??? According to the ABI documentation, the real and imaginary
3753 parts of complex floats should be passed in individual registers.
3754 The real and imaginary parts of stack arguments are supposed
3755 to be contiguous and there should be an extra word of padding
3758 This has two problems. First, it makes it impossible to use a
3759 single "void *" va_list type, since register and stack arguments
3760 are passed differently. (At the time of writing, MIPSpro cannot
3761 handle complex float varargs correctly.) Second, it's unclear
3762 what should happen when there is only one register free.
3764 For now, we assume that named complex floats should go into FPRs
3765 if there are two FPRs free, otherwise they should be passed in the
3766 same way as a struct containing two floats. */
3768 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3769 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3771 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3772 info->fpr_p = false;
3782 /* See whether the argument has doubleword alignment. */
3783 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3785 /* Set REG_OFFSET to the register count we're interested in.
3786 The EABI allocates the floating-point registers separately,
3787 but the other ABIs allocate them like integer registers. */
3788 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3792 /* Advance to an even register if the argument is doubleword-aligned. */
3793 if (doubleword_aligned_p)
3794 info->reg_offset += info->reg_offset & 1;
3796 /* Work out the offset of a stack argument. */
3797 info->stack_offset = cum->stack_words;
3798 if (doubleword_aligned_p)
3799 info->stack_offset += info->stack_offset & 1;
3801 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3803 /* Partition the argument between registers and stack. */
3804 info->reg_words = MIN (num_words, max_regs);
3805 info->stack_words = num_words - info->reg_words;
3809 /* Implement FUNCTION_ARG_ADVANCE. */
3812 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3813 tree type, int named)
3815 struct mips_arg_info info;
3817 mips_arg_info (cum, mode, type, named, &info);
3820 cum->gp_reg_found = true;
3822 /* See the comment above the cumulative args structure in mips.h
3823 for an explanation of what this code does. It assumes the O32
3824 ABI, which passes at most 2 arguments in float registers. */
3825 if (cum->arg_number < 2 && info.fpr_p)
3826 cum->fp_code += (mode == SFmode ? 1 : 2) << ((cum->arg_number - 1) * 2);
3828 if (mips_abi != ABI_EABI || !info.fpr_p)
3829 cum->num_gprs = info.reg_offset + info.reg_words;
3830 else if (info.reg_words > 0)
3831 cum->num_fprs += FP_INC;
3833 if (info.stack_words > 0)
3834 cum->stack_words = info.stack_offset + info.stack_words;
3839 /* Implement FUNCTION_ARG. */
3842 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3843 tree type, int named)
3845 struct mips_arg_info info;
3847 /* We will be called with a mode of VOIDmode after the last argument
3848 has been seen. Whatever we return will be passed to the call
3849 insn. If we need a mips16 fp_code, return a REG with the code
3850 stored as the mode. */
3851 if (mode == VOIDmode)
3853 if (TARGET_MIPS16 && cum->fp_code != 0)
3854 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3860 mips_arg_info (cum, mode, type, named, &info);
3862 /* Return straight away if the whole argument is passed on the stack. */
3863 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3867 && TREE_CODE (type) == RECORD_TYPE
3869 && TYPE_SIZE_UNIT (type)
3870 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3873 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
3874 structure contains a double in its entirety, then that 64-bit
3875 chunk is passed in a floating point register. */
3878 /* First check to see if there is any such field. */
3879 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3880 if (TREE_CODE (field) == FIELD_DECL
3881 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3882 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3883 && host_integerp (bit_position (field), 0)
3884 && int_bit_position (field) % BITS_PER_WORD == 0)
3889 /* Now handle the special case by returning a PARALLEL
3890 indicating where each 64-bit chunk goes. INFO.REG_WORDS
3891 chunks are passed in registers. */
3893 HOST_WIDE_INT bitpos;
3896 /* assign_parms checks the mode of ENTRY_PARM, so we must
3897 use the actual mode here. */
3898 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3901 field = TYPE_FIELDS (type);
3902 for (i = 0; i < info.reg_words; i++)
3906 for (; field; field = TREE_CHAIN (field))
3907 if (TREE_CODE (field) == FIELD_DECL
3908 && int_bit_position (field) >= bitpos)
3912 && int_bit_position (field) == bitpos
3913 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3914 && !TARGET_SOFT_FLOAT
3915 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3916 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3918 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3921 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3922 GEN_INT (bitpos / BITS_PER_UNIT));
3924 bitpos += BITS_PER_WORD;
3930 /* Handle the n32/n64 conventions for passing complex floating-point
3931 arguments in FPR pairs. The real part goes in the lower register
3932 and the imaginary part goes in the upper register. */
3935 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3938 enum machine_mode inner;
3941 inner = GET_MODE_INNER (mode);
3942 reg = FP_ARG_FIRST + info.reg_offset;
3943 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
3945 /* Real part in registers, imaginary part on stack. */
3946 gcc_assert (info.stack_words == info.reg_words);
3947 return gen_rtx_REG (inner, reg);
3951 gcc_assert (info.stack_words == 0);
3952 real = gen_rtx_EXPR_LIST (VOIDmode,
3953 gen_rtx_REG (inner, reg),
3955 imag = gen_rtx_EXPR_LIST (VOIDmode,
3957 reg + info.reg_words / 2),
3958 GEN_INT (GET_MODE_SIZE (inner)));
3959 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3964 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3965 else if (info.reg_offset == 1)
3966 /* This code handles the special o32 case in which the second word
3967 of the argument structure is passed in floating-point registers. */
3968 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3970 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3974 /* Implement TARGET_ARG_PARTIAL_BYTES. */
3977 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3978 enum machine_mode mode, tree type, bool named)
3980 struct mips_arg_info info;
3982 mips_arg_info (cum, mode, type, named, &info);
3983 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3987 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3988 PARM_BOUNDARY bits of alignment, but will be given anything up
3989 to STACK_BOUNDARY bits if the type requires it. */
3992 function_arg_boundary (enum machine_mode mode, tree type)
3994 unsigned int alignment;
3996 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3997 if (alignment < PARM_BOUNDARY)
3998 alignment = PARM_BOUNDARY;
3999 if (alignment > STACK_BOUNDARY)
4000 alignment = STACK_BOUNDARY;
4004 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4005 upward rather than downward. In other words, return true if the
4006 first byte of the stack slot has useful data, false if the last
4010 mips_pad_arg_upward (enum machine_mode mode, tree type)
4012 /* On little-endian targets, the first byte of every stack argument
4013 is passed in the first byte of the stack slot. */
4014 if (!BYTES_BIG_ENDIAN)
4017 /* Otherwise, integral types are padded downward: the last byte of a
4018 stack argument is passed in the last byte of the stack slot. */
4020 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4021 : GET_MODE_CLASS (mode) == MODE_INT)
4024 /* Big-endian o64 pads floating-point arguments downward. */
4025 if (mips_abi == ABI_O64)
4026 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4029 /* Other types are padded upward for o32, o64, n32 and n64. */
4030 if (mips_abi != ABI_EABI)
4033 /* Arguments smaller than a stack slot are padded downward. */
4034 if (mode != BLKmode)
4035 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4037 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4041 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4042 if the least significant byte of the register has useful data. Return
4043 the opposite if the most significant byte does. */
4046 mips_pad_reg_upward (enum machine_mode mode, tree type)
4048 /* No shifting is required for floating-point arguments. */
4049 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4050 return !BYTES_BIG_ENDIAN;
4052 /* Otherwise, apply the same padding to register arguments as we do
4053 to stack arguments. */
4054 return mips_pad_arg_upward (mode, type);
4058 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4059 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4062 CUMULATIVE_ARGS local_cum;
4063 int gp_saved, fp_saved;
4065 /* The caller has advanced CUM up to, but not beyond, the last named
4066 argument. Advance a local copy of CUM past the last "real" named
4067 argument, to find out how many registers are left over. */
4070 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4072 /* Found out how many registers we need to save. */
4073 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4074 fp_saved = (EABI_FLOAT_VARARGS_P
4075 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4084 ptr = plus_constant (virtual_incoming_args_rtx,
4085 REG_PARM_STACK_SPACE (cfun->decl)
4086 - gp_saved * UNITS_PER_WORD);
4087 mem = gen_rtx_MEM (BLKmode, ptr);
4088 set_mem_alias_set (mem, get_varargs_alias_set ());
4090 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4095 /* We can't use move_block_from_reg, because it will use
4097 enum machine_mode mode;
4100 /* Set OFF to the offset from virtual_incoming_args_rtx of
4101 the first float register. The FP save area lies below
4102 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4103 off = -gp_saved * UNITS_PER_WORD;
4104 off &= ~(UNITS_PER_FPVALUE - 1);
4105 off -= fp_saved * UNITS_PER_FPREG;
4107 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4109 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
4113 ptr = plus_constant (virtual_incoming_args_rtx, off);
4114 mem = gen_rtx_MEM (mode, ptr);
4115 set_mem_alias_set (mem, get_varargs_alias_set ());
4116 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4117 off += UNITS_PER_HWFPVALUE;
4121 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4122 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4123 + fp_saved * UNITS_PER_FPREG);
4126 /* Create the va_list data type.
4127 We keep 3 pointers, and two offsets.
4128 Two pointers are to the overflow area, which starts at the CFA.
4129 One of these is constant, for addressing into the GPR save area below it.
4130 The other is advanced up the stack through the overflow region.
4131 The third pointer is to the GPR save area. Since the FPR save area
4132 is just below it, we can address FPR slots off this pointer.
4133 We also keep two one-byte offsets, which are to be subtracted from the
4134 constant pointers to yield addresses in the GPR and FPR save areas.
4135 These are downcounted as float or non-float arguments are used,
4136 and when they get to zero, the argument must be obtained from the
4138 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4139 pointer is enough. It's started at the GPR save area, and is
4141 Note that the GPR save area is not constant size, due to optimization
4142 in the prologue. Hence, we can't use a design with two pointers
4143 and two offsets, although we could have designed this with two pointers
4144 and three offsets. */
4147 mips_build_builtin_va_list (void)
4149 if (EABI_FLOAT_VARARGS_P)
4151 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4154 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4156 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4158 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4160 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4162 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4163 unsigned_char_type_node);
4164 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4165 unsigned_char_type_node);
4166 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4167 warn on every user file. */
4168 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4169 array = build_array_type (unsigned_char_type_node,
4170 build_index_type (index));
4171 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4173 DECL_FIELD_CONTEXT (f_ovfl) = record;
4174 DECL_FIELD_CONTEXT (f_gtop) = record;
4175 DECL_FIELD_CONTEXT (f_ftop) = record;
4176 DECL_FIELD_CONTEXT (f_goff) = record;
4177 DECL_FIELD_CONTEXT (f_foff) = record;
4178 DECL_FIELD_CONTEXT (f_res) = record;
4180 TYPE_FIELDS (record) = f_ovfl;
4181 TREE_CHAIN (f_ovfl) = f_gtop;
4182 TREE_CHAIN (f_gtop) = f_ftop;
4183 TREE_CHAIN (f_ftop) = f_goff;
4184 TREE_CHAIN (f_goff) = f_foff;
4185 TREE_CHAIN (f_foff) = f_res;
4187 layout_type (record);
4190 else if (TARGET_IRIX && TARGET_IRIX6)
4191 /* On IRIX 6, this type is 'char *'. */
4192 return build_pointer_type (char_type_node);
4194 /* Otherwise, we use 'void *'. */
4195 return ptr_type_node;
4198 /* Implement va_start. */
4201 mips_va_start (tree valist, rtx nextarg)
4203 if (EABI_FLOAT_VARARGS_P)
4205 const CUMULATIVE_ARGS *cum;
4206 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4207 tree ovfl, gtop, ftop, goff, foff;
4209 int gpr_save_area_size;
4210 int fpr_save_area_size;
4213 cum = ¤t_function_args_info;
4215 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4217 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4219 f_ovfl = TYPE_FIELDS (va_list_type_node);
4220 f_gtop = TREE_CHAIN (f_ovfl);
4221 f_ftop = TREE_CHAIN (f_gtop);
4222 f_goff = TREE_CHAIN (f_ftop);
4223 f_foff = TREE_CHAIN (f_goff);
4225 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4227 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4229 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4231 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4233 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4236 /* Emit code to initialize OVFL, which points to the next varargs
4237 stack argument. CUM->STACK_WORDS gives the number of stack
4238 words used by named arguments. */
4239 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4240 if (cum->stack_words > 0)
4241 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), t,
4242 build_int_cst (NULL_TREE,
4243 cum->stack_words * UNITS_PER_WORD));
4244 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4245 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4247 /* Emit code to initialize GTOP, the top of the GPR save area. */
4248 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4249 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4250 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4252 /* Emit code to initialize FTOP, the top of the FPR save area.
4253 This address is gpr_save_area_bytes below GTOP, rounded
4254 down to the next fp-aligned boundary. */
4255 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4256 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4257 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4259 t = build2 (PLUS_EXPR, TREE_TYPE (ftop), t,
4260 build_int_cst (NULL_TREE, -fpr_offset));
4261 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4262 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4264 /* Emit code to initialize GOFF, the offset from GTOP of the
4265 next GPR argument. */
4266 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4267 build_int_cst (NULL_TREE, gpr_save_area_size));
4268 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4270 /* Likewise emit code to initialize FOFF, the offset from FTOP
4271 of the next FPR argument. */
4272 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4273 build_int_cst (NULL_TREE, fpr_save_area_size));
4274 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4278 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4279 std_expand_builtin_va_start (valist, nextarg);
4283 /* Implement va_arg. */
4286 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4288 HOST_WIDE_INT size, rsize;
4292 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4295 type = build_pointer_type (type);
4297 size = int_size_in_bytes (type);
4298 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4300 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4301 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4304 /* Not a simple merged stack. */
4306 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4307 tree ovfl, top, off, align;
4308 HOST_WIDE_INT osize;
4311 f_ovfl = TYPE_FIELDS (va_list_type_node);
4312 f_gtop = TREE_CHAIN (f_ovfl);
4313 f_ftop = TREE_CHAIN (f_gtop);
4314 f_goff = TREE_CHAIN (f_ftop);
4315 f_foff = TREE_CHAIN (f_goff);
4317 /* We maintain separate pointers and offsets for floating-point
4318 and integer arguments, but we need similar code in both cases.
4321 TOP be the top of the register save area;
4322 OFF be the offset from TOP of the next register;
4323 ADDR_RTX be the address of the argument;
4324 RSIZE be the number of bytes used to store the argument
4325 when it's in the register save area;
4326 OSIZE be the number of bytes used to store it when it's
4327 in the stack overflow area; and
4328 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4330 The code we want is:
4332 1: off &= -rsize; // round down
4335 4: addr_rtx = top - off;
4340 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4341 10: addr_rtx = ovfl + PADDING;
4345 [1] and [9] can sometimes be optimized away. */
4347 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4350 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4351 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4353 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4355 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4358 /* When floating-point registers are saved to the stack,
4359 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4360 of the float's precision. */
4361 rsize = UNITS_PER_HWFPVALUE;
4363 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4364 (= PARM_BOUNDARY bits). This can be different from RSIZE
4367 (1) On 32-bit targets when TYPE is a structure such as:
4369 struct s { float f; };
4371 Such structures are passed in paired FPRs, so RSIZE
4372 will be 8 bytes. However, the structure only takes
4373 up 4 bytes of memory, so OSIZE will only be 4.
4375 (2) In combinations such as -mgp64 -msingle-float
4376 -fshort-double. Doubles passed in registers
4377 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4378 but those passed on the stack take up
4379 UNITS_PER_WORD bytes. */
4380 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4384 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4386 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4388 if (rsize > UNITS_PER_WORD)
4390 /* [1] Emit code for: off &= -rsize. */
4391 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4392 build_int_cst (NULL_TREE, -rsize));
4393 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4394 gimplify_and_add (t, pre_p);
4399 /* [2] Emit code to branch if off == 0. */
4400 t = build2 (NE_EXPR, boolean_type_node, off,
4401 build_int_cst (TREE_TYPE (off), 0));
4402 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4404 /* [5] Emit code for: off -= rsize. We do this as a form of
4405 post-increment not available to C. Also widen for the
4406 coming pointer arithmetic. */
4407 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4408 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4409 t = fold_convert (sizetype, t);
4410 t = fold_convert (TREE_TYPE (top), t);
4412 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4413 the argument has RSIZE - SIZE bytes of leading padding. */
4414 t = build2 (MINUS_EXPR, TREE_TYPE (top), top, t);
4415 if (BYTES_BIG_ENDIAN && rsize > size)
4417 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
4419 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4421 COND_EXPR_THEN (addr) = t;
4423 if (osize > UNITS_PER_WORD)
4425 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4426 u = fold_convert (TREE_TYPE (ovfl),
4427 build_int_cst (NULL_TREE, osize - 1));
4428 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4429 u = fold_convert (TREE_TYPE (ovfl),
4430 build_int_cst (NULL_TREE, -osize));
4431 t = build2 (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4432 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4437 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4438 post-increment ovfl by osize. On big-endian machines,
4439 the argument has OSIZE - SIZE bytes of leading padding. */
4440 u = fold_convert (TREE_TYPE (ovfl),
4441 build_int_cst (NULL_TREE, osize));
4442 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4443 if (BYTES_BIG_ENDIAN && osize > size)
4445 u = fold_convert (TREE_TYPE (t),
4446 build_int_cst (NULL_TREE, osize - size));
4447 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4450 /* String [9] and [10,11] together. */
4452 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4453 COND_EXPR_ELSE (addr) = t;
4455 addr = fold_convert (build_pointer_type (type), addr);
4456 addr = build_va_arg_indirect_ref (addr);
4460 addr = build_va_arg_indirect_ref (addr);
4465 /* Return true if it is possible to use left/right accesses for a
4466 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4467 returning true, update *OP, *LEFT and *RIGHT as follows:
4469 *OP is a BLKmode reference to the whole field.
4471 *LEFT is a QImode reference to the first byte if big endian or
4472 the last byte if little endian. This address can be used in the
4473 left-side instructions (lwl, swl, ldl, sdl).
4475 *RIGHT is a QImode reference to the opposite end of the field and
4476 can be used in the patterning right-side instruction. */
4479 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4480 rtx *left, rtx *right)
4484 /* Check that the operand really is a MEM. Not all the extv and
4485 extzv predicates are checked. */
4489 /* Check that the size is valid. */
4490 if (width != 32 && (!TARGET_64BIT || width != 64))
4493 /* We can only access byte-aligned values. Since we are always passed
4494 a reference to the first byte of the field, it is not necessary to
4495 do anything with BITPOS after this check. */
4496 if (bitpos % BITS_PER_UNIT != 0)
4499 /* Reject aligned bitfields: we want to use a normal load or store
4500 instead of a left/right pair. */
4501 if (MEM_ALIGN (*op) >= width)
4504 /* Adjust *OP to refer to the whole field. This also has the effect
4505 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4506 *op = adjust_address (*op, BLKmode, 0);
4507 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4509 /* Get references to both ends of the field. We deliberately don't
4510 use the original QImode *OP for FIRST since the new BLKmode one
4511 might have a simpler address. */
4512 first = adjust_address (*op, QImode, 0);
4513 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4515 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4516 be the upper word and RIGHT the lower word. */
4517 if (TARGET_BIG_ENDIAN)
4518 *left = first, *right = last;
4520 *left = last, *right = first;
4526 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4527 Return true on success. We only handle cases where zero_extract is
4528 equivalent to sign_extract. */
4531 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4533 rtx left, right, temp;
4535 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4536 paradoxical word_mode subreg. This is the only case in which
4537 we allow the destination to be larger than the source. */
4538 if (GET_CODE (dest) == SUBREG
4539 && GET_MODE (dest) == DImode
4540 && SUBREG_BYTE (dest) == 0
4541 && GET_MODE (SUBREG_REG (dest)) == SImode)
4542 dest = SUBREG_REG (dest);
4544 /* After the above adjustment, the destination must be the same
4545 width as the source. */
4546 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4549 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4552 temp = gen_reg_rtx (GET_MODE (dest));
4553 if (GET_MODE (dest) == DImode)
4555 emit_insn (gen_mov_ldl (temp, src, left));
4556 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4560 emit_insn (gen_mov_lwl (temp, src, left));
4561 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4567 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4571 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4574 enum machine_mode mode;
4576 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4579 mode = mode_for_size (width, MODE_INT, 0);
4580 src = gen_lowpart (mode, src);
4584 emit_insn (gen_mov_sdl (dest, src, left));
4585 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4589 emit_insn (gen_mov_swl (dest, src, left));
4590 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4595 /* Return true if X is a MEM with the same size as MODE. */
4598 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4605 size = MEM_SIZE (x);
4606 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4609 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4610 source of an "ext" instruction or the destination of an "ins"
4611 instruction. OP must be a register operand and the following
4612 conditions must hold:
4614 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4615 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4616 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4618 Also reject lengths equal to a word as they are better handled
4619 by the move patterns. */
4622 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4624 HOST_WIDE_INT len, pos;
4626 if (!ISA_HAS_EXT_INS
4627 || !register_operand (op, VOIDmode)
4628 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4631 len = INTVAL (size);
4632 pos = INTVAL (position);
4634 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4635 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4641 /* Set up globals to generate code for the ISA or processor
4642 described by INFO. */
4645 mips_set_architecture (const struct mips_cpu_info *info)
4649 mips_arch_info = info;
4650 mips_arch = info->cpu;
4651 mips_isa = info->isa;
4656 /* Likewise for tuning. */
4659 mips_set_tune (const struct mips_cpu_info *info)
4663 mips_tune_info = info;
4664 mips_tune = info->cpu;
4668 /* Implement TARGET_HANDLE_OPTION. */
4671 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4676 if (strcmp (arg, "32") == 0)
4678 else if (strcmp (arg, "o64") == 0)
4680 else if (strcmp (arg, "n32") == 0)
4682 else if (strcmp (arg, "64") == 0)
4684 else if (strcmp (arg, "eabi") == 0)
4685 mips_abi = ABI_EABI;
4692 return mips_parse_cpu (arg) != 0;
4695 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4696 return mips_isa_info != 0;
4698 case OPT_mno_flush_func:
4699 mips_cache_flush_func = NULL;
4707 /* Set up the threshold for data to go into the small data area, instead
4708 of the normal data area, and detect any conflicts in the switches. */
4711 override_options (void)
4713 int i, start, regno;
4714 enum machine_mode mode;
4716 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4718 /* The following code determines the architecture and register size.
4719 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4720 The GAS and GCC code should be kept in sync as much as possible. */
4722 if (mips_arch_string != 0)
4723 mips_set_architecture (mips_parse_cpu (mips_arch_string));
4725 if (mips_isa_info != 0)
4727 if (mips_arch_info == 0)
4728 mips_set_architecture (mips_isa_info);
4729 else if (mips_arch_info->isa != mips_isa_info->isa)
4730 error ("-%s conflicts with the other architecture options, "
4731 "which specify a %s processor",
4732 mips_isa_info->name,
4733 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
4736 if (mips_arch_info == 0)
4738 #ifdef MIPS_CPU_STRING_DEFAULT
4739 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4741 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4745 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4746 error ("-march=%s is not compatible with the selected ABI",
4747 mips_arch_info->name);
4749 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4750 if (mips_tune_string != 0)
4751 mips_set_tune (mips_parse_cpu (mips_tune_string));
4753 if (mips_tune_info == 0)
4754 mips_set_tune (mips_arch_info);
4756 /* Set cost structure for the processor. */
4758 mips_cost = &mips_rtx_cost_optimize_size;
4760 mips_cost = &mips_rtx_cost_data[mips_tune];
4762 if ((target_flags_explicit & MASK_64BIT) != 0)
4764 /* The user specified the size of the integer registers. Make sure
4765 it agrees with the ABI and ISA. */
4766 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4767 error ("-mgp64 used with a 32-bit processor");
4768 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4769 error ("-mgp32 used with a 64-bit ABI");
4770 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4771 error ("-mgp64 used with a 32-bit ABI");
4775 /* Infer the integer register size from the ABI and processor.
4776 Restrict ourselves to 32-bit registers if that's all the
4777 processor has, or if the ABI cannot handle 64-bit registers. */
4778 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4779 target_flags &= ~MASK_64BIT;
4781 target_flags |= MASK_64BIT;
4784 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4786 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4787 only one right answer here. */
4788 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4789 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4790 else if (!TARGET_64BIT && TARGET_FLOAT64
4791 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
4792 error ("-mgp32 and -mfp64 can only be combined if the target"
4793 " supports the mfhc1 and mthc1 instructions");
4794 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4795 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4799 /* -msingle-float selects 32-bit float registers. Otherwise the
4800 float registers should be the same size as the integer ones. */
4801 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4802 target_flags |= MASK_FLOAT64;
4804 target_flags &= ~MASK_FLOAT64;
4807 /* End of code shared with GAS. */
4809 if ((target_flags_explicit & MASK_LONG64) == 0)
4811 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4812 target_flags |= MASK_LONG64;
4814 target_flags &= ~MASK_LONG64;
4817 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4818 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4820 /* For some configurations, it is useful to have -march control
4821 the default setting of MASK_SOFT_FLOAT. */
4822 switch ((int) mips_arch)
4824 case PROCESSOR_R4100:
4825 case PROCESSOR_R4111:
4826 case PROCESSOR_R4120:
4827 case PROCESSOR_R4130:
4828 target_flags |= MASK_SOFT_FLOAT;
4832 target_flags &= ~MASK_SOFT_FLOAT;
4838 flag_pcc_struct_return = 0;
4840 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4842 /* If neither -mbranch-likely nor -mno-branch-likely was given
4843 on the command line, set MASK_BRANCHLIKELY based on the target
4846 By default, we enable use of Branch Likely instructions on
4847 all architectures which support them with the following
4848 exceptions: when creating MIPS32 or MIPS64 code, and when
4849 tuning for architectures where their use tends to hurt
4852 The MIPS32 and MIPS64 architecture specifications say "Software
4853 is strongly encouraged to avoid use of Branch Likely
4854 instructions, as they will be removed from a future revision
4855 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4856 issue those instructions unless instructed to do so by
4858 if (ISA_HAS_BRANCHLIKELY
4859 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4860 && !(TUNE_MIPS5500 || TUNE_SB1))
4861 target_flags |= MASK_BRANCHLIKELY;
4863 target_flags &= ~MASK_BRANCHLIKELY;
4865 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4866 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
4868 /* The effect of -mabicalls isn't defined for the EABI. */
4869 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4871 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4872 target_flags &= ~MASK_ABICALLS;
4875 if (TARGET_ABICALLS)
4877 /* We need to set flag_pic for executables as well as DSOs
4878 because we may reference symbols that are not defined in
4879 the final executable. (MIPS does not use things like
4880 copy relocs, for example.)
4882 Also, there is a body of code that uses __PIC__ to distinguish
4883 between -mabicalls and -mno-abicalls code. */
4885 if (mips_section_threshold > 0)
4886 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
4889 /* mips_split_addresses is a half-way house between explicit
4890 relocations and the traditional assembler macros. It can
4891 split absolute 32-bit symbolic constants into a high/lo_sum
4892 pair but uses macros for other sorts of access.
4894 Like explicit relocation support for REL targets, it relies
4895 on GNU extensions in the assembler and the linker.
4897 Although this code should work for -O0, it has traditionally
4898 been treated as an optimization. */
4899 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4900 && optimize && !flag_pic
4901 && !ABI_HAS_64BIT_SYMBOLS)
4902 mips_split_addresses = 1;
4904 mips_split_addresses = 0;
4906 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4907 faster code, but at the expense of more nops. Enable it at -O3 and
4909 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4910 target_flags |= MASK_VR4130_ALIGN;
4912 /* When compiling for the mips16, we cannot use floating point. We
4913 record the original hard float value in mips16_hard_float. */
4916 if (TARGET_SOFT_FLOAT)
4917 mips16_hard_float = 0;
4919 mips16_hard_float = 1;
4920 target_flags |= MASK_SOFT_FLOAT;
4922 /* Don't run the scheduler before reload, since it tends to
4923 increase register pressure. */
4924 flag_schedule_insns = 0;
4926 /* Don't do hot/cold partitioning. The constant layout code expects
4927 the whole function to be in a single section. */
4928 flag_reorder_blocks_and_partition = 0;
4930 /* Silently disable -mexplicit-relocs since it doesn't apply
4931 to mips16 code. Even so, it would overly pedantic to warn
4932 about "-mips16 -mexplicit-relocs", especially given that
4933 we use a %gprel() operator. */
4934 target_flags &= ~MASK_EXPLICIT_RELOCS;
4937 /* When using explicit relocs, we call dbr_schedule from within
4939 if (TARGET_EXPLICIT_RELOCS)
4941 mips_flag_delayed_branch = flag_delayed_branch;
4942 flag_delayed_branch = 0;
4945 #ifdef MIPS_TFMODE_FORMAT
4946 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4949 /* Make sure that the user didn't turn off paired single support when
4950 MIPS-3D support is requested. */
4951 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4952 && !TARGET_PAIRED_SINGLE_FLOAT)
4953 error ("-mips3d requires -mpaired-single");
4955 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
4957 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4959 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4960 and TARGET_HARD_FLOAT are both true. */
4961 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4962 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4964 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4966 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4967 error ("-mips3d/-mpaired-single must be used with -mips64");
4969 /* If TARGET_DSPR2, enable MASK_DSP. */
4971 target_flags |= MASK_DSP;
4973 if (TARGET_MIPS16 && TARGET_DSP)
4974 error ("-mips16 and -mdsp cannot be used together");
4976 mips_print_operand_punct['?'] = 1;
4977 mips_print_operand_punct['#'] = 1;
4978 mips_print_operand_punct['/'] = 1;
4979 mips_print_operand_punct['&'] = 1;
4980 mips_print_operand_punct['!'] = 1;
4981 mips_print_operand_punct['*'] = 1;
4982 mips_print_operand_punct['@'] = 1;
4983 mips_print_operand_punct['.'] = 1;
4984 mips_print_operand_punct['('] = 1;
4985 mips_print_operand_punct[')'] = 1;
4986 mips_print_operand_punct['['] = 1;
4987 mips_print_operand_punct[']'] = 1;
4988 mips_print_operand_punct['<'] = 1;
4989 mips_print_operand_punct['>'] = 1;
4990 mips_print_operand_punct['{'] = 1;
4991 mips_print_operand_punct['}'] = 1;
4992 mips_print_operand_punct['^'] = 1;
4993 mips_print_operand_punct['$'] = 1;
4994 mips_print_operand_punct['+'] = 1;
4995 mips_print_operand_punct['~'] = 1;
4997 /* Set up array to map GCC register number to debug register number.
4998 Ignore the special purpose register numbers. */
5000 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5001 mips_dbx_regno[i] = -1;
5003 start = GP_DBX_FIRST - GP_REG_FIRST;
5004 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5005 mips_dbx_regno[i] = i + start;
5007 start = FP_DBX_FIRST - FP_REG_FIRST;
5008 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5009 mips_dbx_regno[i] = i + start;
5011 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5012 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5014 /* Set up array giving whether a given register can hold a given mode. */
5016 for (mode = VOIDmode;
5017 mode != MAX_MACHINE_MODE;
5018 mode = (enum machine_mode) ((int)mode + 1))
5020 register int size = GET_MODE_SIZE (mode);
5021 register enum mode_class class = GET_MODE_CLASS (mode);
5023 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5027 if (mode == CCV2mode)
5030 && (regno - ST_REG_FIRST) % 2 == 0);
5032 else if (mode == CCV4mode)
5035 && (regno - ST_REG_FIRST) % 4 == 0);
5037 else if (mode == CCmode)
5040 temp = (regno == FPSW_REGNUM);
5042 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5043 || FP_REG_P (regno));
5046 else if (GP_REG_P (regno))
5047 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5049 else if (FP_REG_P (regno))
5050 temp = ((regno % FP_INC) == 0)
5051 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5052 || class == MODE_VECTOR_FLOAT)
5053 && size <= UNITS_PER_FPVALUE)
5054 /* Allow integer modes that fit into a single
5055 register. We need to put integers into FPRs
5056 when using instructions like cvt and trunc.
5057 We can't allow sizes smaller than a word,
5058 the FPU has no appropriate load/store
5059 instructions for those. */
5060 || (class == MODE_INT
5061 && size >= MIN_UNITS_PER_WORD
5062 && size <= UNITS_PER_FPREG)
5063 /* Allow TFmode for CCmode reloads. */
5064 || (ISA_HAS_8CC && mode == TFmode));
5066 else if (ACC_REG_P (regno))
5067 temp = (INTEGRAL_MODE_P (mode)
5068 && (size <= UNITS_PER_WORD
5069 || (ACC_HI_REG_P (regno)
5070 && size == 2 * UNITS_PER_WORD)));
5072 else if (ALL_COP_REG_P (regno))
5073 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5077 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5081 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5082 initialized yet, so we can't use that here. */
5083 gpr_mode = TARGET_64BIT ? DImode : SImode;
5085 /* Provide default values for align_* for 64-bit targets. */
5086 if (TARGET_64BIT && !TARGET_MIPS16)
5088 if (align_loops == 0)
5090 if (align_jumps == 0)
5092 if (align_functions == 0)
5093 align_functions = 8;
5096 /* Function to allocate machine-dependent function status. */
5097 init_machine_status = &mips_init_machine_status;
5099 if (ABI_HAS_64BIT_SYMBOLS)
5101 if (TARGET_EXPLICIT_RELOCS)
5103 mips_split_p[SYMBOL_64_HIGH] = true;
5104 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5105 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5107 mips_split_p[SYMBOL_64_MID] = true;
5108 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5109 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5111 mips_split_p[SYMBOL_64_LOW] = true;
5112 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5113 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5115 mips_split_p[SYMBOL_GENERAL] = true;
5116 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5121 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5123 mips_split_p[SYMBOL_GENERAL] = true;
5124 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5125 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5131 /* The high part is provided by a pseudo copy of $gp. */
5132 mips_split_p[SYMBOL_SMALL_DATA] = true;
5133 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5136 if (TARGET_EXPLICIT_RELOCS)
5138 /* Small data constants are kept whole until after reload,
5139 then lowered by mips_rewrite_small_data. */
5140 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5142 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5145 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5146 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5150 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5151 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5156 /* The HIGH and LO_SUM are matched by special .md patterns. */
5157 mips_split_p[SYMBOL_GOT_DISP] = true;
5159 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5160 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5161 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5163 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5164 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5165 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5170 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5172 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5173 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5179 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5180 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5181 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5184 /* Thread-local relocation operators. */
5185 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5186 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5187 mips_split_p[SYMBOL_DTPREL] = 1;
5188 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5189 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5190 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5191 mips_split_p[SYMBOL_TPREL] = 1;
5192 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5193 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5195 /* We don't have a thread pointer access instruction on MIPS16, or
5196 appropriate TLS relocations. */
5198 targetm.have_tls = false;
5200 /* Default to working around R4000 errata only if the processor
5201 was selected explicitly. */
5202 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5203 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5204 target_flags |= MASK_FIX_R4000;
5206 /* Default to working around R4400 errata only if the processor
5207 was selected explicitly. */
5208 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5209 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5210 target_flags |= MASK_FIX_R4400;
5213 /* Implement CONDITIONAL_REGISTER_USAGE. */
5216 mips_conditional_register_usage (void)
5222 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5223 fixed_regs[regno] = call_used_regs[regno] = 1;
5225 if (!TARGET_HARD_FLOAT)
5229 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5230 fixed_regs[regno] = call_used_regs[regno] = 1;
5231 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5232 fixed_regs[regno] = call_used_regs[regno] = 1;
5234 else if (! ISA_HAS_8CC)
5238 /* We only have a single condition code register. We
5239 implement this by hiding all the condition code registers,
5240 and generating RTL that refers directly to ST_REG_FIRST. */
5241 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5242 fixed_regs[regno] = call_used_regs[regno] = 1;
5244 /* In mips16 mode, we permit the $t temporary registers to be used
5245 for reload. We prohibit the unused $s registers, since they
5246 are caller saved, and saving them via a mips16 register would
5247 probably waste more time than just reloading the value. */
5250 fixed_regs[18] = call_used_regs[18] = 1;
5251 fixed_regs[19] = call_used_regs[19] = 1;
5252 fixed_regs[20] = call_used_regs[20] = 1;
5253 fixed_regs[21] = call_used_regs[21] = 1;
5254 fixed_regs[22] = call_used_regs[22] = 1;
5255 fixed_regs[23] = call_used_regs[23] = 1;
5256 fixed_regs[26] = call_used_regs[26] = 1;
5257 fixed_regs[27] = call_used_regs[27] = 1;
5258 fixed_regs[30] = call_used_regs[30] = 1;
5260 /* fp20-23 are now caller saved. */
5261 if (mips_abi == ABI_64)
5264 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5265 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5267 /* Odd registers from fp21 to fp31 are now caller saved. */
5268 if (mips_abi == ABI_N32)
5271 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5272 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5276 /* Allocate a chunk of memory for per-function machine-dependent data. */
5277 static struct machine_function *
5278 mips_init_machine_status (void)
5280 return ((struct machine_function *)
5281 ggc_alloc_cleared (sizeof (struct machine_function)));
5284 /* On the mips16, we want to allocate $24 (T_REG) before other
5285 registers for instructions for which it is possible. This helps
5286 avoid shuffling registers around in order to set up for an xor,
5287 encouraging the compiler to use a cmp instead. */
5290 mips_order_regs_for_local_alloc (void)
5294 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5295 reg_alloc_order[i] = i;
5299 /* It really doesn't matter where we put register 0, since it is
5300 a fixed register anyhow. */
5301 reg_alloc_order[0] = 24;
5302 reg_alloc_order[24] = 0;
5307 /* The MIPS debug format wants all automatic variables and arguments
5308 to be in terms of the virtual frame pointer (stack pointer before
5309 any adjustment in the function), while the MIPS 3.0 linker wants
5310 the frame pointer to be the stack pointer after the initial
5311 adjustment. So, we do the adjustment here. The arg pointer (which
5312 is eliminated) points to the virtual frame pointer, while the frame
5313 pointer (which may be eliminated) points to the stack pointer after
5314 the initial adjustments. */
5317 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5319 rtx offset2 = const0_rtx;
5320 rtx reg = eliminate_constant_term (addr, &offset2);
5323 offset = INTVAL (offset2);
5325 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5326 || reg == hard_frame_pointer_rtx)
5328 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5329 ? compute_frame_size (get_frame_size ())
5330 : cfun->machine->frame.total_size;
5332 /* MIPS16 frame is smaller */
5333 if (frame_pointer_needed && TARGET_MIPS16)
5334 frame_size -= cfun->machine->frame.args_size;
5336 offset = offset - frame_size;
5339 /* sdbout_parms does not want this to crash for unrecognized cases. */
5341 else if (reg != arg_pointer_rtx)
5342 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5349 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5351 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5352 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5353 'h' OP is HIGH, prints %hi(X),
5354 'd' output integer constant in decimal,
5355 'z' if the operand is 0, use $0 instead of normal operand.
5356 'D' print second part of double-word register or memory operand.
5357 'L' print low-order register of double-word register operand.
5358 'M' print high-order register of double-word register operand.
5359 'C' print part of opcode for a branch condition.
5360 'F' print part of opcode for a floating-point branch condition.
5361 'N' print part of opcode for a branch condition, inverted.
5362 'W' print part of opcode for a floating-point branch condition, inverted.
5363 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5364 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5365 't' like 'T', but with the EQ/NE cases reversed
5366 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5367 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5368 'R' print the reloc associated with LO_SUM
5369 'q' print DSP accumulator registers
5371 The punctuation characters are:
5373 '(' Turn on .set noreorder
5374 ')' Turn on .set reorder
5375 '[' Turn on .set noat
5377 '<' Turn on .set nomacro
5378 '>' Turn on .set macro
5379 '{' Turn on .set volatile (not GAS)
5380 '}' Turn on .set novolatile (not GAS)
5381 '&' Turn on .set noreorder if filling delay slots
5382 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5383 '!' Turn on .set nomacro if filling delay slots
5384 '#' Print nop if in a .set noreorder section.
5385 '/' Like '#', but does nothing within a delayed branch sequence
5386 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5387 '@' Print the name of the assembler temporary register (at or $1).
5388 '.' Print the name of the register with a hard-wired zero (zero or $0).
5389 '^' Print the name of the pic call-through register (t9 or $25).
5390 '$' Print the name of the stack pointer register (sp or $29).
5391 '+' Print the name of the gp register (usually gp or $28).
5392 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
5395 print_operand (FILE *file, rtx op, int letter)
5397 register enum rtx_code code;
5399 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5404 if (mips_branch_likely)
5409 fputs (reg_names [GP_REG_FIRST + 1], file);
5413 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5417 fputs (reg_names [GP_REG_FIRST + 0], file);
5421 fputs (reg_names[STACK_POINTER_REGNUM], file);
5425 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5429 if (final_sequence != 0 && set_noreorder++ == 0)
5430 fputs (".set\tnoreorder\n\t", file);
5434 if (final_sequence != 0)
5436 if (set_noreorder++ == 0)
5437 fputs (".set\tnoreorder\n\t", file);
5439 if (set_nomacro++ == 0)
5440 fputs (".set\tnomacro\n\t", file);
5445 if (final_sequence != 0 && set_nomacro++ == 0)
5446 fputs ("\n\t.set\tnomacro", file);
5450 if (set_noreorder != 0)
5451 fputs ("\n\tnop", file);
5455 /* Print an extra newline so that the delayed insn is separated
5456 from the following ones. This looks neater and is consistent
5457 with non-nop delayed sequences. */
5458 if (set_noreorder != 0 && final_sequence == 0)
5459 fputs ("\n\tnop\n", file);
5463 if (set_noreorder++ == 0)
5464 fputs (".set\tnoreorder\n\t", file);
5468 if (set_noreorder == 0)
5469 error ("internal error: %%) found without a %%( in assembler pattern");
5471 else if (--set_noreorder == 0)
5472 fputs ("\n\t.set\treorder", file);
5477 if (set_noat++ == 0)
5478 fputs (".set\tnoat\n\t", file);
5483 error ("internal error: %%] found without a %%[ in assembler pattern");
5484 else if (--set_noat == 0)
5485 fputs ("\n\t.set\tat", file);
5490 if (set_nomacro++ == 0)
5491 fputs (".set\tnomacro\n\t", file);
5495 if (set_nomacro == 0)
5496 error ("internal error: %%> found without a %%< in assembler pattern");
5497 else if (--set_nomacro == 0)
5498 fputs ("\n\t.set\tmacro", file);
5503 if (set_volatile++ == 0)
5504 fputs ("#.set\tvolatile\n\t", file);
5508 if (set_volatile == 0)
5509 error ("internal error: %%} found without a %%{ in assembler pattern");
5510 else if (--set_volatile == 0)
5511 fputs ("\n\t#.set\tnovolatile", file);
5517 if (align_labels_log > 0)
5518 ASM_OUTPUT_ALIGN (file, align_labels_log);
5523 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5532 error ("PRINT_OPERAND null pointer");
5536 code = GET_CODE (op);
5541 case EQ: fputs ("eq", file); break;
5542 case NE: fputs ("ne", file); break;
5543 case GT: fputs ("gt", file); break;
5544 case GE: fputs ("ge", file); break;
5545 case LT: fputs ("lt", file); break;
5546 case LE: fputs ("le", file); break;
5547 case GTU: fputs ("gtu", file); break;
5548 case GEU: fputs ("geu", file); break;
5549 case LTU: fputs ("ltu", file); break;
5550 case LEU: fputs ("leu", file); break;
5552 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
5555 else if (letter == 'N')
5558 case EQ: fputs ("ne", file); break;
5559 case NE: fputs ("eq", file); break;
5560 case GT: fputs ("le", file); break;
5561 case GE: fputs ("lt", file); break;
5562 case LT: fputs ("ge", file); break;
5563 case LE: fputs ("gt", file); break;
5564 case GTU: fputs ("leu", file); break;
5565 case GEU: fputs ("ltu", file); break;
5566 case LTU: fputs ("geu", file); break;
5567 case LEU: fputs ("gtu", file); break;
5569 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5572 else if (letter == 'F')
5575 case EQ: fputs ("c1f", file); break;
5576 case NE: fputs ("c1t", file); break;
5578 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5581 else if (letter == 'W')
5584 case EQ: fputs ("c1t", file); break;
5585 case NE: fputs ("c1f", file); break;
5587 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5590 else if (letter == 'h')
5592 if (GET_CODE (op) == HIGH)
5595 print_operand_reloc (file, op, mips_hi_relocs);
5598 else if (letter == 'R')
5599 print_operand_reloc (file, op, mips_lo_relocs);
5601 else if (letter == 'Y')
5603 if (GET_CODE (op) == CONST_INT
5604 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5605 < ARRAY_SIZE (mips_fp_conditions)))
5606 fputs (mips_fp_conditions[INTVAL (op)], file);
5608 output_operand_lossage ("invalid %%Y value");
5611 else if (letter == 'Z')
5615 print_operand (file, op, 0);
5620 else if (letter == 'q')
5625 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5627 regnum = REGNO (op);
5628 if (MD_REG_P (regnum))
5629 fprintf (file, "$ac0");
5630 else if (DSP_ACC_REG_P (regnum))
5631 fprintf (file, "$ac%c", reg_names[regnum][3]);
5633 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5636 else if (code == REG || code == SUBREG)
5638 register int regnum;
5641 regnum = REGNO (op);
5643 regnum = true_regnum (op);
5645 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5646 || (letter == 'L' && WORDS_BIG_ENDIAN)
5650 fprintf (file, "%s", reg_names[regnum]);
5653 else if (code == MEM)
5656 output_address (plus_constant (XEXP (op, 0), 4));
5658 output_address (XEXP (op, 0));
5661 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5662 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5664 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5665 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5667 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5668 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5670 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5671 fputs (reg_names[GP_REG_FIRST], file);
5673 else if (letter == 'd' || letter == 'x' || letter == 'X')
5674 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5676 else if (letter == 'T' || letter == 't')
5678 int truth = (code == NE) == (letter == 'T');
5679 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5682 else if (CONST_GP_P (op))
5683 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
5686 output_addr_const (file, op);
5690 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5691 RELOCS is the array of relocations to use. */
5694 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5696 enum mips_symbol_type symbol_type;
5700 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5701 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5703 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5704 split_const (op, &base, &offset);
5705 if (UNSPEC_ADDRESS_P (base))
5706 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
5708 fputs (relocs[symbol_type], file);
5709 output_addr_const (file, op);
5710 for (p = relocs[symbol_type]; *p != 0; p++)
5715 /* Output address operand X to FILE. */
5718 print_operand_address (FILE *file, rtx x)
5720 struct mips_address_info addr;
5722 if (mips_classify_address (&addr, x, word_mode, true))
5726 print_operand (file, addr.offset, 0);
5727 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5730 case ADDRESS_LO_SUM:
5731 print_operand (file, addr.offset, 'R');
5732 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5735 case ADDRESS_CONST_INT:
5736 output_addr_const (file, x);
5737 fprintf (file, "(%s)", reg_names[0]);
5740 case ADDRESS_SYMBOLIC:
5741 output_addr_const (file, x);
5747 /* When using assembler macros, keep track of all of small-data externs
5748 so that mips_file_end can emit the appropriate declarations for them.
5750 In most cases it would be safe (though pointless) to emit .externs
5751 for other symbols too. One exception is when an object is within
5752 the -G limit but declared by the user to be in a section other
5753 than .sbss or .sdata. */
5756 mips_output_external (FILE *file, tree decl, const char *name)
5758 default_elf_asm_output_external (file, decl, name);
5760 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
5761 set in order to avoid putting out names that are never really
5763 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
5765 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5767 fputs ("\t.extern\t", file);
5768 assemble_name (file, name);
5769 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
5770 int_size_in_bytes (TREE_TYPE (decl)));
5772 else if (TARGET_IRIX
5773 && mips_abi == ABI_32
5774 && TREE_CODE (decl) == FUNCTION_DECL)
5776 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5777 `.global name .text' directive for every used but
5778 undefined function. If we don't, the linker may perform
5779 an optimization (skipping over the insns that set $gp)
5780 when it is unsafe. */
5781 fputs ("\t.globl ", file);
5782 assemble_name (file, name);
5783 fputs (" .text\n", file);
5788 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5789 put out a MIPS ECOFF file and a stab. */
5792 mips_output_filename (FILE *stream, const char *name)
5795 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5797 if (write_symbols == DWARF2_DEBUG)
5799 else if (mips_output_filename_first_time)
5801 mips_output_filename_first_time = 0;
5802 num_source_filenames += 1;
5803 current_function_file = name;
5804 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5805 output_quoted_string (stream, name);
5806 putc ('\n', stream);
5809 /* If we are emitting stabs, let dbxout.c handle this (except for
5810 the mips_output_filename_first_time case). */
5811 else if (write_symbols == DBX_DEBUG)
5814 else if (name != current_function_file
5815 && strcmp (name, current_function_file) != 0)
5817 num_source_filenames += 1;
5818 current_function_file = name;
5819 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5820 output_quoted_string (stream, name);
5821 putc ('\n', stream);
5825 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5826 that should be written before the opening quote, such as "\t.ascii\t"
5827 for real string data or "\t# " for a comment. */
5830 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5835 register const unsigned char *string =
5836 (const unsigned char *)string_param;
5838 fprintf (stream, "%s\"", prefix);
5839 for (i = 0; i < len; i++)
5841 register int c = string[i];
5845 if (c == '\\' || c == '\"')
5847 putc ('\\', stream);
5855 fprintf (stream, "\\%03o", c);
5859 if (cur_pos > 72 && i+1 < len)
5862 fprintf (stream, "\"\n%s\"", prefix);
5865 fprintf (stream, "\"\n");
5868 /* Implement TARGET_ASM_FILE_START. */
5871 mips_file_start (void)
5873 default_file_start ();
5877 /* Generate a special section to describe the ABI switches used to
5878 produce the resultant binary. This used to be done by the assembler
5879 setting bits in the ELF header's flags field, but we have run out of
5880 bits. GDB needs this information in order to be able to correctly
5881 debug these binaries. See the function mips_gdbarch_init() in
5882 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5883 causes unnecessary IRIX 6 ld warnings. */
5884 const char * abi_string = NULL;
5888 case ABI_32: abi_string = "abi32"; break;
5889 case ABI_N32: abi_string = "abiN32"; break;
5890 case ABI_64: abi_string = "abi64"; break;
5891 case ABI_O64: abi_string = "abiO64"; break;
5892 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5896 /* Note - we use fprintf directly rather than calling switch_to_section
5897 because in this way we can avoid creating an allocated section. We
5898 do not want this section to take up any space in the running
5900 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5902 /* There is no ELF header flag to distinguish long32 forms of the
5903 EABI from long64 forms. Emit a special section to help tools
5904 such as GDB. Do the same for o64, which is sometimes used with
5906 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
5907 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5908 TARGET_LONG64 ? 64 : 32);
5910 /* Restore the default section. */
5911 fprintf (asm_out_file, "\t.previous\n");
5914 /* Generate the pseudo ops that System V.4 wants. */
5915 if (TARGET_ABICALLS)
5916 fprintf (asm_out_file, "\t.abicalls\n");
5919 fprintf (asm_out_file, "\t.set\tmips16\n");
5921 if (flag_verbose_asm)
5922 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5924 mips_section_threshold, mips_arch_info->name, mips_isa);
5927 #ifdef BSS_SECTION_ASM_OP
5928 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5929 in the use of sbss. */
5932 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5933 unsigned HOST_WIDE_INT size, int align)
5935 extern tree last_assemble_variable_decl;
5937 if (mips_in_small_data_p (decl))
5938 switch_to_section (get_named_section (NULL, ".sbss", 0));
5940 switch_to_section (bss_section);
5941 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5942 last_assemble_variable_decl = decl;
5943 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5944 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5948 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5949 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5952 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5953 unsigned HOST_WIDE_INT size,
5956 /* If the target wants uninitialized const declarations in
5957 .rdata then don't put them in .comm. */
5958 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5959 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5960 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5962 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5963 targetm.asm_out.globalize_label (stream, name);
5965 switch_to_section (readonly_data_section);
5966 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5967 mips_declare_object (stream, name, "",
5968 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
5972 mips_declare_common_object (stream, name, "\n\t.comm\t",
5976 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5977 NAME is the name of the object and ALIGN is the required alignment
5978 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
5979 alignment argument. */
5982 mips_declare_common_object (FILE *stream, const char *name,
5983 const char *init_string,
5984 unsigned HOST_WIDE_INT size,
5985 unsigned int align, bool takes_alignment_p)
5987 if (!takes_alignment_p)
5989 size += (align / BITS_PER_UNIT) - 1;
5990 size -= size % (align / BITS_PER_UNIT);
5991 mips_declare_object (stream, name, init_string,
5992 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
5995 mips_declare_object (stream, name, init_string,
5996 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5997 size, align / BITS_PER_UNIT);
6000 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6001 macros, mark the symbol as written so that mips_file_end won't emit an
6002 .extern for it. STREAM is the output file, NAME is the name of the
6003 symbol, INIT_STRING is the string that should be written before the
6004 symbol and FINAL_STRING is the string that should be written after it.
6005 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6008 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6009 const char *final_string, ...)
6013 fputs (init_string, stream);
6014 assemble_name (stream, name);
6015 va_start (ap, final_string);
6016 vfprintf (stream, final_string, ap);
6019 if (!TARGET_EXPLICIT_RELOCS)
6021 tree name_tree = get_identifier (name);
6022 TREE_ASM_WRITTEN (name_tree) = 1;
6026 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6027 extern int size_directive_output;
6029 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6030 definitions except that it uses mips_declare_object() to emit the label. */
6033 mips_declare_object_name (FILE *stream, const char *name,
6034 tree decl ATTRIBUTE_UNUSED)
6036 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6037 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6040 size_directive_output = 0;
6041 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6045 size_directive_output = 1;
6046 size = int_size_in_bytes (TREE_TYPE (decl));
6047 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6050 mips_declare_object (stream, name, "", ":\n");
6053 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6056 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6060 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6061 if (!flag_inhibit_size_directive
6062 && DECL_SIZE (decl) != 0
6063 && !at_end && top_level
6064 && DECL_INITIAL (decl) == error_mark_node
6065 && !size_directive_output)
6069 size_directive_output = 1;
6070 size = int_size_in_bytes (TREE_TYPE (decl));
6071 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6076 /* Return true if X is a small data address that can be rewritten
6080 mips_rewrite_small_data_p (rtx x)
6082 enum mips_symbol_type symbol_type;
6084 return (TARGET_EXPLICIT_RELOCS
6085 && mips_symbolic_constant_p (x, &symbol_type)
6086 && symbol_type == SYMBOL_SMALL_DATA);
6090 /* A for_each_rtx callback for mips_small_data_pattern_p. */
6093 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6095 if (GET_CODE (*loc) == LO_SUM)
6098 return mips_rewrite_small_data_p (*loc);
6101 /* Return true if OP refers to small data symbols directly, not through
6105 mips_small_data_pattern_p (rtx op)
6107 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6110 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
6113 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6115 if (mips_rewrite_small_data_p (*loc))
6116 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6118 if (GET_CODE (*loc) == LO_SUM)
6124 /* If possible, rewrite OP so that it refers to small data using
6125 explicit relocations. */
6128 mips_rewrite_small_data (rtx op)
6130 op = copy_insn (op);
6131 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6135 /* Return true if the current function has an insn that implicitly
6139 mips_function_has_gp_insn (void)
6141 /* Don't bother rechecking if we found one last time. */
6142 if (!cfun->machine->has_gp_insn_p)
6146 push_topmost_sequence ();
6147 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6149 && GET_CODE (PATTERN (insn)) != USE
6150 && GET_CODE (PATTERN (insn)) != CLOBBER
6151 && (get_attr_got (insn) != GOT_UNSET
6152 || small_data_pattern (PATTERN (insn), VOIDmode)))
6154 pop_topmost_sequence ();
6156 cfun->machine->has_gp_insn_p = (insn != 0);
6158 return cfun->machine->has_gp_insn_p;
6162 /* Return the register that should be used as the global pointer
6163 within this function. Return 0 if the function doesn't need
6164 a global pointer. */
6167 mips_global_pointer (void)
6171 /* $gp is always available unless we're using a GOT. */
6172 if (!TARGET_USE_GOT)
6173 return GLOBAL_POINTER_REGNUM;
6175 /* We must always provide $gp when it is used implicitly. */
6176 if (!TARGET_EXPLICIT_RELOCS)
6177 return GLOBAL_POINTER_REGNUM;
6179 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6181 if (current_function_profile)
6182 return GLOBAL_POINTER_REGNUM;
6184 /* If the function has a nonlocal goto, $gp must hold the correct
6185 global pointer for the target function. */
6186 if (current_function_has_nonlocal_goto)
6187 return GLOBAL_POINTER_REGNUM;
6189 /* If the gp is never referenced, there's no need to initialize it.
6190 Note that reload can sometimes introduce constant pool references
6191 into a function that otherwise didn't need them. For example,
6192 suppose we have an instruction like:
6194 (set (reg:DF R1) (float:DF (reg:SI R2)))
6196 If R2 turns out to be constant such as 1, the instruction may have a
6197 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6198 using this constant if R2 doesn't get allocated to a register.
6200 In cases like these, reload will have added the constant to the pool
6201 but no instruction will yet refer to it. */
6202 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
6203 && !current_function_uses_const_pool
6204 && !mips_function_has_gp_insn ())
6207 /* We need a global pointer, but perhaps we can use a call-clobbered
6208 register instead of $gp. */
6209 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6210 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6211 if (!regs_ever_live[regno]
6212 && call_used_regs[regno]
6213 && !fixed_regs[regno]
6214 && regno != PIC_FUNCTION_ADDR_REGNUM)
6217 return GLOBAL_POINTER_REGNUM;
6221 /* Return true if the current function must save REGNO. */
6224 mips_save_reg_p (unsigned int regno)
6226 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6227 if we have not chosen a call-clobbered substitute. */
6228 if (regno == GLOBAL_POINTER_REGNUM)
6229 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6231 /* Check call-saved registers. */
6232 if (regs_ever_live[regno] && !call_used_regs[regno])
6235 /* We need to save the old frame pointer before setting up a new one. */
6236 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6239 /* We need to save the incoming return address if it is ever clobbered
6240 within the function. */
6241 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
6248 return_type = DECL_RESULT (current_function_decl);
6250 /* $18 is a special case in mips16 code. It may be used to call
6251 a function which returns a floating point value, but it is
6252 marked in call_used_regs. */
6253 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
6256 /* $31 is also a special case. It will be used to copy a return
6257 value into the floating point registers if the return value is
6259 if (regno == GP_REG_FIRST + 31
6260 && mips16_hard_float
6261 && !aggregate_value_p (return_type, current_function_decl)
6262 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6263 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6271 /* Return the bytes needed to compute the frame pointer from the current
6272 stack pointer. SIZE is the size (in bytes) of the local variables.
6274 MIPS stack frames look like:
6276 Before call After call
6277 +-----------------------+ +-----------------------+
6280 | caller's temps. | | caller's temps. |
6282 +-----------------------+ +-----------------------+
6284 | arguments on stack. | | arguments on stack. |
6286 +-----------------------+ +-----------------------+
6287 | 4 words to save | | 4 words to save |
6288 | arguments passed | | arguments passed |
6289 | in registers, even | | in registers, even |
6290 SP->| if not passed. | VFP->| if not passed. |
6291 +-----------------------+ +-----------------------+
6293 | fp register save |
6295 +-----------------------+
6297 | gp register save |
6299 +-----------------------+
6303 +-----------------------+
6305 | alloca allocations |
6307 +-----------------------+
6309 | GP save for V.4 abi |
6311 +-----------------------+
6313 | arguments on stack |
6315 +-----------------------+
6317 | arguments passed |
6318 | in registers, even |
6319 low SP->| if not passed. |
6320 memory +-----------------------+
6325 compute_frame_size (HOST_WIDE_INT size)
6328 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6329 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6330 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6331 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6332 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6333 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6334 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6335 unsigned int mask; /* mask of saved gp registers */
6336 unsigned int fmask; /* mask of saved fp registers */
6338 cfun->machine->global_pointer = mips_global_pointer ();
6344 var_size = MIPS_STACK_ALIGN (size);
6345 args_size = current_function_outgoing_args_size;
6346 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6348 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6349 functions. If the function has local variables, we're committed
6350 to allocating it anyway. Otherwise reclaim it here. */
6351 if (var_size == 0 && current_function_is_leaf)
6352 cprestore_size = args_size = 0;
6354 /* The MIPS 3.0 linker does not like functions that dynamically
6355 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6356 looks like we are trying to create a second frame pointer to the
6357 function, so allocate some stack space to make it happy. */
6359 if (args_size == 0 && current_function_calls_alloca)
6360 args_size = 4 * UNITS_PER_WORD;
6362 total_size = var_size + args_size + cprestore_size;
6364 /* Calculate space needed for gp registers. */
6365 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6366 if (mips_save_reg_p (regno))
6368 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6369 mask |= 1 << (regno - GP_REG_FIRST);
6372 /* We need to restore these for the handler. */
6373 if (current_function_calls_eh_return)
6378 regno = EH_RETURN_DATA_REGNO (i);
6379 if (regno == INVALID_REGNUM)
6381 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6382 mask |= 1 << (regno - GP_REG_FIRST);
6386 /* This loop must iterate over the same space as its companion in
6387 save_restore_insns. */
6388 for (regno = (FP_REG_LAST - FP_INC + 1);
6389 regno >= FP_REG_FIRST;
6392 if (mips_save_reg_p (regno))
6394 fp_reg_size += FP_INC * UNITS_PER_FPREG;
6395 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
6399 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6400 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6402 /* Add in the space required for saving incoming register arguments. */
6403 total_size += current_function_pretend_args_size;
6404 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6406 /* Save other computed information. */
6407 cfun->machine->frame.total_size = total_size;
6408 cfun->machine->frame.var_size = var_size;
6409 cfun->machine->frame.args_size = args_size;
6410 cfun->machine->frame.cprestore_size = cprestore_size;
6411 cfun->machine->frame.gp_reg_size = gp_reg_size;
6412 cfun->machine->frame.fp_reg_size = fp_reg_size;
6413 cfun->machine->frame.mask = mask;
6414 cfun->machine->frame.fmask = fmask;
6415 cfun->machine->frame.initialized = reload_completed;
6416 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6417 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
6421 HOST_WIDE_INT offset;
6423 offset = (args_size + cprestore_size + var_size
6424 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6425 cfun->machine->frame.gp_sp_offset = offset;
6426 cfun->machine->frame.gp_save_offset = offset - total_size;
6430 cfun->machine->frame.gp_sp_offset = 0;
6431 cfun->machine->frame.gp_save_offset = 0;
6436 HOST_WIDE_INT offset;
6438 offset = (args_size + cprestore_size + var_size
6439 + gp_reg_rounded + fp_reg_size
6440 - FP_INC * UNITS_PER_FPREG);
6441 cfun->machine->frame.fp_sp_offset = offset;
6442 cfun->machine->frame.fp_save_offset = offset - total_size;
6446 cfun->machine->frame.fp_sp_offset = 0;
6447 cfun->machine->frame.fp_save_offset = 0;
6450 /* Ok, we're done. */
6454 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6455 pointer or argument pointer. TO is either the stack pointer or
6456 hard frame pointer. */
6459 mips_initial_elimination_offset (int from, int to)
6461 HOST_WIDE_INT offset;
6463 compute_frame_size (get_frame_size ());
6465 /* Set OFFSET to the offset from the stack pointer. */
6468 case FRAME_POINTER_REGNUM:
6472 case ARG_POINTER_REGNUM:
6473 offset = (cfun->machine->frame.total_size
6474 - current_function_pretend_args_size);
6481 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6482 offset -= cfun->machine->frame.args_size;
6487 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6488 back to a previous frame. */
6490 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6495 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6498 /* Use FN to save or restore register REGNO. MODE is the register's
6499 mode and OFFSET is the offset of its save slot from the current
6503 mips_save_restore_reg (enum machine_mode mode, int regno,
6504 HOST_WIDE_INT offset, mips_save_restore_fn fn)
6508 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6510 fn (gen_rtx_REG (mode, regno), mem);
6514 /* Call FN for each register that is saved by the current function.
6515 SP_OFFSET is the offset of the current stack pointer from the start
6519 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6521 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6523 enum machine_mode fpr_mode;
6524 HOST_WIDE_INT offset;
6527 /* Save registers starting from high to low. The debuggers prefer at least
6528 the return register be stored at func+4, and also it allows us not to
6529 need a nop in the epilogue if at least one register is reloaded in
6530 addition to return address. */
6531 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6532 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6533 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6535 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6536 offset -= GET_MODE_SIZE (gpr_mode);
6539 /* This loop must iterate over the same space as its companion in
6540 compute_frame_size. */
6541 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
6542 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6543 for (regno = (FP_REG_LAST - FP_INC + 1);
6544 regno >= FP_REG_FIRST;
6546 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6548 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6549 offset -= GET_MODE_SIZE (fpr_mode);
6554 /* If we're generating n32 or n64 abicalls, and the current function
6555 does not use $28 as its global pointer, emit a cplocal directive.
6556 Use pic_offset_table_rtx as the argument to the directive. */
6559 mips_output_cplocal (void)
6561 if (!TARGET_EXPLICIT_RELOCS
6562 && cfun->machine->global_pointer > 0
6563 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6564 output_asm_insn (".cplocal %+", 0);
6567 /* Return the style of GP load sequence that is being used for the
6568 current function. */
6570 enum mips_loadgp_style
6571 mips_current_loadgp_style (void)
6573 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
6576 if (TARGET_ABSOLUTE_ABICALLS)
6577 return LOADGP_ABSOLUTE;
6579 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
6582 /* The __gnu_local_gp symbol. */
6584 static GTY(()) rtx mips_gnu_local_gp;
6586 /* If we're generating n32 or n64 abicalls, emit instructions
6587 to set up the global pointer. */
6590 mips_emit_loadgp (void)
6592 rtx addr, offset, incoming_address;
6594 switch (mips_current_loadgp_style ())
6596 case LOADGP_ABSOLUTE:
6597 if (mips_gnu_local_gp == NULL)
6599 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
6600 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
6602 emit_insn (gen_loadgp_noshared (mips_gnu_local_gp));
6606 addr = XEXP (DECL_RTL (current_function_decl), 0);
6607 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6608 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6609 emit_insn (gen_loadgp (offset, incoming_address));
6610 if (!TARGET_EXPLICIT_RELOCS)
6611 emit_insn (gen_loadgp_blockage ());
6619 /* Set up the stack and frame (if desired) for the function. */
6622 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6625 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6627 #ifdef SDB_DEBUGGING_INFO
6628 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6629 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6632 /* In mips16 mode, we may need to generate a 32 bit to handle
6633 floating point arguments. The linker will arrange for any 32-bit
6634 functions to call this stub, which will then jump to the 16-bit
6636 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6637 && current_function_args_info.fp_code != 0)
6638 build_mips16_function_stub (file);
6640 if (!FUNCTION_NAME_ALREADY_DECLARED)
6642 /* Get the function name the same way that toplev.c does before calling
6643 assemble_start_function. This is needed so that the name used here
6644 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6645 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6647 if (!flag_inhibit_size_directive)
6649 fputs ("\t.ent\t", file);
6650 assemble_name (file, fnname);
6654 assemble_name (file, fnname);
6655 fputs (":\n", file);
6658 /* Stop mips_file_end from treating this function as external. */
6659 if (TARGET_IRIX && mips_abi == ABI_32)
6660 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6662 if (!flag_inhibit_size_directive)
6664 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6666 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6667 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6668 ", args= " HOST_WIDE_INT_PRINT_DEC
6669 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6670 (reg_names[(frame_pointer_needed)
6671 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6672 ((frame_pointer_needed && TARGET_MIPS16)
6673 ? tsize - cfun->machine->frame.args_size
6675 reg_names[GP_REG_FIRST + 31],
6676 cfun->machine->frame.var_size,
6677 cfun->machine->frame.num_gp,
6678 cfun->machine->frame.num_fp,
6679 cfun->machine->frame.args_size,
6680 cfun->machine->frame.cprestore_size);
6682 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6683 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6684 cfun->machine->frame.mask,
6685 cfun->machine->frame.gp_save_offset);
6686 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6687 cfun->machine->frame.fmask,
6688 cfun->machine->frame.fp_save_offset);
6691 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6692 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6695 if (mips_current_loadgp_style () == LOADGP_OLDABI)
6697 /* Handle the initialization of $gp for SVR4 PIC. */
6698 if (!cfun->machine->all_noreorder_p)
6699 output_asm_insn ("%(.cpload\t%^%)", 0);
6701 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6703 else if (cfun->machine->all_noreorder_p)
6704 output_asm_insn ("%(%<", 0);
6706 /* Tell the assembler which register we're using as the global
6707 pointer. This is needed for thunks, since they can use either
6708 explicit relocs or assembler macros. */
6709 mips_output_cplocal ();
6712 /* Make the last instruction frame related and note that it performs
6713 the operation described by FRAME_PATTERN. */
6716 mips_set_frame_expr (rtx frame_pattern)
6720 insn = get_last_insn ();
6721 RTX_FRAME_RELATED_P (insn) = 1;
6722 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6728 /* Return a frame-related rtx that stores REG at MEM.
6729 REG must be a single register. */
6732 mips_frame_set (rtx mem, rtx reg)
6736 /* If we're saving the return address register and the dwarf return
6737 address column differs from the hard register number, adjust the
6738 note reg to refer to the former. */
6739 if (REGNO (reg) == GP_REG_FIRST + 31
6740 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6741 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
6743 set = gen_rtx_SET (VOIDmode, mem, reg);
6744 RTX_FRAME_RELATED_P (set) = 1;
6750 /* Save register REG to MEM. Make the instruction frame-related. */
6753 mips_save_reg (rtx reg, rtx mem)
6755 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6759 if (mips_split_64bit_move_p (mem, reg))
6760 mips_split_64bit_move (mem, reg);
6762 emit_move_insn (mem, reg);
6764 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6765 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6766 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6771 && REGNO (reg) != GP_REG_FIRST + 31
6772 && !M16_REG_P (REGNO (reg)))
6774 /* Save a non-mips16 register by moving it through a temporary.
6775 We don't need to do this for $31 since there's a special
6776 instruction for it. */
6777 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6778 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6781 emit_move_insn (mem, reg);
6783 mips_set_frame_expr (mips_frame_set (mem, reg));
6788 /* Expand the prologue into a bunch of separate insns. */
6791 mips_expand_prologue (void)
6795 if (cfun->machine->global_pointer > 0)
6796 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6798 size = compute_frame_size (get_frame_size ());
6800 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6801 bytes beforehand; this is enough to cover the register save area
6802 without going out of range. */
6803 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6805 HOST_WIDE_INT step1;
6807 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6808 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6810 GEN_INT (-step1)))) = 1;
6812 mips_for_each_saved_reg (size, mips_save_reg);
6815 /* Allocate the rest of the frame. */
6818 if (SMALL_OPERAND (-size))
6819 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6821 GEN_INT (-size)))) = 1;
6824 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6827 /* There are no instructions to add or subtract registers
6828 from the stack pointer, so use the frame pointer as a
6829 temporary. We should always be using a frame pointer
6830 in this case anyway. */
6831 gcc_assert (frame_pointer_needed);
6832 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6833 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6834 hard_frame_pointer_rtx,
6835 MIPS_PROLOGUE_TEMP (Pmode)));
6836 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6839 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6841 MIPS_PROLOGUE_TEMP (Pmode)));
6843 /* Describe the combined effect of the previous instructions. */
6845 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6846 plus_constant (stack_pointer_rtx, -size)));
6850 /* Set up the frame pointer, if we're using one. In mips16 code,
6851 we point the frame pointer ahead of the outgoing argument area.
6852 This should allow more variables & incoming arguments to be
6853 accessed with unextended instructions. */
6854 if (frame_pointer_needed)
6856 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6858 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6859 if (SMALL_OPERAND (cfun->machine->frame.args_size))
6861 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6866 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
6867 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6868 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6869 hard_frame_pointer_rtx,
6870 MIPS_PROLOGUE_TEMP (Pmode)));
6872 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
6873 plus_constant (stack_pointer_rtx,
6874 cfun->machine->frame.args_size)));
6878 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6879 stack_pointer_rtx)) = 1;
6882 mips_emit_loadgp ();
6884 /* If generating o32/o64 abicalls, save $gp on the stack. */
6885 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
6886 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6888 /* If we are profiling, make sure no instructions are scheduled before
6889 the call to mcount. */
6891 if (current_function_profile)
6892 emit_insn (gen_blockage ());
6895 /* Do any necessary cleanup after a function to restore stack, frame,
6898 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6901 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6902 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6904 /* Reinstate the normal $gp. */
6905 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6906 mips_output_cplocal ();
6908 if (cfun->machine->all_noreorder_p)
6910 /* Avoid using %>%) since it adds excess whitespace. */
6911 output_asm_insn (".set\tmacro", 0);
6912 output_asm_insn (".set\treorder", 0);
6913 set_noreorder = set_nomacro = 0;
6916 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6920 /* Get the function name the same way that toplev.c does before calling
6921 assemble_start_function. This is needed so that the name used here
6922 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6923 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6924 fputs ("\t.end\t", file);
6925 assemble_name (file, fnname);
6930 /* Emit instructions to restore register REG from slot MEM. */
6933 mips_restore_reg (rtx reg, rtx mem)
6935 /* There's no mips16 instruction to load $31 directly. Load into
6936 $7 instead and adjust the return insn appropriately. */
6937 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6938 reg = gen_rtx_REG (GET_MODE (reg), 7);
6940 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6942 /* Can't restore directly; move through a temporary. */
6943 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6944 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6947 emit_move_insn (reg, mem);
6951 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6952 if this epilogue precedes a sibling call, false if it is for a normal
6953 "epilogue" pattern. */
6956 mips_expand_epilogue (int sibcall_p)
6958 HOST_WIDE_INT step1, step2;
6961 if (!sibcall_p && mips_can_use_return_insn ())
6963 emit_jump_insn (gen_return ());
6967 /* Split the frame into two. STEP1 is the amount of stack we should
6968 deallocate before restoring the registers. STEP2 is the amount we
6969 should deallocate afterwards.
6971 Start off by assuming that no registers need to be restored. */
6972 step1 = cfun->machine->frame.total_size;
6975 /* Work out which register holds the frame address. Account for the
6976 frame pointer offset used by mips16 code. */
6977 if (!frame_pointer_needed)
6978 base = stack_pointer_rtx;
6981 base = hard_frame_pointer_rtx;
6983 step1 -= cfun->machine->frame.args_size;
6986 /* If we need to restore registers, deallocate as much stack as
6987 possible in the second step without going out of range. */
6988 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6990 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6994 /* Set TARGET to BASE + STEP1. */
7000 /* Get an rtx for STEP1 that we can add to BASE. */
7001 adjust = GEN_INT (step1);
7002 if (!SMALL_OPERAND (step1))
7004 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7005 adjust = MIPS_EPILOGUE_TEMP (Pmode);
7008 /* Normal mode code can copy the result straight into $sp. */
7010 target = stack_pointer_rtx;
7012 emit_insn (gen_add3_insn (target, base, adjust));
7015 /* Copy TARGET into the stack pointer. */
7016 if (target != stack_pointer_rtx)
7017 emit_move_insn (stack_pointer_rtx, target);
7019 /* If we're using addressing macros, $gp is implicitly used by all
7020 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
7022 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
7023 emit_insn (gen_blockage ());
7025 /* Restore the registers. */
7026 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
7029 /* Deallocate the final bit of the frame. */
7031 emit_insn (gen_add3_insn (stack_pointer_rtx,
7035 /* Add in the __builtin_eh_return stack adjustment. We need to
7036 use a temporary in mips16 code. */
7037 if (current_function_calls_eh_return)
7041 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
7042 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
7043 MIPS_EPILOGUE_TEMP (Pmode),
7044 EH_RETURN_STACKADJ_RTX));
7045 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
7048 emit_insn (gen_add3_insn (stack_pointer_rtx,
7050 EH_RETURN_STACKADJ_RTX));
7055 /* The mips16 loads the return address into $7, not $31. */
7056 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
7057 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7058 GP_REG_FIRST + 7)));
7060 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7061 GP_REG_FIRST + 31)));
7065 /* Return nonzero if this function is known to have a null epilogue.
7066 This allows the optimizer to omit jumps to jumps if no stack
7070 mips_can_use_return_insn (void)
7074 if (! reload_completed)
7077 if (regs_ever_live[31] || current_function_profile)
7080 return_type = DECL_RESULT (current_function_decl);
7082 /* In mips16 mode, a function which returns a floating point value
7083 needs to arrange to copy the return value into the floating point
7086 && mips16_hard_float
7087 && ! aggregate_value_p (return_type, current_function_decl)
7088 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
7089 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
7092 if (cfun->machine->frame.initialized)
7093 return cfun->machine->frame.total_size == 0;
7095 return compute_frame_size (get_frame_size ()) == 0;
7098 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
7099 in order to avoid duplicating too much logic from elsewhere. */
7102 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7103 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7106 rtx this, temp1, temp2, insn, fnaddr;
7108 /* Pretend to be a post-reload pass while generating rtl. */
7110 reload_completed = 1;
7111 reset_block_changes ();
7113 /* Pick a global pointer. Use a call-clobbered register if
7114 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
7116 cfun->machine->global_pointer
7117 = REGNO (pic_offset_table_rtx)
7118 = TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
7120 /* Set up the global pointer for n32 or n64 abicalls. */
7121 mips_emit_loadgp ();
7123 /* We need two temporary registers in some cases. */
7124 temp1 = gen_rtx_REG (Pmode, 2);
7125 temp2 = gen_rtx_REG (Pmode, 3);
7127 /* Find out which register contains the "this" pointer. */
7128 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7129 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
7131 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
7133 /* Add DELTA to THIS. */
7136 rtx offset = GEN_INT (delta);
7137 if (!SMALL_OPERAND (delta))
7139 emit_move_insn (temp1, offset);
7142 emit_insn (gen_add3_insn (this, this, offset));
7145 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
7146 if (vcall_offset != 0)
7150 /* Set TEMP1 to *THIS. */
7151 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
7153 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
7154 addr = mips_add_offset (temp2, temp1, vcall_offset);
7156 /* Load the offset and add it to THIS. */
7157 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
7158 emit_insn (gen_add3_insn (this, this, temp1));
7161 /* Jump to the target function. Use a sibcall if direct jumps are
7162 allowed, otherwise load the address into a register first. */
7163 fnaddr = XEXP (DECL_RTL (function), 0);
7164 if (TARGET_MIPS16 || TARGET_USE_GOT || TARGET_LONG_CALLS)
7166 /* This is messy. gas treats "la $25,foo" as part of a call
7167 sequence and may allow a global "foo" to be lazily bound.
7168 The general move patterns therefore reject this combination.
7170 In this context, lazy binding would actually be OK
7171 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
7172 TARGET_CALL_SAVED_GP; see mips_load_call_address.
7173 We must therefore load the address via a temporary
7174 register if mips_dangerous_for_la25_p.
7176 If we jump to the temporary register rather than $25, the assembler
7177 can use the move insn to fill the jump's delay slot. */
7178 if (TARGET_USE_PIC_FN_ADDR_REG
7179 && !mips_dangerous_for_la25_p (fnaddr))
7180 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7181 mips_load_call_address (temp1, fnaddr, true);
7183 if (TARGET_USE_PIC_FN_ADDR_REG
7184 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
7185 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
7186 emit_jump_insn (gen_indirect_jump (temp1));
7190 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
7191 SIBLING_CALL_P (insn) = 1;
7194 /* Run just enough of rest_of_compilation. This sequence was
7195 "borrowed" from alpha.c. */
7196 insn = get_insns ();
7197 insn_locators_initialize ();
7198 split_all_insns_noflow ();
7200 mips16_lay_out_constants ();
7201 shorten_branches (insn);
7202 final_start_function (insn, file, 1);
7203 final (insn, file, 1);
7204 final_end_function ();
7206 /* Clean up the vars set above. Note that final_end_function resets
7207 the global pointer for us. */
7208 reload_completed = 0;
7212 /* Returns nonzero if X contains a SYMBOL_REF. */
7215 symbolic_expression_p (rtx x)
7217 if (GET_CODE (x) == SYMBOL_REF)
7220 if (GET_CODE (x) == CONST)
7221 return symbolic_expression_p (XEXP (x, 0));
7224 return symbolic_expression_p (XEXP (x, 0));
7226 if (ARITHMETIC_P (x))
7227 return (symbolic_expression_p (XEXP (x, 0))
7228 || symbolic_expression_p (XEXP (x, 1)));
7233 /* Choose the section to use for the constant rtx expression X that has
7237 mips_select_rtx_section (enum machine_mode mode, rtx x,
7238 unsigned HOST_WIDE_INT align)
7242 /* In mips16 mode, the constant table always goes in the same section
7243 as the function, so that constants can be loaded using PC relative
7245 return function_section (current_function_decl);
7247 else if (TARGET_EMBEDDED_DATA)
7249 /* For embedded applications, always put constants in read-only data,
7250 in order to reduce RAM usage. */
7251 return mergeable_constant_section (mode, align, 0);
7255 /* For hosted applications, always put constants in small data if
7256 possible, as this gives the best performance. */
7257 /* ??? Consider using mergeable small data sections. */
7259 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
7260 && mips_section_threshold > 0)
7261 return get_named_section (NULL, ".sdata", 0);
7262 else if (flag_pic && symbolic_expression_p (x))
7263 return get_named_section (NULL, ".data.rel.ro", 3);
7265 return mergeable_constant_section (mode, align, 0);
7269 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7271 The complication here is that, with the combination TARGET_ABICALLS
7272 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
7273 therefore not be included in the read-only part of a DSO. Handle such
7274 cases by selecting a normal data section instead of a read-only one.
7275 The logic apes that in default_function_rodata_section. */
7278 mips_function_rodata_section (tree decl)
7280 if (!TARGET_ABICALLS || TARGET_GPWORD)
7281 return default_function_rodata_section (decl);
7283 if (decl && DECL_SECTION_NAME (decl))
7285 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7286 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7288 char *rname = ASTRDUP (name);
7290 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7292 else if (flag_function_sections && flag_data_sections
7293 && strncmp (name, ".text.", 6) == 0)
7295 char *rname = ASTRDUP (name);
7296 memcpy (rname + 1, "data", 4);
7297 return get_section (rname, SECTION_WRITE, decl);
7300 return data_section;
7303 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
7304 locally-defined objects go in a small data section. It also controls
7305 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
7306 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
7309 mips_in_small_data_p (tree decl)
7313 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7316 /* We don't yet generate small-data references for -mabicalls. See related
7317 -G handling in override_options. */
7318 if (TARGET_ABICALLS)
7321 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7325 /* Reject anything that isn't in a known small-data section. */
7326 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7327 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7330 /* If a symbol is defined externally, the assembler will use the
7331 usual -G rules when deciding how to implement macros. */
7332 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
7335 else if (TARGET_EMBEDDED_DATA)
7337 /* Don't put constants into the small data section: we want them
7338 to be in ROM rather than RAM. */
7339 if (TREE_CODE (decl) != VAR_DECL)
7342 if (TREE_READONLY (decl)
7343 && !TREE_SIDE_EFFECTS (decl)
7344 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7348 size = int_size_in_bytes (TREE_TYPE (decl));
7349 return (size > 0 && size <= mips_section_threshold);
7352 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7353 anchors for small data: the GP register acts as an anchor in that
7354 case. We also don't want to use them for PC-relative accesses,
7355 where the PC acts as an anchor. */
7358 mips_use_anchors_for_symbol_p (rtx symbol)
7360 switch (mips_classify_symbol (symbol))
7362 case SYMBOL_CONSTANT_POOL:
7363 case SYMBOL_SMALL_DATA:
7371 /* See whether VALTYPE is a record whose fields should be returned in
7372 floating-point registers. If so, return the number of fields and
7373 list them in FIELDS (which should have two elements). Return 0
7376 For n32 & n64, a structure with one or two fields is returned in
7377 floating-point registers as long as every field has a floating-point
7381 mips_fpr_return_fields (tree valtype, tree *fields)
7389 if (TREE_CODE (valtype) != RECORD_TYPE)
7393 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
7395 if (TREE_CODE (field) != FIELD_DECL)
7398 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
7404 fields[i++] = field;
7410 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
7411 a value in the most significant part of $2/$3 if:
7413 - the target is big-endian;
7415 - the value has a structure or union type (we generalize this to
7416 cover aggregates from other languages too); and
7418 - the structure is not returned in floating-point registers. */
7421 mips_return_in_msb (tree valtype)
7425 return (TARGET_NEWABI
7426 && TARGET_BIG_ENDIAN
7427 && AGGREGATE_TYPE_P (valtype)
7428 && mips_fpr_return_fields (valtype, fields) == 0);
7432 /* Return a composite value in a pair of floating-point registers.
7433 MODE1 and OFFSET1 are the mode and byte offset for the first value,
7434 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
7437 For n32 & n64, $f0 always holds the first value and $f2 the second.
7438 Otherwise the values are packed together as closely as possible. */
7441 mips_return_fpr_pair (enum machine_mode mode,
7442 enum machine_mode mode1, HOST_WIDE_INT offset1,
7443 enum machine_mode mode2, HOST_WIDE_INT offset2)
7447 inc = (TARGET_NEWABI ? 2 : FP_INC);
7448 return gen_rtx_PARALLEL
7451 gen_rtx_EXPR_LIST (VOIDmode,
7452 gen_rtx_REG (mode1, FP_RETURN),
7454 gen_rtx_EXPR_LIST (VOIDmode,
7455 gen_rtx_REG (mode2, FP_RETURN + inc),
7456 GEN_INT (offset2))));
7461 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
7462 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
7463 VALTYPE is null and MODE is the mode of the return value. */
7466 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
7467 enum machine_mode mode)
7474 mode = TYPE_MODE (valtype);
7475 unsignedp = TYPE_UNSIGNED (valtype);
7477 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
7478 true, we must promote the mode just as PROMOTE_MODE does. */
7479 mode = promote_mode (valtype, mode, &unsignedp, 1);
7481 /* Handle structures whose fields are returned in $f0/$f2. */
7482 switch (mips_fpr_return_fields (valtype, fields))
7485 return gen_rtx_REG (mode, FP_RETURN);
7488 return mips_return_fpr_pair (mode,
7489 TYPE_MODE (TREE_TYPE (fields[0])),
7490 int_byte_position (fields[0]),
7491 TYPE_MODE (TREE_TYPE (fields[1])),
7492 int_byte_position (fields[1]));
7495 /* If a value is passed in the most significant part of a register, see
7496 whether we have to round the mode up to a whole number of words. */
7497 if (mips_return_in_msb (valtype))
7499 HOST_WIDE_INT size = int_size_in_bytes (valtype);
7500 if (size % UNITS_PER_WORD != 0)
7502 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
7503 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7507 /* For EABI, the class of return register depends entirely on MODE.
7508 For example, "struct { some_type x; }" and "union { some_type x; }"
7509 are returned in the same way as a bare "some_type" would be.
7510 Other ABIs only use FPRs for scalar, complex or vector types. */
7511 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7512 return gen_rtx_REG (mode, GP_RETURN);
7515 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7516 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7517 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7518 return gen_rtx_REG (mode, FP_RETURN);
7520 /* Handle long doubles for n32 & n64. */
7522 return mips_return_fpr_pair (mode,
7524 DImode, GET_MODE_SIZE (mode) / 2);
7526 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7527 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7528 return mips_return_fpr_pair (mode,
7529 GET_MODE_INNER (mode), 0,
7530 GET_MODE_INNER (mode),
7531 GET_MODE_SIZE (mode) / 2);
7533 return gen_rtx_REG (mode, GP_RETURN);
7536 /* Return nonzero when an argument must be passed by reference. */
7539 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7540 enum machine_mode mode, tree type,
7541 bool named ATTRIBUTE_UNUSED)
7543 if (mips_abi == ABI_EABI)
7547 /* ??? How should SCmode be handled? */
7548 if (mode == DImode || mode == DFmode)
7551 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
7552 return size == -1 || size > UNITS_PER_WORD;
7556 /* If we have a variable-sized parameter, we have no choice. */
7557 return targetm.calls.must_pass_in_stack (mode, type);
7562 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7563 enum machine_mode mode ATTRIBUTE_UNUSED,
7564 tree type ATTRIBUTE_UNUSED, bool named)
7566 return mips_abi == ABI_EABI && named;
7569 /* Return true if registers of class CLASS cannot change from mode FROM
7573 mips_cannot_change_mode_class (enum machine_mode from,
7574 enum machine_mode to, enum reg_class class)
7576 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7577 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7579 if (TARGET_BIG_ENDIAN)
7581 /* When a multi-word value is stored in paired floating-point
7582 registers, the first register always holds the low word.
7583 We therefore can't allow FPRs to change between single-word
7584 and multi-word modes. */
7585 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7590 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7591 in LO and HI, the high word always comes first. We therefore
7592 can't allow values stored in HI to change between single-word
7593 and multi-word modes.
7594 This rule applies to both the original HI/LO pair and the new
7595 DSP accumulators. */
7596 if (reg_classes_intersect_p (ACC_REGS, class))
7601 /* gcc assumes that each word of a multiword register can be accessed
7602 individually using SUBREGs. This is not true for floating-point
7603 registers if they are bigger than a word. */
7604 if (UNITS_PER_FPREG > UNITS_PER_WORD
7605 && GET_MODE_SIZE (from) > UNITS_PER_WORD
7606 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
7607 && reg_classes_intersect_p (FP_REGS, class))
7610 /* Loading a 32-bit value into a 64-bit floating-point register
7611 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
7612 We can't allow 64-bit float registers to change from SImode to
7617 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7618 && reg_classes_intersect_p (FP_REGS, class))
7624 /* Return true if X should not be moved directly into register $25.
7625 We need this because many versions of GAS will treat "la $25,foo" as
7626 part of a call sequence and so allow a global "foo" to be lazily bound. */
7629 mips_dangerous_for_la25_p (rtx x)
7631 return (!TARGET_EXPLICIT_RELOCS
7633 && GET_CODE (x) == SYMBOL_REF
7634 && mips_global_symbol_p (x));
7637 /* Implement PREFERRED_RELOAD_CLASS. */
7640 mips_preferred_reload_class (rtx x, enum reg_class class)
7642 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
7645 if (TARGET_HARD_FLOAT
7646 && FLOAT_MODE_P (GET_MODE (x))
7647 && reg_class_subset_p (FP_REGS, class))
7650 if (reg_class_subset_p (GR_REGS, class))
7653 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7659 /* This function returns the register class required for a secondary
7660 register when copying between one of the registers in CLASS, and X,
7661 using MODE. If IN_P is nonzero, the copy is going from X to the
7662 register, otherwise the register is the source. A return value of
7663 NO_REGS means that no secondary register is required. */
7666 mips_secondary_reload_class (enum reg_class class,
7667 enum machine_mode mode, rtx x, int in_p)
7669 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7673 if (REG_P (x)|| GET_CODE (x) == SUBREG)
7674 regno = true_regnum (x);
7676 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
7678 if (mips_dangerous_for_la25_p (x))
7681 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7685 /* Copying from HI or LO to anywhere other than a general register
7686 requires a general register.
7687 This rule applies to both the original HI/LO pair and the new
7688 DSP accumulators. */
7689 if (reg_class_subset_p (class, ACC_REGS))
7691 if (TARGET_MIPS16 && in_p)
7693 /* We can't really copy to HI or LO at all in mips16 mode. */
7696 return gp_reg_p ? NO_REGS : gr_regs;
7698 if (ACC_REG_P (regno))
7700 if (TARGET_MIPS16 && ! in_p)
7702 /* We can't really copy to HI or LO at all in mips16 mode. */
7705 return class == gr_regs ? NO_REGS : gr_regs;
7708 /* We can only copy a value to a condition code register from a
7709 floating point register, and even then we require a scratch
7710 floating point register. We can only copy a value out of a
7711 condition code register into a general register. */
7712 if (class == ST_REGS)
7716 return gp_reg_p ? NO_REGS : gr_regs;
7718 if (ST_REG_P (regno))
7722 return class == gr_regs ? NO_REGS : gr_regs;
7725 if (class == FP_REGS)
7729 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7732 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7734 /* We can use the l.s and l.d macros to load floating-point
7735 constants. ??? For l.s, we could probably get better
7736 code by returning GR_REGS here. */
7739 else if (gp_reg_p || x == CONST0_RTX (mode))
7741 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7744 else if (FP_REG_P (regno))
7746 /* In this case we can use mov.s or mov.d. */
7751 /* Otherwise, we need to reload through an integer register. */
7756 /* In mips16 mode, going between memory and anything but M16_REGS
7757 requires an M16_REG. */
7760 if (class != M16_REGS && class != M16_NA_REGS)
7768 if (class == M16_REGS || class == M16_NA_REGS)
7777 /* Implement CLASS_MAX_NREGS.
7779 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
7781 - ST_REGS are always hold CCmode values, and CCmode values are
7782 considered to be 4 bytes wide.
7784 All other register classes are covered by UNITS_PER_WORD. Note that
7785 this is true even for unions of integer and float registers when the
7786 latter are smaller than the former. The only supported combination
7787 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
7788 words but 32-bit float registers. A word-based calculation is correct
7789 in that case since -msingle-float disallows multi-FPR values. */
7792 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7793 enum machine_mode mode)
7795 if (class == ST_REGS)
7796 return (GET_MODE_SIZE (mode) + 3) / 4;
7797 else if (class == FP_REGS)
7798 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
7800 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7804 mips_valid_pointer_mode (enum machine_mode mode)
7806 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7809 /* Target hook for vector_mode_supported_p. */
7812 mips_vector_mode_supported_p (enum machine_mode mode)
7817 return TARGET_PAIRED_SINGLE_FLOAT;
7828 /* If we can access small data directly (using gp-relative relocation
7829 operators) return the small data pointer, otherwise return null.
7831 For each mips16 function which refers to GP relative symbols, we
7832 use a pseudo register, initialized at the start of the function, to
7833 hold the $gp value. */
7836 mips16_gp_pseudo_reg (void)
7838 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7842 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7844 /* We want to initialize this to a value which gcc will believe
7846 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
7848 push_topmost_sequence ();
7849 /* We need to emit the initialization after the FUNCTION_BEG
7850 note, so that it will be integrated. */
7851 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7853 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7855 if (scan == NULL_RTX)
7856 scan = get_insns ();
7857 insn = emit_insn_after (insn, scan);
7858 pop_topmost_sequence ();
7861 return cfun->machine->mips16_gp_pseudo_rtx;
7864 /* Write out code to move floating point arguments in or out of
7865 general registers. Output the instructions to FILE. FP_CODE is
7866 the code describing which arguments are present (see the comment at
7867 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7868 we are copying from the floating point registers. */
7871 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7877 /* This code only works for the original 32-bit ABI and the O64 ABI. */
7878 gcc_assert (TARGET_OLDABI);
7884 gparg = GP_ARG_FIRST;
7885 fparg = FP_ARG_FIRST;
7886 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7890 if ((fparg & 1) != 0)
7892 fprintf (file, "\t%s\t%s,%s\n", s,
7893 reg_names[gparg], reg_names[fparg]);
7895 else if ((f & 3) == 2)
7898 fprintf (file, "\td%s\t%s,%s\n", s,
7899 reg_names[gparg], reg_names[fparg]);
7902 if ((fparg & 1) != 0)
7904 if (TARGET_BIG_ENDIAN)
7905 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7906 reg_names[gparg], reg_names[fparg + 1], s,
7907 reg_names[gparg + 1], reg_names[fparg]);
7909 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7910 reg_names[gparg], reg_names[fparg], s,
7911 reg_names[gparg + 1], reg_names[fparg + 1]);
7924 /* Build a mips16 function stub. This is used for functions which
7925 take arguments in the floating point registers. It is 32-bit code
7926 that moves the floating point args into the general registers, and
7927 then jumps to the 16-bit code. */
7930 build_mips16_function_stub (FILE *file)
7933 char *secname, *stubname;
7934 tree stubid, stubdecl;
7938 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7939 secname = (char *) alloca (strlen (fnname) + 20);
7940 sprintf (secname, ".mips16.fn.%s", fnname);
7941 stubname = (char *) alloca (strlen (fnname) + 20);
7942 sprintf (stubname, "__fn_stub_%s", fnname);
7943 stubid = get_identifier (stubname);
7944 stubdecl = build_decl (FUNCTION_DECL, stubid,
7945 build_function_type (void_type_node, NULL_TREE));
7946 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7948 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7950 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7952 fprintf (file, "%s%s",
7953 need_comma ? ", " : "",
7954 (f & 3) == 1 ? "float" : "double");
7957 fprintf (file, ")\n");
7959 fprintf (file, "\t.set\tnomips16\n");
7960 switch_to_section (function_section (stubdecl));
7961 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7963 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7964 within a .ent, and we cannot emit another .ent. */
7965 if (!FUNCTION_NAME_ALREADY_DECLARED)
7967 fputs ("\t.ent\t", file);
7968 assemble_name (file, stubname);
7972 assemble_name (file, stubname);
7973 fputs (":\n", file);
7975 /* We don't want the assembler to insert any nops here. */
7976 fprintf (file, "\t.set\tnoreorder\n");
7978 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7980 fprintf (asm_out_file, "\t.set\tnoat\n");
7981 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7982 assemble_name (file, fnname);
7983 fprintf (file, "\n");
7984 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7985 fprintf (asm_out_file, "\t.set\tat\n");
7987 /* Unfortunately, we can't fill the jump delay slot. We can't fill
7988 with one of the mfc1 instructions, because the result is not
7989 available for one instruction, so if the very first instruction
7990 in the function refers to the register, it will see the wrong
7992 fprintf (file, "\tnop\n");
7994 fprintf (file, "\t.set\treorder\n");
7996 if (!FUNCTION_NAME_ALREADY_DECLARED)
7998 fputs ("\t.end\t", file);
7999 assemble_name (file, stubname);
8003 fprintf (file, "\t.set\tmips16\n");
8005 switch_to_section (function_section (current_function_decl));
8008 /* We keep a list of functions for which we have already built stubs
8009 in build_mips16_call_stub. */
8013 struct mips16_stub *next;
8018 static struct mips16_stub *mips16_stubs;
8020 /* Build a call stub for a mips16 call. A stub is needed if we are
8021 passing any floating point values which should go into the floating
8022 point registers. If we are, and the call turns out to be to a
8023 32-bit function, the stub will be used to move the values into the
8024 floating point registers before calling the 32-bit function. The
8025 linker will magically adjust the function call to either the 16-bit
8026 function or the 32-bit stub, depending upon where the function call
8027 is actually defined.
8029 Similarly, we need a stub if the return value might come back in a
8030 floating point register.
8032 RETVAL is the location of the return value, or null if this is
8033 a call rather than a call_value. FN is the address of the
8034 function and ARG_SIZE is the size of the arguments. FP_CODE
8035 is the code built by function_arg. This function returns a nonzero
8036 value if it builds the call instruction itself. */
8039 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
8043 char *secname, *stubname;
8044 struct mips16_stub *l;
8045 tree stubid, stubdecl;
8049 /* We don't need to do anything if we aren't in mips16 mode, or if
8050 we were invoked with the -msoft-float option. */
8051 if (! TARGET_MIPS16 || ! mips16_hard_float)
8054 /* Figure out whether the value might come back in a floating point
8056 fpret = (retval != 0
8057 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
8058 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
8060 /* We don't need to do anything if there were no floating point
8061 arguments and the value will not be returned in a floating point
8063 if (fp_code == 0 && ! fpret)
8066 /* We don't need to do anything if this is a call to a special
8067 mips16 support function. */
8068 if (GET_CODE (fn) == SYMBOL_REF
8069 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
8072 /* This code will only work for o32 and o64 abis. The other ABI's
8073 require more sophisticated support. */
8074 gcc_assert (TARGET_OLDABI);
8076 /* We can only handle SFmode and DFmode floating point return
8079 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
8081 /* If we're calling via a function pointer, then we must always call
8082 via a stub. There are magic stubs provided in libgcc.a for each
8083 of the required cases. Each of them expects the function address
8084 to arrive in register $2. */
8086 if (GET_CODE (fn) != SYMBOL_REF)
8092 /* ??? If this code is modified to support other ABI's, we need
8093 to handle PARALLEL return values here. */
8095 sprintf (buf, "__mips16_call_stub_%s%d",
8097 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
8100 id = get_identifier (buf);
8101 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8103 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
8105 if (retval == NULL_RTX)
8106 insn = gen_call_internal (stub_fn, arg_size);
8108 insn = gen_call_value_internal (retval, stub_fn, arg_size);
8109 insn = emit_call_insn (insn);
8111 /* Put the register usage information on the CALL. */
8112 CALL_INSN_FUNCTION_USAGE (insn) =
8113 gen_rtx_EXPR_LIST (VOIDmode,
8114 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
8115 CALL_INSN_FUNCTION_USAGE (insn));
8117 /* If we are handling a floating point return value, we need to
8118 save $18 in the function prologue. Putting a note on the
8119 call will mean that regs_ever_live[$18] will be true if the
8120 call is not eliminated, and we can check that in the prologue
8123 CALL_INSN_FUNCTION_USAGE (insn) =
8124 gen_rtx_EXPR_LIST (VOIDmode,
8125 gen_rtx_USE (VOIDmode,
8126 gen_rtx_REG (word_mode, 18)),
8127 CALL_INSN_FUNCTION_USAGE (insn));
8129 /* Return 1 to tell the caller that we've generated the call
8134 /* We know the function we are going to call. If we have already
8135 built a stub, we don't need to do anything further. */
8137 fnname = XSTR (fn, 0);
8138 for (l = mips16_stubs; l != NULL; l = l->next)
8139 if (strcmp (l->name, fnname) == 0)
8144 /* Build a special purpose stub. When the linker sees a
8145 function call in mips16 code, it will check where the target
8146 is defined. If the target is a 32-bit call, the linker will
8147 search for the section defined here. It can tell which
8148 symbol this section is associated with by looking at the
8149 relocation information (the name is unreliable, since this
8150 might be a static function). If such a section is found, the
8151 linker will redirect the call to the start of the magic
8154 If the function does not return a floating point value, the
8155 special stub section is named
8158 If the function does return a floating point value, the stub
8160 .mips16.call.fp.FNNAME
8163 secname = (char *) alloca (strlen (fnname) + 40);
8164 sprintf (secname, ".mips16.call.%s%s",
8167 stubname = (char *) alloca (strlen (fnname) + 20);
8168 sprintf (stubname, "__call_stub_%s%s",
8171 stubid = get_identifier (stubname);
8172 stubdecl = build_decl (FUNCTION_DECL, stubid,
8173 build_function_type (void_type_node, NULL_TREE));
8174 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8176 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
8178 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
8182 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8184 fprintf (asm_out_file, "%s%s",
8185 need_comma ? ", " : "",
8186 (f & 3) == 1 ? "float" : "double");
8189 fprintf (asm_out_file, ")\n");
8191 fprintf (asm_out_file, "\t.set\tnomips16\n");
8192 assemble_start_function (stubdecl, stubname);
8194 if (!FUNCTION_NAME_ALREADY_DECLARED)
8196 fputs ("\t.ent\t", asm_out_file);
8197 assemble_name (asm_out_file, stubname);
8198 fputs ("\n", asm_out_file);
8200 assemble_name (asm_out_file, stubname);
8201 fputs (":\n", asm_out_file);
8204 /* We build the stub code by hand. That's the only way we can
8205 do it, since we can't generate 32-bit code during a 16-bit
8208 /* We don't want the assembler to insert any nops here. */
8209 fprintf (asm_out_file, "\t.set\tnoreorder\n");
8211 mips16_fp_args (asm_out_file, fp_code, 0);
8215 fprintf (asm_out_file, "\t.set\tnoat\n");
8216 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
8218 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8219 fprintf (asm_out_file, "\t.set\tat\n");
8220 /* Unfortunately, we can't fill the jump delay slot. We
8221 can't fill with one of the mtc1 instructions, because the
8222 result is not available for one instruction, so if the
8223 very first instruction in the function refers to the
8224 register, it will see the wrong value. */
8225 fprintf (asm_out_file, "\tnop\n");
8229 fprintf (asm_out_file, "\tmove\t%s,%s\n",
8230 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
8231 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
8232 /* As above, we can't fill the delay slot. */
8233 fprintf (asm_out_file, "\tnop\n");
8234 if (GET_MODE (retval) == SFmode)
8235 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8236 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
8239 if (TARGET_BIG_ENDIAN)
8241 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8242 reg_names[GP_REG_FIRST + 2],
8243 reg_names[FP_REG_FIRST + 1]);
8244 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8245 reg_names[GP_REG_FIRST + 3],
8246 reg_names[FP_REG_FIRST + 0]);
8250 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8251 reg_names[GP_REG_FIRST + 2],
8252 reg_names[FP_REG_FIRST + 0]);
8253 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8254 reg_names[GP_REG_FIRST + 3],
8255 reg_names[FP_REG_FIRST + 1]);
8258 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
8259 /* As above, we can't fill the delay slot. */
8260 fprintf (asm_out_file, "\tnop\n");
8263 fprintf (asm_out_file, "\t.set\treorder\n");
8265 #ifdef ASM_DECLARE_FUNCTION_SIZE
8266 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
8269 if (!FUNCTION_NAME_ALREADY_DECLARED)
8271 fputs ("\t.end\t", asm_out_file);
8272 assemble_name (asm_out_file, stubname);
8273 fputs ("\n", asm_out_file);
8276 fprintf (asm_out_file, "\t.set\tmips16\n");
8278 /* Record this stub. */
8279 l = (struct mips16_stub *) xmalloc (sizeof *l);
8280 l->name = xstrdup (fnname);
8282 l->next = mips16_stubs;
8286 /* If we expect a floating point return value, but we've built a
8287 stub which does not expect one, then we're in trouble. We can't
8288 use the existing stub, because it won't handle the floating point
8289 value. We can't build a new stub, because the linker won't know
8290 which stub to use for the various calls in this object file.
8291 Fortunately, this case is illegal, since it means that a function
8292 was declared in two different ways in a single compilation. */
8293 if (fpret && ! l->fpret)
8294 error ("cannot handle inconsistent calls to %qs", fnname);
8296 /* If we are calling a stub which handles a floating point return
8297 value, we need to arrange to save $18 in the prologue. We do
8298 this by marking the function call as using the register. The
8299 prologue will later see that it is used, and emit code to save
8306 if (retval == NULL_RTX)
8307 insn = gen_call_internal (fn, arg_size);
8309 insn = gen_call_value_internal (retval, fn, arg_size);
8310 insn = emit_call_insn (insn);
8312 CALL_INSN_FUNCTION_USAGE (insn) =
8313 gen_rtx_EXPR_LIST (VOIDmode,
8314 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
8315 CALL_INSN_FUNCTION_USAGE (insn));
8317 /* Return 1 to tell the caller that we've generated the call
8322 /* Return 0 to let the caller generate the call insn. */
8326 /* An entry in the mips16 constant pool. VALUE is the pool constant,
8327 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
8329 struct mips16_constant {
8330 struct mips16_constant *next;
8333 enum machine_mode mode;
8336 /* Information about an incomplete mips16 constant pool. FIRST is the
8337 first constant, HIGHEST_ADDRESS is the highest address that the first
8338 byte of the pool can have, and INSN_ADDRESS is the current instruction
8341 struct mips16_constant_pool {
8342 struct mips16_constant *first;
8343 int highest_address;
8347 /* Add constant VALUE to POOL and return its label. MODE is the
8348 value's mode (used for CONST_INTs, etc.). */
8351 add_constant (struct mips16_constant_pool *pool,
8352 rtx value, enum machine_mode mode)
8354 struct mips16_constant **p, *c;
8355 bool first_of_size_p;
8357 /* See whether the constant is already in the pool. If so, return the
8358 existing label, otherwise leave P pointing to the place where the
8359 constant should be added.
8361 Keep the pool sorted in increasing order of mode size so that we can
8362 reduce the number of alignments needed. */
8363 first_of_size_p = true;
8364 for (p = &pool->first; *p != 0; p = &(*p)->next)
8366 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
8368 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
8370 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
8371 first_of_size_p = false;
8374 /* In the worst case, the constant needed by the earliest instruction
8375 will end up at the end of the pool. The entire pool must then be
8376 accessible from that instruction.
8378 When adding the first constant, set the pool's highest address to
8379 the address of the first out-of-range byte. Adjust this address
8380 downwards each time a new constant is added. */
8381 if (pool->first == 0)
8382 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
8383 is the address of the instruction with the lowest two bits clear.
8384 The base PC value for ld has the lowest three bits clear. Assume
8385 the worst case here. */
8386 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
8387 pool->highest_address -= GET_MODE_SIZE (mode);
8388 if (first_of_size_p)
8389 /* Take into account the worst possible padding due to alignment. */
8390 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
8392 /* Create a new entry. */
8393 c = (struct mips16_constant *) xmalloc (sizeof *c);
8396 c->label = gen_label_rtx ();
8403 /* Output constant VALUE after instruction INSN and return the last
8404 instruction emitted. MODE is the mode of the constant. */
8407 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
8409 switch (GET_MODE_CLASS (mode))
8413 rtx size = GEN_INT (GET_MODE_SIZE (mode));
8414 return emit_insn_after (gen_consttable_int (value, size), insn);
8418 return emit_insn_after (gen_consttable_float (value), insn);
8420 case MODE_VECTOR_FLOAT:
8421 case MODE_VECTOR_INT:
8424 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
8425 insn = dump_constants_1 (GET_MODE_INNER (mode),
8426 CONST_VECTOR_ELT (value, i), insn);
8436 /* Dump out the constants in CONSTANTS after INSN. */
8439 dump_constants (struct mips16_constant *constants, rtx insn)
8441 struct mips16_constant *c, *next;
8445 for (c = constants; c != NULL; c = next)
8447 /* If necessary, increase the alignment of PC. */
8448 if (align < GET_MODE_SIZE (c->mode))
8450 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
8451 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
8453 align = GET_MODE_SIZE (c->mode);
8455 insn = emit_label_after (c->label, insn);
8456 insn = dump_constants_1 (c->mode, c->value, insn);
8462 emit_barrier_after (insn);
8465 /* Return the length of instruction INSN. */
8468 mips16_insn_length (rtx insn)
8472 rtx body = PATTERN (insn);
8473 if (GET_CODE (body) == ADDR_VEC)
8474 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
8475 if (GET_CODE (body) == ADDR_DIFF_VEC)
8476 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
8478 return get_attr_length (insn);
8481 /* Rewrite *X so that constant pool references refer to the constant's
8482 label instead. DATA points to the constant pool structure. */
8485 mips16_rewrite_pool_refs (rtx *x, void *data)
8487 struct mips16_constant_pool *pool = data;
8488 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
8489 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
8490 get_pool_constant (*x),
8491 get_pool_mode (*x)));
8495 /* Build MIPS16 constant pools. */
8498 mips16_lay_out_constants (void)
8500 struct mips16_constant_pool pool;
8504 memset (&pool, 0, sizeof (pool));
8505 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8507 /* Rewrite constant pool references in INSN. */
8509 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
8511 pool.insn_address += mips16_insn_length (insn);
8513 if (pool.first != NULL)
8515 /* If there are no natural barriers between the first user of
8516 the pool and the highest acceptable address, we'll need to
8517 create a new instruction to jump around the constant pool.
8518 In the worst case, this instruction will be 4 bytes long.
8520 If it's too late to do this transformation after INSN,
8521 do it immediately before INSN. */
8522 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8526 label = gen_label_rtx ();
8528 jump = emit_jump_insn_before (gen_jump (label), insn);
8529 JUMP_LABEL (jump) = label;
8530 LABEL_NUSES (label) = 1;
8531 barrier = emit_barrier_after (jump);
8533 emit_label_after (label, barrier);
8534 pool.insn_address += 4;
8537 /* See whether the constant pool is now out of range of the first
8538 user. If so, output the constants after the previous barrier.
8539 Note that any instructions between BARRIER and INSN (inclusive)
8540 will use negative offsets to refer to the pool. */
8541 if (pool.insn_address > pool.highest_address)
8543 dump_constants (pool.first, barrier);
8547 else if (BARRIER_P (insn))
8551 dump_constants (pool.first, get_last_insn ());
8554 /* A temporary variable used by for_each_rtx callbacks, etc. */
8555 static rtx mips_sim_insn;
8557 /* A structure representing the state of the processor pipeline.
8558 Used by the mips_sim_* family of functions. */
8560 /* The maximum number of instructions that can be issued in a cycle.
8561 (Caches mips_issue_rate.) */
8562 unsigned int issue_rate;
8564 /* The current simulation time. */
8567 /* How many more instructions can be issued in the current cycle. */
8568 unsigned int insns_left;
8570 /* LAST_SET[X].INSN is the last instruction to set register X.
8571 LAST_SET[X].TIME is the time at which that instruction was issued.
8572 INSN is null if no instruction has yet set register X. */
8576 } last_set[FIRST_PSEUDO_REGISTER];
8578 /* The pipeline's current DFA state. */
8582 /* Reset STATE to the initial simulation state. */
8585 mips_sim_reset (struct mips_sim *state)
8588 state->insns_left = state->issue_rate;
8589 memset (&state->last_set, 0, sizeof (state->last_set));
8590 state_reset (state->dfa_state);
8593 /* Initialize STATE before its first use. DFA_STATE points to an
8594 allocated but uninitialized DFA state. */
8597 mips_sim_init (struct mips_sim *state, state_t dfa_state)
8599 state->issue_rate = mips_issue_rate ();
8600 state->dfa_state = dfa_state;
8601 mips_sim_reset (state);
8604 /* Advance STATE by one clock cycle. */
8607 mips_sim_next_cycle (struct mips_sim *state)
8610 state->insns_left = state->issue_rate;
8611 state_transition (state->dfa_state, 0);
8614 /* Advance simulation state STATE until instruction INSN can read
8618 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
8622 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8623 if (state->last_set[REGNO (reg) + i].insn != 0)
8627 t = state->last_set[REGNO (reg) + i].time;
8628 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8629 while (state->time < t)
8630 mips_sim_next_cycle (state);
8634 /* A for_each_rtx callback. If *X is a register, advance simulation state
8635 DATA until mips_sim_insn can read the register's value. */
8638 mips_sim_wait_regs_2 (rtx *x, void *data)
8641 mips_sim_wait_reg (data, mips_sim_insn, *x);
8645 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
8648 mips_sim_wait_regs_1 (rtx *x, void *data)
8650 for_each_rtx (x, mips_sim_wait_regs_2, data);
8653 /* Advance simulation state STATE until all of INSN's register
8654 dependencies are satisfied. */
8657 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
8659 mips_sim_insn = insn;
8660 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8663 /* Advance simulation state STATE until the units required by
8664 instruction INSN are available. */
8667 mips_sim_wait_units (struct mips_sim *state, rtx insn)
8671 tmp_state = alloca (state_size ());
8672 while (state->insns_left == 0
8673 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8674 state_transition (tmp_state, insn) >= 0))
8675 mips_sim_next_cycle (state);
8678 /* Advance simulation state STATE until INSN is ready to issue. */
8681 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8683 mips_sim_wait_regs (state, insn);
8684 mips_sim_wait_units (state, insn);
8687 /* mips_sim_insn has just set X. Update the LAST_SET array
8688 in simulation state DATA. */
8691 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8693 struct mips_sim *state;
8698 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8700 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8701 state->last_set[REGNO (x) + i].time = state->time;
8705 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8706 can issue immediately (i.e., that mips_sim_wait_insn has already
8710 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8712 state_transition (state->dfa_state, insn);
8713 state->insns_left--;
8715 mips_sim_insn = insn;
8716 note_stores (PATTERN (insn), mips_sim_record_set, state);
8719 /* Simulate issuing a NOP in state STATE. */
8722 mips_sim_issue_nop (struct mips_sim *state)
8724 if (state->insns_left == 0)
8725 mips_sim_next_cycle (state);
8726 state->insns_left--;
8729 /* Update simulation state STATE so that it's ready to accept the instruction
8730 after INSN. INSN should be part of the main rtl chain, not a member of a
8734 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8736 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8738 mips_sim_issue_nop (state);
8740 switch (GET_CODE (SEQ_BEGIN (insn)))
8744 /* We can't predict the processor state after a call or label. */
8745 mips_sim_reset (state);
8749 /* The delay slots of branch likely instructions are only executed
8750 when the branch is taken. Therefore, if the caller has simulated
8751 the delay slot instruction, STATE does not really reflect the state
8752 of the pipeline for the instruction after the delay slot. Also,
8753 branch likely instructions tend to incur a penalty when not taken,
8754 so there will probably be an extra delay between the branch and
8755 the instruction after the delay slot. */
8756 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8757 mips_sim_reset (state);
8765 /* The VR4130 pipeline issues aligned pairs of instructions together,
8766 but it stalls the second instruction if it depends on the first.
8767 In order to cut down the amount of logic required, this dependence
8768 check is not based on a full instruction decode. Instead, any non-SPECIAL
8769 instruction is assumed to modify the register specified by bits 20-16
8770 (which is usually the "rt" field).
8772 In beq, beql, bne and bnel instructions, the rt field is actually an
8773 input, so we can end up with a false dependence between the branch
8774 and its delay slot. If this situation occurs in instruction INSN,
8775 try to avoid it by swapping rs and rt. */
8778 vr4130_avoid_branch_rt_conflict (rtx insn)
8782 first = SEQ_BEGIN (insn);
8783 second = SEQ_END (insn);
8785 && NONJUMP_INSN_P (second)
8786 && GET_CODE (PATTERN (first)) == SET
8787 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8788 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8790 /* Check for the right kind of condition. */
8791 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8792 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8793 && REG_P (XEXP (cond, 0))
8794 && REG_P (XEXP (cond, 1))
8795 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8796 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8798 /* SECOND mentions the rt register but not the rs register. */
8799 rtx tmp = XEXP (cond, 0);
8800 XEXP (cond, 0) = XEXP (cond, 1);
8801 XEXP (cond, 1) = tmp;
8806 /* Implement -mvr4130-align. Go through each basic block and simulate the
8807 processor pipeline. If we find that a pair of instructions could execute
8808 in parallel, and the first of those instruction is not 8-byte aligned,
8809 insert a nop to make it aligned. */
8812 vr4130_align_insns (void)
8814 struct mips_sim state;
8815 rtx insn, subinsn, last, last2, next;
8820 /* LAST is the last instruction before INSN to have a nonzero length.
8821 LAST2 is the last such instruction before LAST. */
8825 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8828 mips_sim_init (&state, alloca (state_size ()));
8829 for (insn = get_insns (); insn != 0; insn = next)
8831 unsigned int length;
8833 next = NEXT_INSN (insn);
8835 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8836 This isn't really related to the alignment pass, but we do it on
8837 the fly to avoid a separate instruction walk. */
8838 vr4130_avoid_branch_rt_conflict (insn);
8840 if (USEFUL_INSN_P (insn))
8841 FOR_EACH_SUBINSN (subinsn, insn)
8843 mips_sim_wait_insn (&state, subinsn);
8845 /* If we want this instruction to issue in parallel with the
8846 previous one, make sure that the previous instruction is
8847 aligned. There are several reasons why this isn't worthwhile
8848 when the second instruction is a call:
8850 - Calls are less likely to be performance critical,
8851 - There's a good chance that the delay slot can execute
8852 in parallel with the call.
8853 - The return address would then be unaligned.
8855 In general, if we're going to insert a nop between instructions
8856 X and Y, it's better to insert it immediately after X. That
8857 way, if the nop makes Y aligned, it will also align any labels
8859 if (state.insns_left != state.issue_rate
8860 && !CALL_P (subinsn))
8862 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8864 /* SUBINSN is the first instruction in INSN and INSN is
8865 aligned. We want to align the previous instruction
8866 instead, so insert a nop between LAST2 and LAST.
8868 Note that LAST could be either a single instruction
8869 or a branch with a delay slot. In the latter case,
8870 LAST, like INSN, is already aligned, but the delay
8871 slot must have some extra delay that stops it from
8872 issuing at the same time as the branch. We therefore
8873 insert a nop before the branch in order to align its
8875 emit_insn_after (gen_nop (), last2);
8878 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8880 /* SUBINSN is the delay slot of INSN, but INSN is
8881 currently unaligned. Insert a nop between
8882 LAST and INSN to align it. */
8883 emit_insn_after (gen_nop (), last);
8887 mips_sim_issue_insn (&state, subinsn);
8889 mips_sim_finish_insn (&state, insn);
8891 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8892 length = get_attr_length (insn);
8895 /* If the instruction is an asm statement or multi-instruction
8896 mips.md patern, the length is only an estimate. Insert an
8897 8 byte alignment after it so that the following instructions
8898 can be handled correctly. */
8899 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8900 && (recog_memoized (insn) < 0 || length >= 8))
8902 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8903 next = NEXT_INSN (next);
8904 mips_sim_next_cycle (&state);
8907 else if (length & 4)
8908 aligned_p = !aligned_p;
8913 /* See whether INSN is an aligned label. */
8914 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8920 /* Subroutine of mips_reorg. If there is a hazard between INSN
8921 and a previous instruction, avoid it by inserting nops after
8924 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8925 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8926 before using the value of that register. *HILO_DELAY counts the
8927 number of instructions since the last hilo hazard (that is,
8928 the number of instructions since the last mflo or mfhi).
8930 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8931 for the next instruction.
8933 LO_REG is an rtx for the LO register, used in dependence checking. */
8936 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8937 rtx *delayed_reg, rtx lo_reg)
8945 pattern = PATTERN (insn);
8947 /* Do not put the whole function in .set noreorder if it contains
8948 an asm statement. We don't know whether there will be hazards
8949 between the asm statement and the gcc-generated code. */
8950 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8951 cfun->machine->all_noreorder_p = false;
8953 /* Ignore zero-length instructions (barriers and the like). */
8954 ninsns = get_attr_length (insn) / 4;
8958 /* Work out how many nops are needed. Note that we only care about
8959 registers that are explicitly mentioned in the instruction's pattern.
8960 It doesn't matter that calls use the argument registers or that they
8961 clobber hi and lo. */
8962 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8963 nops = 2 - *hilo_delay;
8964 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8969 /* Insert the nops between this instruction and the previous one.
8970 Each new nop takes us further from the last hilo hazard. */
8971 *hilo_delay += nops;
8973 emit_insn_after (gen_hazard_nop (), after);
8975 /* Set up the state for the next instruction. */
8976 *hilo_delay += ninsns;
8978 if (INSN_CODE (insn) >= 0)
8979 switch (get_attr_hazard (insn))
8989 set = single_set (insn);
8990 gcc_assert (set != 0);
8991 *delayed_reg = SET_DEST (set);
8997 /* Go through the instruction stream and insert nops where necessary.
8998 See if the whole function can then be put into .set noreorder &
9002 mips_avoid_hazards (void)
9004 rtx insn, last_insn, lo_reg, delayed_reg;
9007 /* Force all instructions to be split into their final form. */
9008 split_all_insns_noflow ();
9010 /* Recalculate instruction lengths without taking nops into account. */
9011 cfun->machine->ignore_hazard_length_p = true;
9012 shorten_branches (get_insns ());
9014 cfun->machine->all_noreorder_p = true;
9016 /* Profiled functions can't be all noreorder because the profiler
9017 support uses assembler macros. */
9018 if (current_function_profile)
9019 cfun->machine->all_noreorder_p = false;
9021 /* Code compiled with -mfix-vr4120 can't be all noreorder because
9022 we rely on the assembler to work around some errata. */
9023 if (TARGET_FIX_VR4120)
9024 cfun->machine->all_noreorder_p = false;
9026 /* The same is true for -mfix-vr4130 if we might generate mflo or
9027 mfhi instructions. Note that we avoid using mflo and mfhi if
9028 the VR4130 macc and dmacc instructions are available instead;
9029 see the *mfhilo_{si,di}_macc patterns. */
9030 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
9031 cfun->machine->all_noreorder_p = false;
9036 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
9038 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
9041 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
9042 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9043 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
9044 &hilo_delay, &delayed_reg, lo_reg);
9046 mips_avoid_hazard (last_insn, insn, &hilo_delay,
9047 &delayed_reg, lo_reg);
9054 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
9060 mips16_lay_out_constants ();
9061 else if (TARGET_EXPLICIT_RELOCS)
9063 if (mips_flag_delayed_branch)
9064 dbr_schedule (get_insns ());
9065 mips_avoid_hazards ();
9066 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
9067 vr4130_align_insns ();
9071 /* This function does three things:
9073 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9074 - Register the mips16 hardware floating point stubs.
9075 - Register the gofast functions if selected using --enable-gofast. */
9077 #include "config/gofast.h"
9080 mips_init_libfuncs (void)
9082 if (TARGET_FIX_VR4120)
9084 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9085 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9088 if (TARGET_MIPS16 && mips16_hard_float)
9090 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9091 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9092 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9093 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9095 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9096 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9097 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9098 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9099 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9100 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9102 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9103 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9105 if (TARGET_DOUBLE_FLOAT)
9107 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9108 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9109 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9110 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9112 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9113 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9114 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9115 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9116 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9117 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9119 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9120 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9122 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9123 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9127 gofast_maybe_init_libfuncs ();
9130 /* Return a number assessing the cost of moving a register in class
9131 FROM to class TO. The classes are expressed using the enumeration
9132 values such as `GENERAL_REGS'. A value of 2 is the default; other
9133 values are interpreted relative to that.
9135 It is not required that the cost always equal 2 when FROM is the
9136 same as TO; on some machines it is expensive to move between
9137 registers if they are not general registers.
9139 If reload sees an insn consisting of a single `set' between two
9140 hard registers, and if `REGISTER_MOVE_COST' applied to their
9141 classes returns a value of 2, reload does not check to ensure that
9142 the constraints of the insn are met. Setting a cost of other than
9143 2 will allow reload to verify that the constraints are met. You
9144 should do this if the `movM' pattern's constraints do not allow
9147 ??? We make the cost of moving from HI/LO into general
9148 registers the same as for one of moving general registers to
9149 HI/LO for TARGET_MIPS16 in order to prevent allocating a
9150 pseudo to HI/LO. This might hurt optimizations though, it
9151 isn't clear if it is wise. And it might not work in all cases. We
9152 could solve the DImode LO reg problem by using a multiply, just
9153 like reload_{in,out}si. We could solve the SImode/HImode HI reg
9154 problem by using divide instructions. divu puts the remainder in
9155 the HI reg, so doing a divide by -1 will move the value in the HI
9156 reg for all values except -1. We could handle that case by using a
9157 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
9158 a compare/branch to test the input value to see which instruction
9159 we need to use. This gets pretty messy, but it is feasible. */
9162 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9163 enum reg_class to, enum reg_class from)
9165 if (from == M16_REGS && GR_REG_CLASS_P (to))
9167 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
9169 else if (GR_REG_CLASS_P (from))
9173 else if (to == M16_NA_REGS)
9175 else if (GR_REG_CLASS_P (to))
9182 else if (to == FP_REGS)
9184 else if (reg_class_subset_p (to, ACC_REGS))
9191 else if (COP_REG_CLASS_P (to))
9196 else if (from == FP_REGS)
9198 if (GR_REG_CLASS_P (to))
9200 else if (to == FP_REGS)
9202 else if (to == ST_REGS)
9205 else if (reg_class_subset_p (from, ACC_REGS))
9207 if (GR_REG_CLASS_P (to))
9215 else if (from == ST_REGS && GR_REG_CLASS_P (to))
9217 else if (COP_REG_CLASS_P (from))
9223 ??? What cases are these? Shouldn't we return 2 here? */
9228 /* Return the length of INSN. LENGTH is the initial length computed by
9229 attributes in the machine-description file. */
9232 mips_adjust_insn_length (rtx insn, int length)
9234 /* A unconditional jump has an unfilled delay slot if it is not part
9235 of a sequence. A conditional jump normally has a delay slot, but
9236 does not on MIPS16. */
9237 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9240 /* See how many nops might be needed to avoid hardware hazards. */
9241 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9242 switch (get_attr_hazard (insn))
9256 /* All MIPS16 instructions are a measly two bytes. */
9264 /* Return an asm sequence to start a noat block and load the address
9265 of a label into $1. */
9268 mips_output_load_label (void)
9270 if (TARGET_EXPLICIT_RELOCS)
9274 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9277 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9280 if (ISA_HAS_LOAD_DELAY)
9281 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9282 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9286 if (Pmode == DImode)
9287 return "%[dla\t%@,%0";
9289 return "%[la\t%@,%0";
9293 /* Return the assembly code for INSN, which has the operands given by
9294 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9295 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9296 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9297 version of BRANCH_IF_TRUE. */
9300 mips_output_conditional_branch (rtx insn, rtx *operands,
9301 const char *branch_if_true,
9302 const char *branch_if_false)
9304 unsigned int length;
9305 rtx taken, not_taken;
9307 length = get_attr_length (insn);
9310 /* Just a simple conditional branch. */
9311 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9312 return branch_if_true;
9315 /* Generate a reversed branch around a direct jump. This fallback does
9316 not use branch-likely instructions. */
9317 mips_branch_likely = false;
9318 not_taken = gen_label_rtx ();
9319 taken = operands[1];
9321 /* Generate the reversed branch to NOT_TAKEN. */
9322 operands[1] = not_taken;
9323 output_asm_insn (branch_if_false, operands);
9325 /* If INSN has a delay slot, we must provide delay slots for both the
9326 branch to NOT_TAKEN and the conditional jump. We must also ensure
9327 that INSN's delay slot is executed in the appropriate cases. */
9330 /* This first delay slot will always be executed, so use INSN's
9331 delay slot if is not annulled. */
9332 if (!INSN_ANNULLED_BRANCH_P (insn))
9334 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9335 asm_out_file, optimize, 1, NULL);
9336 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9339 output_asm_insn ("nop", 0);
9340 fprintf (asm_out_file, "\n");
9343 /* Output the unconditional branch to TAKEN. */
9345 output_asm_insn ("j\t%0%/", &taken);
9348 output_asm_insn (mips_output_load_label (), &taken);
9349 output_asm_insn ("jr\t%@%]%/", 0);
9352 /* Now deal with its delay slot; see above. */
9355 /* This delay slot will only be executed if the branch is taken.
9356 Use INSN's delay slot if is annulled. */
9357 if (INSN_ANNULLED_BRANCH_P (insn))
9359 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9360 asm_out_file, optimize, 1, NULL);
9361 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9364 output_asm_insn ("nop", 0);
9365 fprintf (asm_out_file, "\n");
9368 /* Output NOT_TAKEN. */
9369 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9370 CODE_LABEL_NUMBER (not_taken));
9374 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9375 if some ordered condition is true. The condition is given by
9376 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9377 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9378 its second is always zero. */
9381 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9383 const char *branch[2];
9385 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9386 Make BRANCH[0] branch on the inverse condition. */
9387 switch (GET_CODE (operands[0]))
9389 /* These cases are equivalent to comparisons against zero. */
9391 inverted_p = !inverted_p;
9394 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9395 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9398 /* These cases are always true or always false. */
9400 inverted_p = !inverted_p;
9403 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9404 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9408 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9409 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9412 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9415 /* Used to output div or ddiv instruction DIVISION, which has the operands
9416 given by OPERANDS. Add in a divide-by-zero check if needed.
9418 When working around R4000 and R4400 errata, we need to make sure that
9419 the division is not immediately followed by a shift[1][2]. We also
9420 need to stop the division from being put into a branch delay slot[3].
9421 The easiest way to avoid both problems is to add a nop after the
9422 division. When a divide-by-zero check is needed, this nop can be
9423 used to fill the branch delay slot.
9425 [1] If a double-word or a variable shift executes immediately
9426 after starting an integer division, the shift may give an
9427 incorrect result. See quotations of errata #16 and #28 from
9428 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9429 in mips.md for details.
9431 [2] A similar bug to [1] exists for all revisions of the
9432 R4000 and the R4400 when run in an MC configuration.
9433 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9435 "19. In this following sequence:
9437 ddiv (or ddivu or div or divu)
9438 dsll32 (or dsrl32, dsra32)
9440 if an MPT stall occurs, while the divide is slipping the cpu
9441 pipeline, then the following double shift would end up with an
9444 Workaround: The compiler needs to avoid generating any
9445 sequence with divide followed by extended double shift."
9447 This erratum is also present in "MIPS R4400MC Errata, Processor
9448 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9449 & 3.0" as errata #10 and #4, respectively.
9451 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9452 (also valid for MIPS R4000MC processors):
9454 "52. R4000SC: This bug does not apply for the R4000PC.
9456 There are two flavors of this bug:
9458 1) If the instruction just after divide takes an RF exception
9459 (tlb-refill, tlb-invalid) and gets an instruction cache
9460 miss (both primary and secondary) and the line which is
9461 currently in secondary cache at this index had the first
9462 data word, where the bits 5..2 are set, then R4000 would
9463 get a wrong result for the div.
9468 ------------------- # end-of page. -tlb-refill
9473 ------------------- # end-of page. -tlb-invalid
9476 2) If the divide is in the taken branch delay slot, where the
9477 target takes RF exception and gets an I-cache miss for the
9478 exception vector or where I-cache miss occurs for the
9479 target address, under the above mentioned scenarios, the
9480 div would get wrong results.
9483 j r2 # to next page mapped or unmapped
9484 div r8,r9 # this bug would be there as long
9485 # as there is an ICache miss and
9486 nop # the "data pattern" is present
9489 beq r0, r0, NextPage # to Next page
9493 This bug is present for div, divu, ddiv, and ddivu
9496 Workaround: For item 1), OS could make sure that the next page
9497 after the divide instruction is also mapped. For item 2), the
9498 compiler could make sure that the divide instruction is not in
9499 the branch delay slot."
9501 These processors have PRId values of 0x00004220 and 0x00004300 for
9502 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9505 mips_output_division (const char *division, rtx *operands)
9510 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9512 output_asm_insn (s, operands);
9515 if (TARGET_CHECK_ZERO_DIV)
9519 output_asm_insn (s, operands);
9520 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9522 else if (GENERATE_DIVIDE_TRAPS)
9524 output_asm_insn (s, operands);
9529 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9530 output_asm_insn (s, operands);
9531 s = "break\t7%)\n1:";
9537 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9538 with a final "000" replaced by "k". Ignore case.
9540 Note: this function is shared between GCC and GAS. */
9543 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
9545 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9546 given++, canonical++;
9548 return ((*given == 0 && *canonical == 0)
9549 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9553 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9554 CPU name. We've traditionally allowed a lot of variation here.
9556 Note: this function is shared between GCC and GAS. */
9559 mips_matching_cpu_name_p (const char *canonical, const char *given)
9561 /* First see if the name matches exactly, or with a final "000"
9563 if (mips_strict_matching_cpu_name_p (canonical, given))
9566 /* If not, try comparing based on numerical designation alone.
9567 See if GIVEN is an unadorned number, or 'r' followed by a number. */
9568 if (TOLOWER (*given) == 'r')
9570 if (!ISDIGIT (*given))
9573 /* Skip over some well-known prefixes in the canonical name,
9574 hoping to find a number there too. */
9575 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9577 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9579 else if (TOLOWER (canonical[0]) == 'r')
9582 return mips_strict_matching_cpu_name_p (canonical, given);
9586 /* Return the mips_cpu_info entry for the processor or ISA given
9587 by CPU_STRING. Return null if the string isn't recognized.
9589 A similar function exists in GAS. */
9591 static const struct mips_cpu_info *
9592 mips_parse_cpu (const char *cpu_string)
9594 const struct mips_cpu_info *p;
9597 /* In the past, we allowed upper-case CPU names, but it doesn't
9598 work well with the multilib machinery. */
9599 for (s = cpu_string; *s != 0; s++)
9602 warning (0, "the cpu name must be lower case");
9606 /* 'from-abi' selects the most compatible architecture for the given
9607 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9608 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9609 version. Look first at the -mgp options, if given, otherwise base
9610 the choice on MASK_64BIT in TARGET_DEFAULT. */
9611 if (strcasecmp (cpu_string, "from-abi") == 0)
9612 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9613 : ABI_NEEDS_64BIT_REGS ? 3
9614 : (TARGET_64BIT ? 3 : 1));
9616 /* 'default' has traditionally been a no-op. Probably not very useful. */
9617 if (strcasecmp (cpu_string, "default") == 0)
9620 for (p = mips_cpu_info_table; p->name != 0; p++)
9621 if (mips_matching_cpu_name_p (p->name, cpu_string))
9628 /* Return the processor associated with the given ISA level, or null
9629 if the ISA isn't valid. */
9631 static const struct mips_cpu_info *
9632 mips_cpu_info_from_isa (int isa)
9634 const struct mips_cpu_info *p;
9636 for (p = mips_cpu_info_table; p->name != 0; p++)
9643 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9644 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9645 they only hold condition code modes, and CCmode is always considered to
9646 be 4 bytes wide. All other registers are word sized. */
9649 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9651 if (ST_REG_P (regno))
9652 return ((GET_MODE_SIZE (mode) + 3) / 4);
9653 else if (! FP_REG_P (regno))
9654 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9656 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9659 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
9660 all BLKmode objects are returned in memory. Under the new (N32 and
9661 64-bit MIPS ABIs) small structures are returned in a register.
9662 Objects with varying size must still be returned in memory, of
9666 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9669 return (TYPE_MODE (type) == BLKmode);
9671 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9672 || (int_size_in_bytes (type) == -1));
9676 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9678 return !TARGET_OLDABI;
9681 /* Return true if INSN is a multiply-add or multiply-subtract
9682 instruction and PREV assigns to the accumulator operand. */
9685 mips_linked_madd_p (rtx prev, rtx insn)
9689 x = single_set (insn);
9695 if (GET_CODE (x) == PLUS
9696 && GET_CODE (XEXP (x, 0)) == MULT
9697 && reg_set_p (XEXP (x, 1), prev))
9700 if (GET_CODE (x) == MINUS
9701 && GET_CODE (XEXP (x, 1)) == MULT
9702 && reg_set_p (XEXP (x, 0), prev))
9708 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9709 that may clobber hi or lo. */
9711 static rtx mips_macc_chains_last_hilo;
9713 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9714 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9717 mips_macc_chains_record (rtx insn)
9719 if (get_attr_may_clobber_hilo (insn))
9720 mips_macc_chains_last_hilo = insn;
9723 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9724 has NREADY elements, looking for a multiply-add or multiply-subtract
9725 instruction that is cumulative with mips_macc_chains_last_hilo.
9726 If there is one, promote it ahead of anything else that might
9727 clobber hi or lo. */
9730 mips_macc_chains_reorder (rtx *ready, int nready)
9734 if (mips_macc_chains_last_hilo != 0)
9735 for (i = nready - 1; i >= 0; i--)
9736 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9738 for (j = nready - 1; j > i; j--)
9739 if (recog_memoized (ready[j]) >= 0
9740 && get_attr_may_clobber_hilo (ready[j]))
9742 mips_promote_ready (ready, i, j);
9749 /* The last instruction to be scheduled. */
9751 static rtx vr4130_last_insn;
9753 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9754 points to an rtx that is initially an instruction. Nullify the rtx
9755 if the instruction uses the value of register X. */
9758 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9760 rtx *insn_ptr = data;
9763 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9767 /* Return true if there is true register dependence between vr4130_last_insn
9771 vr4130_true_reg_dependence_p (rtx insn)
9773 note_stores (PATTERN (vr4130_last_insn),
9774 vr4130_true_reg_dependence_p_1, &insn);
9778 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9779 the ready queue and that INSN2 is the instruction after it, return
9780 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9781 in which INSN1 and INSN2 can probably issue in parallel, but for
9782 which (INSN2, INSN1) should be less sensitive to instruction
9783 alignment than (INSN1, INSN2). See 4130.md for more details. */
9786 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9790 /* Check for the following case:
9792 1) there is some other instruction X with an anti dependence on INSN1;
9793 2) X has a higher priority than INSN2; and
9794 3) X is an arithmetic instruction (and thus has no unit restrictions).
9796 If INSN1 is the last instruction blocking X, it would better to
9797 choose (INSN1, X) over (INSN2, INSN1). */
9798 FOR_EACH_DEP_LINK (dep, INSN_FORW_DEPS (insn1))
9799 if (DEP_LINK_KIND (dep) == REG_DEP_ANTI
9800 && INSN_PRIORITY (DEP_LINK_CON (dep)) > INSN_PRIORITY (insn2)
9801 && recog_memoized (DEP_LINK_CON (dep)) >= 0
9802 && get_attr_vr4130_class (DEP_LINK_CON (dep)) == VR4130_CLASS_ALU)
9805 if (vr4130_last_insn != 0
9806 && recog_memoized (insn1) >= 0
9807 && recog_memoized (insn2) >= 0)
9809 /* See whether INSN1 and INSN2 use different execution units,
9810 or if they are both ALU-type instructions. If so, they can
9811 probably execute in parallel. */
9812 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9813 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9814 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9816 /* If only one of the instructions has a dependence on
9817 vr4130_last_insn, prefer to schedule the other one first. */
9818 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9819 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9823 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9824 is not an ALU-type instruction and if INSN1 uses the same
9825 execution unit. (Note that if this condition holds, we already
9826 know that INSN2 uses a different execution unit.) */
9827 if (class1 != VR4130_CLASS_ALU
9828 && recog_memoized (vr4130_last_insn) >= 0
9829 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9836 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9837 queue with at least two instructions. Swap the first two if
9838 vr4130_swap_insns_p says that it could be worthwhile. */
9841 vr4130_reorder (rtx *ready, int nready)
9843 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9844 mips_promote_ready (ready, nready - 2, nready - 1);
9847 /* Remove the instruction at index LOWER from ready queue READY and
9848 reinsert it in front of the instruction at index HIGHER. LOWER must
9852 mips_promote_ready (rtx *ready, int lower, int higher)
9857 new_head = ready[lower];
9858 for (i = lower; i < higher; i++)
9859 ready[i] = ready[i + 1];
9860 ready[i] = new_head;
9863 /* Implement TARGET_SCHED_REORDER. */
9866 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9867 rtx *ready, int *nreadyp, int cycle)
9869 if (!reload_completed && TUNE_MACC_CHAINS)
9872 mips_macc_chains_last_hilo = 0;
9874 mips_macc_chains_reorder (ready, *nreadyp);
9876 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9879 vr4130_last_insn = 0;
9881 vr4130_reorder (ready, *nreadyp);
9883 return mips_issue_rate ();
9886 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9889 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9892 switch (GET_CODE (PATTERN (insn)))
9896 /* Don't count USEs and CLOBBERs against the issue rate. */
9901 if (!reload_completed && TUNE_MACC_CHAINS)
9902 mips_macc_chains_record (insn);
9903 vr4130_last_insn = insn;
9909 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9910 dependencies have no cost. */
9913 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9914 rtx dep ATTRIBUTE_UNUSED, int cost)
9916 if (REG_NOTE_KIND (link) != 0)
9921 /* Return the number of instructions that can be issued per cycle. */
9924 mips_issue_rate (void)
9928 case PROCESSOR_R4130:
9929 case PROCESSOR_R5400:
9930 case PROCESSOR_R5500:
9931 case PROCESSOR_R7000:
9932 case PROCESSOR_R9000:
9936 case PROCESSOR_SB1A:
9937 /* This is actually 4, but we get better performance if we claim 3.
9938 This is partly because of unwanted speculative code motion with the
9939 larger number, and partly because in most common cases we can't
9940 reach the theoretical max of 4. */
9948 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9949 be as wide as the scheduling freedom in the DFA. */
9952 mips_multipass_dfa_lookahead (void)
9954 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9961 /* Implements a store data bypass check. We need this because the cprestore
9962 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
9963 default routine to abort. We just return false for that case. */
9964 /* ??? Should try to give a better result here than assuming false. */
9967 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
9969 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
9972 return ! store_data_bypass_p (out_insn, in_insn);
9975 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9976 return the first operand of the associated "pref" or "prefx" insn. */
9979 mips_prefetch_cookie (rtx write, rtx locality)
9981 /* store_streamed / load_streamed. */
9982 if (INTVAL (locality) <= 0)
9983 return GEN_INT (INTVAL (write) + 4);
9986 if (INTVAL (locality) <= 2)
9989 /* store_retained / load_retained. */
9990 return GEN_INT (INTVAL (write) + 6);
9993 /* MIPS builtin function support. */
9995 struct builtin_description
9997 /* The code of the main .md file instruction. See mips_builtin_type
9998 for more information. */
9999 enum insn_code icode;
10001 /* The floating-point comparison code to use with ICODE, if any. */
10002 enum mips_fp_condition cond;
10004 /* The name of the builtin function. */
10007 /* Specifies how the function should be expanded. */
10008 enum mips_builtin_type builtin_type;
10010 /* The function's prototype. */
10011 enum mips_function_type function_type;
10013 /* The target flags required for this function. */
10017 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
10018 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
10019 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10020 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10021 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
10023 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
10025 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
10026 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
10027 "__builtin_mips_" #INSN "_" #COND "_s", \
10028 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
10029 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
10030 "__builtin_mips_" #INSN "_" #COND "_d", \
10031 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
10033 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10034 The lower and upper forms require TARGET_FLAGS while the any and all
10035 forms require MASK_MIPS3D. */
10036 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
10037 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10038 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
10039 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10040 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10041 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
10042 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
10043 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10044 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
10045 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
10046 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10047 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
10048 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
10050 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
10051 require MASK_MIPS3D. */
10052 #define CMP_4S_BUILTINS(INSN, COND) \
10053 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10054 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
10055 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10057 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
10058 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
10059 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10062 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
10063 instruction requires TARGET_FLAGS. */
10064 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
10065 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10066 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
10067 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10069 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
10070 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
10071 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
10074 /* Define all the builtins related to c.cond.fmt condition COND. */
10075 #define CMP_BUILTINS(COND) \
10076 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10077 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
10078 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
10079 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
10080 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
10081 CMP_4S_BUILTINS (c, COND), \
10082 CMP_4S_BUILTINS (cabs, COND)
10084 static const struct builtin_description mips_bdesc[] =
10086 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10087 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10088 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10089 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10090 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10091 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10092 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10093 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10095 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10096 MASK_PAIRED_SINGLE_FLOAT),
10097 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10098 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10099 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10100 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10102 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10103 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10104 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10105 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10106 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10107 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10109 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10110 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10111 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10112 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10113 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10114 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10116 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10119 /* Builtin functions for the SB-1 processor. */
10121 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10123 static const struct builtin_description sb1_bdesc[] =
10125 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10128 /* Builtin functions for DSP ASE. */
10130 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10131 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10132 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10133 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10134 #define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3
10136 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10137 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10138 builtin_description fields. */
10139 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10140 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10141 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10143 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10144 branch instruction. TARGET_FLAGS is a builtin_description field. */
10145 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10146 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10147 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10149 static const struct builtin_description dsp_bdesc[] =
10151 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10152 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10153 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10154 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10155 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10156 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10157 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10158 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10159 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10160 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10161 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10162 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10163 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10164 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10165 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10166 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10167 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10168 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10169 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10170 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10171 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10172 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10173 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10174 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10175 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10176 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10177 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10178 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10179 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10180 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10181 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10182 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10183 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10184 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10185 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10186 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10187 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10188 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10189 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10190 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10191 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10192 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10193 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10194 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10195 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10196 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10197 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10198 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10199 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10200 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10201 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10202 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10203 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10204 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10205 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10206 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10207 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10208 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10209 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10210 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10211 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10212 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10213 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10214 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10215 BPOSGE_BUILTIN (32, MASK_DSP),
10217 /* The following are for the MIPS DSP ASE REV 2. */
10218 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
10219 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10220 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10221 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10222 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10223 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10224 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10225 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10226 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10227 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10228 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10229 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10230 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10231 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10232 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10233 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10234 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10235 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
10236 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
10237 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10238 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
10239 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
10240 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10241 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10242 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10243 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
10244 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10245 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10246 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10247 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10248 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10249 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
10250 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
10251 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
10254 static const struct builtin_description dsp_32only_bdesc[] =
10256 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10257 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10258 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10259 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10260 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10261 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10262 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10263 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10264 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10265 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10266 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10267 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10268 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10269 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10270 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10271 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10272 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10273 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10274 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10275 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10276 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10278 /* The following are for the MIPS DSP ASE REV 2. */
10279 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10280 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10281 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10282 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10283 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
10284 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
10285 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10286 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
10287 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
10288 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10289 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10290 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10291 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10292 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
10293 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
10296 /* This helps provide a mapping from builtin function codes to bdesc
10301 /* The builtin function table that this entry describes. */
10302 const struct builtin_description *bdesc;
10304 /* The number of entries in the builtin function table. */
10307 /* The target processor that supports these builtin functions.
10308 PROCESSOR_MAX means we enable them for all processors. */
10309 enum processor_type proc;
10311 /* If the target has these flags, this builtin function table
10312 will not be supported. */
10313 int unsupported_target_flags;
10316 static const struct bdesc_map bdesc_arrays[] =
10318 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
10319 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
10320 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
10321 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
10325 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
10326 suitable for input operand OP of instruction ICODE. Return the value. */
10329 mips_prepare_builtin_arg (enum insn_code icode,
10330 unsigned int op, tree exp, unsigned int argnum)
10333 enum machine_mode mode;
10335 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
10336 mode = insn_data[icode].operand[op].mode;
10337 if (!insn_data[icode].operand[op].predicate (value, mode))
10339 value = copy_to_mode_reg (mode, value);
10340 /* Check the predicate again. */
10341 if (!insn_data[icode].operand[op].predicate (value, mode))
10343 error ("invalid argument to builtin function");
10351 /* Return an rtx suitable for output operand OP of instruction ICODE.
10352 If TARGET is non-null, try to use it where possible. */
10355 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10357 enum machine_mode mode;
10359 mode = insn_data[icode].operand[op].mode;
10360 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10361 target = gen_reg_rtx (mode);
10366 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10369 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10370 enum machine_mode mode ATTRIBUTE_UNUSED,
10371 int ignore ATTRIBUTE_UNUSED)
10373 enum insn_code icode;
10374 enum mips_builtin_type type;
10376 unsigned int fcode;
10377 const struct builtin_description *bdesc;
10378 const struct bdesc_map *m;
10380 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
10381 fcode = DECL_FUNCTION_CODE (fndecl);
10384 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10386 if (fcode < m->size)
10389 icode = bdesc[fcode].icode;
10390 type = bdesc[fcode].builtin_type;
10400 case MIPS_BUILTIN_DIRECT:
10401 return mips_expand_builtin_direct (icode, target, exp, true);
10403 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10404 return mips_expand_builtin_direct (icode, target, exp, false);
10406 case MIPS_BUILTIN_MOVT:
10407 case MIPS_BUILTIN_MOVF:
10408 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10411 case MIPS_BUILTIN_CMP_ANY:
10412 case MIPS_BUILTIN_CMP_ALL:
10413 case MIPS_BUILTIN_CMP_UPPER:
10414 case MIPS_BUILTIN_CMP_LOWER:
10415 case MIPS_BUILTIN_CMP_SINGLE:
10416 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10419 case MIPS_BUILTIN_BPOSGE32:
10420 return mips_expand_builtin_bposge (type, target);
10427 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
10430 mips_init_builtins (void)
10432 const struct builtin_description *d;
10433 const struct bdesc_map *m;
10434 tree types[(int) MIPS_MAX_FTYPE_MAX];
10435 tree V2SF_type_node;
10436 tree V2HI_type_node;
10437 tree V4QI_type_node;
10438 unsigned int offset;
10440 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
10441 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
10444 if (TARGET_PAIRED_SINGLE_FLOAT)
10446 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
10448 types[MIPS_V2SF_FTYPE_V2SF]
10449 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
10451 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
10452 = build_function_type_list (V2SF_type_node,
10453 V2SF_type_node, V2SF_type_node, NULL_TREE);
10455 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
10456 = build_function_type_list (V2SF_type_node,
10457 V2SF_type_node, V2SF_type_node,
10458 integer_type_node, NULL_TREE);
10460 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
10461 = build_function_type_list (V2SF_type_node,
10462 V2SF_type_node, V2SF_type_node,
10463 V2SF_type_node, V2SF_type_node, NULL_TREE);
10465 types[MIPS_V2SF_FTYPE_SF_SF]
10466 = build_function_type_list (V2SF_type_node,
10467 float_type_node, float_type_node, NULL_TREE);
10469 types[MIPS_INT_FTYPE_V2SF_V2SF]
10470 = build_function_type_list (integer_type_node,
10471 V2SF_type_node, V2SF_type_node, NULL_TREE);
10473 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
10474 = build_function_type_list (integer_type_node,
10475 V2SF_type_node, V2SF_type_node,
10476 V2SF_type_node, V2SF_type_node, NULL_TREE);
10478 types[MIPS_INT_FTYPE_SF_SF]
10479 = build_function_type_list (integer_type_node,
10480 float_type_node, float_type_node, NULL_TREE);
10482 types[MIPS_INT_FTYPE_DF_DF]
10483 = build_function_type_list (integer_type_node,
10484 double_type_node, double_type_node, NULL_TREE);
10486 types[MIPS_SF_FTYPE_V2SF]
10487 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
10489 types[MIPS_SF_FTYPE_SF]
10490 = build_function_type_list (float_type_node,
10491 float_type_node, NULL_TREE);
10493 types[MIPS_SF_FTYPE_SF_SF]
10494 = build_function_type_list (float_type_node,
10495 float_type_node, float_type_node, NULL_TREE);
10497 types[MIPS_DF_FTYPE_DF]
10498 = build_function_type_list (double_type_node,
10499 double_type_node, NULL_TREE);
10501 types[MIPS_DF_FTYPE_DF_DF]
10502 = build_function_type_list (double_type_node,
10503 double_type_node, double_type_node, NULL_TREE);
10508 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
10509 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
10511 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
10512 = build_function_type_list (V2HI_type_node,
10513 V2HI_type_node, V2HI_type_node,
10516 types[MIPS_SI_FTYPE_SI_SI]
10517 = build_function_type_list (intSI_type_node,
10518 intSI_type_node, intSI_type_node,
10521 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
10522 = build_function_type_list (V4QI_type_node,
10523 V4QI_type_node, V4QI_type_node,
10526 types[MIPS_SI_FTYPE_V4QI]
10527 = build_function_type_list (intSI_type_node,
10531 types[MIPS_V2HI_FTYPE_V2HI]
10532 = build_function_type_list (V2HI_type_node,
10536 types[MIPS_SI_FTYPE_SI]
10537 = build_function_type_list (intSI_type_node,
10541 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
10542 = build_function_type_list (V4QI_type_node,
10543 V2HI_type_node, V2HI_type_node,
10546 types[MIPS_V2HI_FTYPE_SI_SI]
10547 = build_function_type_list (V2HI_type_node,
10548 intSI_type_node, intSI_type_node,
10551 types[MIPS_SI_FTYPE_V2HI]
10552 = build_function_type_list (intSI_type_node,
10556 types[MIPS_V2HI_FTYPE_V4QI]
10557 = build_function_type_list (V2HI_type_node,
10561 types[MIPS_V4QI_FTYPE_V4QI_SI]
10562 = build_function_type_list (V4QI_type_node,
10563 V4QI_type_node, intSI_type_node,
10566 types[MIPS_V2HI_FTYPE_V2HI_SI]
10567 = build_function_type_list (V2HI_type_node,
10568 V2HI_type_node, intSI_type_node,
10571 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
10572 = build_function_type_list (V2HI_type_node,
10573 V4QI_type_node, V2HI_type_node,
10576 types[MIPS_SI_FTYPE_V2HI_V2HI]
10577 = build_function_type_list (intSI_type_node,
10578 V2HI_type_node, V2HI_type_node,
10581 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
10582 = build_function_type_list (intDI_type_node,
10583 intDI_type_node, V4QI_type_node, V4QI_type_node,
10586 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
10587 = build_function_type_list (intDI_type_node,
10588 intDI_type_node, V2HI_type_node, V2HI_type_node,
10591 types[MIPS_DI_FTYPE_DI_SI_SI]
10592 = build_function_type_list (intDI_type_node,
10593 intDI_type_node, intSI_type_node, intSI_type_node,
10596 types[MIPS_V4QI_FTYPE_SI]
10597 = build_function_type_list (V4QI_type_node,
10601 types[MIPS_V2HI_FTYPE_SI]
10602 = build_function_type_list (V2HI_type_node,
10606 types[MIPS_VOID_FTYPE_V4QI_V4QI]
10607 = build_function_type_list (void_type_node,
10608 V4QI_type_node, V4QI_type_node,
10611 types[MIPS_SI_FTYPE_V4QI_V4QI]
10612 = build_function_type_list (intSI_type_node,
10613 V4QI_type_node, V4QI_type_node,
10616 types[MIPS_VOID_FTYPE_V2HI_V2HI]
10617 = build_function_type_list (void_type_node,
10618 V2HI_type_node, V2HI_type_node,
10621 types[MIPS_SI_FTYPE_DI_SI]
10622 = build_function_type_list (intSI_type_node,
10623 intDI_type_node, intSI_type_node,
10626 types[MIPS_DI_FTYPE_DI_SI]
10627 = build_function_type_list (intDI_type_node,
10628 intDI_type_node, intSI_type_node,
10631 types[MIPS_VOID_FTYPE_SI_SI]
10632 = build_function_type_list (void_type_node,
10633 intSI_type_node, intSI_type_node,
10636 types[MIPS_SI_FTYPE_PTR_SI]
10637 = build_function_type_list (intSI_type_node,
10638 ptr_type_node, intSI_type_node,
10641 types[MIPS_SI_FTYPE_VOID]
10642 = build_function_type (intSI_type_node, void_list_node);
10646 types[MIPS_V4QI_FTYPE_V4QI]
10647 = build_function_type_list (V4QI_type_node,
10651 types[MIPS_SI_FTYPE_SI_SI_SI]
10652 = build_function_type_list (intSI_type_node,
10653 intSI_type_node, intSI_type_node,
10654 intSI_type_node, NULL_TREE);
10656 types[MIPS_DI_FTYPE_DI_USI_USI]
10657 = build_function_type_list (intDI_type_node,
10659 unsigned_intSI_type_node,
10660 unsigned_intSI_type_node, NULL_TREE);
10662 types[MIPS_DI_FTYPE_SI_SI]
10663 = build_function_type_list (intDI_type_node,
10664 intSI_type_node, intSI_type_node,
10667 types[MIPS_DI_FTYPE_USI_USI]
10668 = build_function_type_list (intDI_type_node,
10669 unsigned_intSI_type_node,
10670 unsigned_intSI_type_node, NULL_TREE);
10672 types[MIPS_V2HI_FTYPE_SI_SI_SI]
10673 = build_function_type_list (V2HI_type_node,
10674 intSI_type_node, intSI_type_node,
10675 intSI_type_node, NULL_TREE);
10680 /* Iterate through all of the bdesc arrays, initializing all of the
10681 builtin functions. */
10684 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10686 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10687 && (m->unsupported_target_flags & target_flags) == 0)
10688 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10689 if ((d->target_flags & target_flags) == d->target_flags)
10690 add_builtin_function (d->name, types[d->function_type],
10691 d - m->bdesc + offset,
10692 BUILT_IN_MD, NULL, NULL);
10697 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10698 .md pattern and CALL is the function expr with arguments. TARGET,
10699 if nonnull, suggests a good place to put the result.
10700 HAS_TARGET indicates the function must return something. */
10703 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
10706 rtx ops[MAX_RECOG_OPERANDS];
10712 /* We save target to ops[0]. */
10713 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10717 /* We need to test if the arglist is not zero. Some instructions have extra
10718 clobber registers. */
10719 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
10720 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10725 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10729 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10733 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10737 gcc_unreachable ();
10742 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10743 function (TYPE says which). EXP is the tree for the function
10744 function, ICODE is the instruction that should be used to compare
10745 the first two arguments, and COND is the condition it should test.
10746 TARGET, if nonnull, suggests a good place to put the result. */
10749 mips_expand_builtin_movtf (enum mips_builtin_type type,
10750 enum insn_code icode, enum mips_fp_condition cond,
10751 rtx target, tree exp)
10753 rtx cmp_result, op0, op1;
10755 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10756 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
10757 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
10758 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10760 icode = CODE_FOR_mips_cond_move_tf_ps;
10761 target = mips_prepare_builtin_target (icode, 0, target);
10762 if (type == MIPS_BUILTIN_MOVT)
10764 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
10765 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
10769 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
10770 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
10772 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10776 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10777 into TARGET otherwise. Return TARGET. */
10780 mips_builtin_branch_and_move (rtx condition, rtx target,
10781 rtx value_if_true, rtx value_if_false)
10783 rtx true_label, done_label;
10785 true_label = gen_label_rtx ();
10786 done_label = gen_label_rtx ();
10788 /* First assume that CONDITION is false. */
10789 emit_move_insn (target, value_if_false);
10791 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10792 emit_jump_insn (gen_condjump (condition, true_label));
10793 emit_jump_insn (gen_jump (done_label));
10796 /* Fix TARGET if CONDITION is true. */
10797 emit_label (true_label);
10798 emit_move_insn (target, value_if_true);
10800 emit_label (done_label);
10804 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10805 of the comparison instruction and COND is the condition it should test.
10806 EXP is the function call and arguments and TARGET, if nonnull,
10807 suggests a good place to put the boolean result. */
10810 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10811 enum insn_code icode, enum mips_fp_condition cond,
10812 rtx target, tree exp)
10814 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10818 if (target == 0 || GET_MODE (target) != SImode)
10819 target = gen_reg_rtx (SImode);
10821 /* Prepare the operands to the comparison. */
10822 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10823 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
10824 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
10826 switch (insn_data[icode].n_operands)
10829 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10833 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10834 ops[3], ops[4], GEN_INT (cond)));
10838 gcc_unreachable ();
10841 /* If the comparison sets more than one register, we define the result
10842 to be 0 if all registers are false and -1 if all registers are true.
10843 The value of the complete result is indeterminate otherwise. */
10844 switch (builtin_type)
10846 case MIPS_BUILTIN_CMP_ALL:
10847 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10848 return mips_builtin_branch_and_move (condition, target,
10849 const0_rtx, const1_rtx);
10851 case MIPS_BUILTIN_CMP_UPPER:
10852 case MIPS_BUILTIN_CMP_LOWER:
10853 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10854 condition = gen_single_cc (cmp_result, offset);
10855 return mips_builtin_branch_and_move (condition, target,
10856 const1_rtx, const0_rtx);
10859 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10860 return mips_builtin_branch_and_move (condition, target,
10861 const1_rtx, const0_rtx);
10865 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10866 suggests a good place to put the boolean result. */
10869 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10871 rtx condition, cmp_result;
10874 if (target == 0 || GET_MODE (target) != SImode)
10875 target = gen_reg_rtx (SImode);
10877 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10879 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10884 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10885 return mips_builtin_branch_and_move (condition, target,
10886 const1_rtx, const0_rtx);
10889 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
10890 FIRST is true if this is the first time handling this decl. */
10893 mips_encode_section_info (tree decl, rtx rtl, int first)
10895 default_encode_section_info (decl, rtl, first);
10897 if (TREE_CODE (decl) == FUNCTION_DECL
10898 && lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
10900 rtx symbol = XEXP (rtl, 0);
10901 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
10905 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
10906 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
10909 mips_extra_live_on_entry (bitmap regs)
10911 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
10912 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
10915 /* SImode values are represented as sign-extended to DImode. */
10918 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
10920 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
10921 return SIGN_EXTEND;
10926 #include "gt-mips.h"