1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 59 Temple Place - Suite 330,
24 Boston, MA 02111-1307, USA. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
60 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF. */
61 #define UNSPEC_ADDRESS_P(X) \
62 (GET_CODE (X) == UNSPEC \
63 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
64 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66 /* Extract the symbol or label from UNSPEC wrapper X. */
67 #define UNSPEC_ADDRESS(X) \
70 /* Extract the symbol type from UNSPEC wrapper X. */
71 #define UNSPEC_ADDRESS_TYPE(X) \
72 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
74 /* The maximum distance between the top of the stack frame and the
75 value $sp has when we save & restore registers.
77 Use a maximum gap of 0x100 in the mips16 case. We can then use
78 unextended instructions to save and restore registers, and to
79 allocate and deallocate the top part of the frame.
81 The value in the !mips16 case must be a SMALL_OPERAND and must
82 preserve the maximum stack alignment. */
83 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
85 /* True if INSN is a mips.md pattern or asm statement. */
86 #define USEFUL_INSN_P(INSN) \
88 && GET_CODE (PATTERN (INSN)) != USE \
89 && GET_CODE (PATTERN (INSN)) != CLOBBER \
90 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
91 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93 /* If INSN is a delayed branch sequence, return the first instruction
94 in the sequence, otherwise return INSN itself. */
95 #define SEQ_BEGIN(INSN) \
96 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
97 ? XVECEXP (PATTERN (INSN), 0, 0) \
100 /* Likewise for the last instruction in a delayed branch sequence. */
101 #define SEQ_END(INSN) \
102 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
103 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
106 /* Execute the following loop body with SUBINSN set to each instruction
107 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive. */
108 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
109 for ((SUBINSN) = SEQ_BEGIN (INSN); \
110 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
111 (SUBINSN) = NEXT_INSN (SUBINSN))
113 /* Classifies an address.
116 A natural register + offset address. The register satisfies
117 mips_valid_base_register_p and the offset is a const_arith_operand.
120 A LO_SUM rtx. The first operand is a valid base register and
121 the second operand is a symbolic address.
124 A signed 16-bit constant address.
127 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
128 enum mips_address_type {
135 /* Classifies the prototype of a builtin function. */
136 enum mips_function_type
138 MIPS_V2SF_FTYPE_V2SF,
139 MIPS_V2SF_FTYPE_V2SF_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
142 MIPS_V2SF_FTYPE_SF_SF,
143 MIPS_INT_FTYPE_V2SF_V2SF,
144 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
145 MIPS_INT_FTYPE_SF_SF,
146 MIPS_INT_FTYPE_DF_DF,
157 /* Specifies how a builtin function should be converted into rtl. */
158 enum mips_builtin_type
160 /* The builtin corresponds directly to an .md pattern. The return
161 value is mapped to operand 0 and the arguments are mapped to
162 operands 1 and above. */
165 /* The builtin corresponds to a comparison instruction followed by
166 a mips_cond_move_tf_ps pattern. The first two arguments are the
167 values to compare and the second two arguments are the vector
168 operands for the movt.ps or movf.ps instruction (in assembly order). */
172 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
173 of this instruction is the result of the comparison, which has mode
174 CCV2 or CCV4. The function arguments are mapped to operands 1 and
175 above. The function's return value is an SImode boolean that is
176 true under the following conditions:
178 MIPS_BUILTIN_CMP_ANY: one of the registers is true
179 MIPS_BUILTIN_CMP_ALL: all of the registers are true
180 MIPS_BUILTIN_CMP_LOWER: the first register is true
181 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
182 MIPS_BUILTIN_CMP_ANY,
183 MIPS_BUILTIN_CMP_ALL,
184 MIPS_BUILTIN_CMP_UPPER,
185 MIPS_BUILTIN_CMP_LOWER,
187 /* As above, but the instruction only sets a single $fcc register. */
188 MIPS_BUILTIN_CMP_SINGLE
191 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
192 #define MIPS_FP_CONDITIONS(MACRO) \
210 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
211 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
212 enum mips_fp_condition {
213 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
216 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
217 #define STRINGIFY(X) #X
218 static const char *const mips_fp_conditions[] = {
219 MIPS_FP_CONDITIONS (STRINGIFY)
222 /* A function to save or store a register. The first argument is the
223 register and the second is the stack slot. */
224 typedef void (*mips_save_restore_fn) (rtx, rtx);
226 struct mips16_constant;
227 struct mips_arg_info;
228 struct mips_address_info;
229 struct mips_integer_op;
232 static enum mips_symbol_type mips_classify_symbol (rtx);
233 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
234 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
235 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
236 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
237 static bool mips_classify_address (struct mips_address_info *, rtx,
238 enum machine_mode, int);
239 static int mips_symbol_insns (enum mips_symbol_type);
240 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
241 static rtx mips_force_temporary (rtx, rtx);
242 static rtx mips_split_symbol (rtx, rtx);
243 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
244 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
245 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
246 static unsigned int mips_build_lower (struct mips_integer_op *,
247 unsigned HOST_WIDE_INT);
248 static unsigned int mips_build_integer (struct mips_integer_op *,
249 unsigned HOST_WIDE_INT);
250 static void mips_move_integer (rtx, unsigned HOST_WIDE_INT);
251 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
252 static int m16_check_op (rtx, int, int, int);
253 static bool mips_rtx_costs (rtx, int, int, int *);
254 static int mips_address_cost (rtx);
255 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
256 static void mips_load_call_address (rtx, rtx, int);
257 static bool mips_function_ok_for_sibcall (tree, tree);
258 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
259 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
260 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
261 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
262 tree, int, struct mips_arg_info *);
263 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
264 static void mips_set_architecture (const struct mips_cpu_info *);
265 static void mips_set_tune (const struct mips_cpu_info *);
266 static struct machine_function *mips_init_machine_status (void);
267 static void print_operand_reloc (FILE *, rtx, const char **);
269 static void irix_output_external_libcall (rtx);
271 static void mips_file_start (void);
272 static void mips_file_end (void);
273 static bool mips_rewrite_small_data_p (rtx);
274 static int mips_small_data_pattern_1 (rtx *, void *);
275 static int mips_rewrite_small_data_1 (rtx *, void *);
276 static bool mips_function_has_gp_insn (void);
277 static unsigned int mips_global_pointer (void);
278 static bool mips_save_reg_p (unsigned int);
279 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
280 mips_save_restore_fn);
281 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
282 static void mips_output_cplocal (void);
283 static void mips_emit_loadgp (void);
284 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
285 static void mips_set_frame_expr (rtx);
286 static rtx mips_frame_set (rtx, rtx);
287 static void mips_save_reg (rtx, rtx);
288 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
289 static void mips_restore_reg (rtx, rtx);
290 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
291 HOST_WIDE_INT, tree);
292 static int symbolic_expression_p (rtx);
293 static void mips_select_rtx_section (enum machine_mode, rtx,
294 unsigned HOST_WIDE_INT);
295 static void mips_function_rodata_section (tree);
296 static bool mips_in_small_data_p (tree);
297 static int mips_fpr_return_fields (tree, tree *);
298 static bool mips_return_in_msb (tree);
299 static rtx mips_return_fpr_pair (enum machine_mode mode,
300 enum machine_mode mode1, HOST_WIDE_INT,
301 enum machine_mode mode2, HOST_WIDE_INT);
302 static rtx mips16_gp_pseudo_reg (void);
303 static void mips16_fp_args (FILE *, int, int);
304 static void build_mips16_function_stub (FILE *);
305 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
306 static void dump_constants (struct mips16_constant *, rtx);
307 static int mips16_insn_length (rtx);
308 static int mips16_rewrite_pool_refs (rtx *, void *);
309 static void mips16_lay_out_constants (void);
310 static void mips_sim_reset (struct mips_sim *);
311 static void mips_sim_init (struct mips_sim *, state_t);
312 static void mips_sim_next_cycle (struct mips_sim *);
313 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
314 static int mips_sim_wait_regs_2 (rtx *, void *);
315 static void mips_sim_wait_regs_1 (rtx *, void *);
316 static void mips_sim_wait_regs (struct mips_sim *, rtx);
317 static void mips_sim_wait_units (struct mips_sim *, rtx);
318 static void mips_sim_wait_insn (struct mips_sim *, rtx);
319 static void mips_sim_record_set (rtx, rtx, void *);
320 static void mips_sim_issue_insn (struct mips_sim *, rtx);
321 static void mips_sim_issue_nop (struct mips_sim *);
322 static void mips_sim_finish_insn (struct mips_sim *, rtx);
323 static void vr4130_avoid_branch_rt_conflict (rtx);
324 static void vr4130_align_insns (void);
325 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
326 static void mips_avoid_hazards (void);
327 static void mips_reorg (void);
328 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
329 static bool mips_matching_cpu_name_p (const char *, const char *);
330 static const struct mips_cpu_info *mips_parse_cpu (const char *, const char *);
331 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
332 static bool mips_return_in_memory (tree, tree);
333 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
334 static void mips_macc_chains_record (rtx);
335 static void mips_macc_chains_reorder (rtx *, int);
336 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
337 static bool vr4130_true_reg_dependence_p (rtx);
338 static bool vr4130_swap_insns_p (rtx, rtx);
339 static void vr4130_reorder (rtx *, int);
340 static void mips_promote_ready (rtx *, int, int);
341 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
342 static int mips_variable_issue (FILE *, int, rtx, int);
343 static int mips_adjust_cost (rtx, rtx, rtx, int);
344 static int mips_issue_rate (void);
345 static int mips_multipass_dfa_lookahead (void);
346 static void mips_init_libfuncs (void);
347 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
349 static tree mips_build_builtin_va_list (void);
350 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
351 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
353 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
355 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
357 static bool mips_valid_pointer_mode (enum machine_mode);
358 static bool mips_scalar_mode_supported_p (enum machine_mode);
359 static bool mips_vector_mode_supported_p (enum machine_mode);
360 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
361 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
362 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
363 static void mips_init_builtins (void);
364 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree);
365 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
366 enum insn_code, enum mips_fp_condition,
368 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
369 enum insn_code, enum mips_fp_condition,
372 /* Structure to be filled in by compute_frame_size with register
373 save masks, and offsets for the current function. */
375 struct mips_frame_info GTY(())
377 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
378 HOST_WIDE_INT var_size; /* # bytes that variables take up */
379 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
380 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
381 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
382 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
383 unsigned int mask; /* mask of saved gp registers */
384 unsigned int fmask; /* mask of saved fp registers */
385 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
386 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
387 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
388 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
389 bool initialized; /* true if frame size already calculated */
390 int num_gp; /* number of gp registers saved */
391 int num_fp; /* number of fp registers saved */
394 struct machine_function GTY(()) {
395 /* Pseudo-reg holding the value of $28 in a mips16 function which
396 refers to GP relative global variables. */
397 rtx mips16_gp_pseudo_rtx;
399 /* Current frame information, calculated by compute_frame_size. */
400 struct mips_frame_info frame;
402 /* The register to use as the global pointer within this function. */
403 unsigned int global_pointer;
405 /* True if mips_adjust_insn_length should ignore an instruction's
407 bool ignore_hazard_length_p;
409 /* True if the whole function is suitable for .set noreorder and
411 bool all_noreorder_p;
413 /* True if the function is known to have an instruction that needs $gp. */
417 /* Information about a single argument. */
420 /* True if the argument is passed in a floating-point register, or
421 would have been if we hadn't run out of registers. */
424 /* The number of words passed in registers, rounded up. */
425 unsigned int reg_words;
427 /* For EABI, the offset of the first register from GP_ARG_FIRST or
428 FP_ARG_FIRST. For other ABIs, the offset of the first register from
429 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
430 comment for details).
432 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
434 unsigned int reg_offset;
436 /* The number of words that must be passed on the stack, rounded up. */
437 unsigned int stack_words;
439 /* The offset from the start of the stack overflow area of the argument's
440 first stack word. Only meaningful when STACK_WORDS is nonzero. */
441 unsigned int stack_offset;
445 /* Information about an address described by mips_address_type.
451 REG is the base register and OFFSET is the constant offset.
454 REG is the register that contains the high part of the address,
455 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
456 is the type of OFFSET's symbol.
459 SYMBOL_TYPE is the type of symbol being referenced. */
461 struct mips_address_info
463 enum mips_address_type type;
466 enum mips_symbol_type symbol_type;
470 /* One stage in a constant building sequence. These sequences have
474 A = A CODE[1] VALUE[1]
475 A = A CODE[2] VALUE[2]
478 where A is an accumulator, each CODE[i] is a binary rtl operation
479 and each VALUE[i] is a constant integer. */
480 struct mips_integer_op {
482 unsigned HOST_WIDE_INT value;
486 /* The largest number of operations needed to load an integer constant.
487 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
488 When the lowest bit is clear, we can try, but reject a sequence with
489 an extra SLL at the end. */
490 #define MIPS_MAX_INTEGER_OPS 7
493 /* Global variables for machine-dependent things. */
495 /* Threshold for data being put into the small data/bss area, instead
496 of the normal data area. */
497 int mips_section_threshold = -1;
499 /* Count the number of .file directives, so that .loc is up to date. */
500 int num_source_filenames = 0;
502 /* Count the number of sdb related labels are generated (to find block
503 start and end boundaries). */
504 int sdb_label_count = 0;
506 /* Next label # for each statement for Silicon Graphics IRIS systems. */
509 /* Linked list of all externals that are to be emitted when optimizing
510 for the global pointer if they haven't been declared by the end of
511 the program with an appropriate .comm or initialization. */
513 struct extern_list GTY (())
515 struct extern_list *next; /* next external */
516 const char *name; /* name of the external */
517 int size; /* size in bytes */
520 static GTY (()) struct extern_list *extern_head = 0;
522 /* Name of the file containing the current function. */
523 const char *current_function_file = "";
525 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
531 /* The next branch instruction is a branch likely, not branch normal. */
532 int mips_branch_likely;
534 /* The operands passed to the last cmpMM expander. */
537 /* The target cpu for code generation. */
538 enum processor_type mips_arch;
539 const struct mips_cpu_info *mips_arch_info;
541 /* The target cpu for optimization and scheduling. */
542 enum processor_type mips_tune;
543 const struct mips_cpu_info *mips_tune_info;
545 /* Which instruction set architecture to use. */
548 /* Which ABI to use. */
551 /* Strings to hold which cpu and instruction set architecture to use. */
552 const char *mips_arch_string; /* for -march=<xxx> */
553 const char *mips_tune_string; /* for -mtune=<xxx> */
554 const char *mips_isa_string; /* for -mips{1,2,3,4} */
555 const char *mips_abi_string; /* for -mabi={32,n32,64,eabi} */
557 /* Whether we are generating mips16 hard float code. In mips16 mode
558 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
559 -msoft-float was not specified by the user, which means that we
560 should arrange to call mips32 hard floating point code. */
561 int mips16_hard_float;
563 const char *mips_cache_flush_func = CACHE_FLUSH_FUNC;
565 /* If TRUE, we split addresses into their high and low parts in the RTL. */
566 int mips_split_addresses;
568 /* Mode used for saving/restoring general purpose registers. */
569 static enum machine_mode gpr_mode;
571 /* Array giving truth value on whether or not a given hard register
572 can support a given mode. */
573 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
575 /* List of all MIPS punctuation characters used by print_operand. */
576 char mips_print_operand_punct[256];
578 /* Map GCC register number to debugger register number. */
579 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
581 /* A copy of the original flag_delayed_branch: see override_options. */
582 static int mips_flag_delayed_branch;
584 static GTY (()) int mips_output_filename_first_time = 1;
586 /* mips_split_p[X] is true if symbols of type X can be split by
587 mips_split_symbol(). */
588 static bool mips_split_p[NUM_SYMBOL_TYPES];
590 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
591 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
592 if they are matched by a special .md file pattern. */
593 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
595 /* Likewise for HIGHs. */
596 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
598 /* Map hard register number to register class */
599 const enum reg_class mips_regno_to_class[] =
601 LEA_REGS, LEA_REGS, M16_NA_REGS, M16_NA_REGS,
602 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
603 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
604 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
605 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
606 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
607 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
608 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
609 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
610 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
611 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
612 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
613 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
614 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
615 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
616 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
617 HI_REG, LO_REG, NO_REGS, ST_REGS,
618 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
619 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
620 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
621 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
622 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
623 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
624 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
625 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
626 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
627 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
628 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
629 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
630 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
631 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
632 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
633 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
634 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
635 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
636 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
637 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
638 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
639 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
640 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
641 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
642 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
643 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
644 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS
647 /* Map register constraint character to register class. */
648 enum reg_class mips_char_to_class[256];
650 /* A table describing all the processors gcc knows about. Names are
651 matched in the order listed. The first mention of an ISA level is
652 taken as the canonical name for that ISA.
654 To ease comparison, please keep this table in the same order as
655 gas's mips_cpu_info_table[]. */
656 const struct mips_cpu_info mips_cpu_info_table[] = {
657 /* Entries for generic ISAs */
658 { "mips1", PROCESSOR_R3000, 1 },
659 { "mips2", PROCESSOR_R6000, 2 },
660 { "mips3", PROCESSOR_R4000, 3 },
661 { "mips4", PROCESSOR_R8000, 4 },
662 { "mips32", PROCESSOR_4KC, 32 },
663 { "mips32r2", PROCESSOR_M4K, 33 },
664 { "mips64", PROCESSOR_5KC, 64 },
667 { "r3000", PROCESSOR_R3000, 1 },
668 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
669 { "r3900", PROCESSOR_R3900, 1 },
672 { "r6000", PROCESSOR_R6000, 2 },
675 { "r4000", PROCESSOR_R4000, 3 },
676 { "vr4100", PROCESSOR_R4100, 3 },
677 { "vr4111", PROCESSOR_R4111, 3 },
678 { "vr4120", PROCESSOR_R4120, 3 },
679 { "vr4130", PROCESSOR_R4130, 3 },
680 { "vr4300", PROCESSOR_R4300, 3 },
681 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
682 { "r4600", PROCESSOR_R4600, 3 },
683 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
684 { "r4650", PROCESSOR_R4650, 3 },
687 { "r8000", PROCESSOR_R8000, 4 },
688 { "vr5000", PROCESSOR_R5000, 4 },
689 { "vr5400", PROCESSOR_R5400, 4 },
690 { "vr5500", PROCESSOR_R5500, 4 },
691 { "rm7000", PROCESSOR_R7000, 4 },
692 { "rm9000", PROCESSOR_R9000, 4 },
695 { "4kc", PROCESSOR_4KC, 32 },
696 { "4kp", PROCESSOR_4KC, 32 }, /* = 4kc */
698 /* MIPS32 Release 2 */
699 { "m4k", PROCESSOR_M4K, 33 },
702 { "5kc", PROCESSOR_5KC, 64 },
703 { "20kc", PROCESSOR_20KC, 64 },
704 { "sb1", PROCESSOR_SB1, 64 },
705 { "sr71000", PROCESSOR_SR71000, 64 },
711 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
712 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
713 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
716 /* Initialize the GCC target structure. */
717 #undef TARGET_ASM_ALIGNED_HI_OP
718 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
719 #undef TARGET_ASM_ALIGNED_SI_OP
720 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
721 #undef TARGET_ASM_ALIGNED_DI_OP
722 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
724 #undef TARGET_ASM_FUNCTION_PROLOGUE
725 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
726 #undef TARGET_ASM_FUNCTION_EPILOGUE
727 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
728 #undef TARGET_ASM_SELECT_RTX_SECTION
729 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
730 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
731 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
733 #undef TARGET_SCHED_REORDER
734 #define TARGET_SCHED_REORDER mips_sched_reorder
735 #undef TARGET_SCHED_VARIABLE_ISSUE
736 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
737 #undef TARGET_SCHED_ADJUST_COST
738 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
739 #undef TARGET_SCHED_ISSUE_RATE
740 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
741 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
742 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
743 mips_multipass_dfa_lookahead
745 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
746 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
748 #undef TARGET_VALID_POINTER_MODE
749 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
750 #undef TARGET_RTX_COSTS
751 #define TARGET_RTX_COSTS mips_rtx_costs
752 #undef TARGET_ADDRESS_COST
753 #define TARGET_ADDRESS_COST mips_address_cost
755 #undef TARGET_IN_SMALL_DATA_P
756 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
758 #undef TARGET_MACHINE_DEPENDENT_REORG
759 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
761 #undef TARGET_ASM_FILE_START
762 #undef TARGET_ASM_FILE_END
763 #define TARGET_ASM_FILE_START mips_file_start
764 #define TARGET_ASM_FILE_END mips_file_end
765 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
766 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
768 #undef TARGET_INIT_LIBFUNCS
769 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
771 #undef TARGET_BUILD_BUILTIN_VA_LIST
772 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
773 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
774 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
776 #undef TARGET_PROMOTE_FUNCTION_ARGS
777 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
778 #undef TARGET_PROMOTE_FUNCTION_RETURN
779 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
780 #undef TARGET_PROMOTE_PROTOTYPES
781 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
783 #undef TARGET_RETURN_IN_MEMORY
784 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
785 #undef TARGET_RETURN_IN_MSB
786 #define TARGET_RETURN_IN_MSB mips_return_in_msb
788 #undef TARGET_ASM_OUTPUT_MI_THUNK
789 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
790 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
791 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
793 #undef TARGET_SETUP_INCOMING_VARARGS
794 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
795 #undef TARGET_STRICT_ARGUMENT_NAMING
796 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
797 #undef TARGET_MUST_PASS_IN_STACK
798 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
799 #undef TARGET_PASS_BY_REFERENCE
800 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
801 #undef TARGET_CALLEE_COPIES
802 #define TARGET_CALLEE_COPIES mips_callee_copies
803 #undef TARGET_ARG_PARTIAL_BYTES
804 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
806 #undef TARGET_VECTOR_MODE_SUPPORTED_P
807 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
809 #undef TARGET_SCALAR_MODE_SUPPORTED_P
810 #define TARGET_SCALAR_MODE_SUPPORTED_P mips_scalar_mode_supported_p
812 #undef TARGET_INIT_BUILTINS
813 #define TARGET_INIT_BUILTINS mips_init_builtins
814 #undef TARGET_EXPAND_BUILTIN
815 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
817 struct gcc_target targetm = TARGET_INITIALIZER;
819 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
821 static enum mips_symbol_type
822 mips_classify_symbol (rtx x)
824 if (GET_CODE (x) == LABEL_REF)
827 return SYMBOL_CONSTANT_POOL;
829 return SYMBOL_GOT_LOCAL;
830 return SYMBOL_GENERAL;
833 gcc_assert (GET_CODE (x) == SYMBOL_REF);
835 if (CONSTANT_POOL_ADDRESS_P (x))
838 return SYMBOL_CONSTANT_POOL;
841 return SYMBOL_GOT_LOCAL;
843 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
844 return SYMBOL_SMALL_DATA;
846 return SYMBOL_GENERAL;
849 if (SYMBOL_REF_SMALL_P (x))
850 return SYMBOL_SMALL_DATA;
854 if (SYMBOL_REF_DECL (x) == 0)
855 return SYMBOL_REF_LOCAL_P (x) ? SYMBOL_GOT_LOCAL : SYMBOL_GOT_GLOBAL;
857 /* There are three cases to consider:
859 - o32 PIC (either with or without explicit relocs)
860 - n32/n64 PIC without explicit relocs
861 - n32/n64 PIC with explicit relocs
863 In the first case, both local and global accesses will use an
864 R_MIPS_GOT16 relocation. We must correctly predict which of
865 the two semantics (local or global) the assembler and linker
866 will apply. The choice doesn't depend on the symbol's
867 visibility, so we deliberately ignore decl_visibility and
870 In the second case, the assembler will not use R_MIPS_GOT16
871 relocations, but it chooses between local and global accesses
872 in the same way as for o32 PIC.
874 In the third case we have more freedom since both forms of
875 access will work for any kind of symbol. However, there seems
876 little point in doing things differently. */
877 if (DECL_P (SYMBOL_REF_DECL (x)) && TREE_PUBLIC (SYMBOL_REF_DECL (x)))
878 return SYMBOL_GOT_GLOBAL;
880 return SYMBOL_GOT_LOCAL;
883 return SYMBOL_GENERAL;
887 /* Split X into a base and a constant offset, storing them in *BASE
888 and *OFFSET respectively. */
891 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
895 if (GET_CODE (x) == CONST)
898 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
900 *offset += INTVAL (XEXP (x, 1));
907 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
908 to the same object as SYMBOL. */
911 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
913 if (GET_CODE (symbol) != SYMBOL_REF)
916 if (CONSTANT_POOL_ADDRESS_P (symbol)
918 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
921 if (SYMBOL_REF_DECL (symbol) != 0
923 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
930 /* Return true if X is a symbolic constant that can be calculated in
931 the same way as a bare symbol. If it is, store the type of the
932 symbol in *SYMBOL_TYPE. */
935 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
937 HOST_WIDE_INT offset;
939 mips_split_const (x, &x, &offset);
940 if (UNSPEC_ADDRESS_P (x))
941 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
942 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
943 *symbol_type = mips_classify_symbol (x);
950 /* Check whether a nonzero offset is valid for the underlying
952 switch (*symbol_type)
958 /* If the target has 64-bit pointers and the object file only
959 supports 32-bit symbols, the values of those symbols will be
960 sign-extended. In this case we can't allow an arbitrary offset
961 in case the 32-bit value X + OFFSET has a different sign from X. */
962 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
963 return mips_offset_within_object_p (x, offset);
965 /* In other cases the relocations can handle any offset. */
968 case SYMBOL_CONSTANT_POOL:
969 /* Allow constant pool references to be converted to LABEL+CONSTANT.
970 In this case, we no longer have access to the underlying constant,
971 but the original symbol-based access was known to be valid. */
972 if (GET_CODE (x) == LABEL_REF)
977 case SYMBOL_SMALL_DATA:
978 /* Make sure that the offset refers to something within the
979 underlying object. This should guarantee that the final
980 PC- or GP-relative offset is within the 16-bit limit. */
981 return mips_offset_within_object_p (x, offset);
983 case SYMBOL_GOT_LOCAL:
984 case SYMBOL_GOTOFF_PAGE:
985 /* The linker should provide enough local GOT entries for a
986 16-bit offset. Larger offsets may lead to GOT overflow. */
987 return SMALL_OPERAND (offset);
989 case SYMBOL_GOT_GLOBAL:
990 case SYMBOL_GOTOFF_GLOBAL:
991 case SYMBOL_GOTOFF_CALL:
992 case SYMBOL_GOTOFF_LOADGP:
999 /* Return true if X is a symbolic constant whose value is not split
1000 into separate relocations. */
1003 mips_atomic_symbolic_constant_p (rtx x)
1005 enum mips_symbol_type type;
1006 return mips_symbolic_constant_p (x, &type) && !mips_split_p[type];
1010 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1013 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1015 if (regno >= FIRST_PSEUDO_REGISTER)
1019 regno = reg_renumber[regno];
1022 /* These fake registers will be eliminated to either the stack or
1023 hard frame pointer, both of which are usually valid base registers.
1024 Reload deals with the cases where the eliminated form isn't valid. */
1025 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1028 /* In mips16 mode, the stack pointer can only address word and doubleword
1029 values, nothing smaller. There are two problems here:
1031 (a) Instantiating virtual registers can introduce new uses of the
1032 stack pointer. If these virtual registers are valid addresses,
1033 the stack pointer should be too.
1035 (b) Most uses of the stack pointer are not made explicit until
1036 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1037 We don't know until that stage whether we'll be eliminating to the
1038 stack pointer (which needs the restriction) or the hard frame
1039 pointer (which doesn't).
1041 All in all, it seems more consistent to only enforce this restriction
1042 during and after reload. */
1043 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1044 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1046 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1050 /* Return true if X is a valid base register for the given mode.
1051 Allow only hard registers if STRICT. */
1054 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1056 if (!strict && GET_CODE (x) == SUBREG)
1060 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1064 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1065 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1068 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1069 enum machine_mode mode)
1071 switch (symbol_type)
1073 case SYMBOL_GENERAL:
1074 return !TARGET_MIPS16;
1076 case SYMBOL_SMALL_DATA:
1079 case SYMBOL_CONSTANT_POOL:
1080 /* PC-relative addressing is only available for lw and ld. */
1081 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1083 case SYMBOL_GOT_LOCAL:
1086 case SYMBOL_GOT_GLOBAL:
1087 /* The address will have to be loaded from the GOT first. */
1090 case SYMBOL_GOTOFF_PAGE:
1091 case SYMBOL_GOTOFF_GLOBAL:
1092 case SYMBOL_GOTOFF_CALL:
1093 case SYMBOL_GOTOFF_LOADGP:
1094 case SYMBOL_64_HIGH:
1103 /* Return true if X is a valid address for machine mode MODE. If it is,
1104 fill in INFO appropriately. STRICT is true if we should only accept
1105 hard base registers. */
1108 mips_classify_address (struct mips_address_info *info, rtx x,
1109 enum machine_mode mode, int strict)
1111 switch (GET_CODE (x))
1115 info->type = ADDRESS_REG;
1117 info->offset = const0_rtx;
1118 return mips_valid_base_register_p (info->reg, mode, strict);
1121 info->type = ADDRESS_REG;
1122 info->reg = XEXP (x, 0);
1123 info->offset = XEXP (x, 1);
1124 return (mips_valid_base_register_p (info->reg, mode, strict)
1125 && const_arith_operand (info->offset, VOIDmode));
1128 info->type = ADDRESS_LO_SUM;
1129 info->reg = XEXP (x, 0);
1130 info->offset = XEXP (x, 1);
1131 return (mips_valid_base_register_p (info->reg, mode, strict)
1132 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1133 && mips_symbolic_address_p (info->symbol_type, mode)
1134 && mips_lo_relocs[info->symbol_type] != 0);
1137 /* Small-integer addresses don't occur very often, but they
1138 are legitimate if $0 is a valid base register. */
1139 info->type = ADDRESS_CONST_INT;
1140 return !TARGET_MIPS16 && SMALL_INT (x);
1145 info->type = ADDRESS_SYMBOLIC;
1146 return (mips_symbolic_constant_p (x, &info->symbol_type)
1147 && mips_symbolic_address_p (info->symbol_type, mode)
1148 && !mips_split_p[info->symbol_type]);
1155 /* Return the number of instructions needed to load a symbol of the
1156 given type into a register. If valid in an address, the same number
1157 of instructions are needed for loads and stores. Treat extended
1158 mips16 instructions as two instructions. */
1161 mips_symbol_insns (enum mips_symbol_type type)
1165 case SYMBOL_GENERAL:
1166 /* In mips16 code, general symbols must be fetched from the
1171 /* When using 64-bit symbols, we need 5 preparatory instructions,
1174 lui $at,%highest(symbol)
1175 daddiu $at,$at,%higher(symbol)
1177 daddiu $at,$at,%hi(symbol)
1180 The final address is then $at + %lo(symbol). With 32-bit
1181 symbols we just need a preparatory lui. */
1182 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1184 case SYMBOL_SMALL_DATA:
1187 case SYMBOL_CONSTANT_POOL:
1188 /* This case is for mips16 only. Assume we'll need an
1189 extended instruction. */
1192 case SYMBOL_GOT_LOCAL:
1193 case SYMBOL_GOT_GLOBAL:
1194 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1195 the local/global classification is accurate. See override_options
1198 The worst cases are:
1200 (1) For local symbols when generating o32 or o64 code. The assembler
1206 ...and the final address will be $at + %lo(symbol).
1208 (2) For global symbols when -mxgot. The assembler will use:
1210 lui $at,%got_hi(symbol)
1213 ...and the final address will be $at + %got_lo(symbol). */
1216 case SYMBOL_GOTOFF_PAGE:
1217 case SYMBOL_GOTOFF_GLOBAL:
1218 case SYMBOL_GOTOFF_CALL:
1219 case SYMBOL_GOTOFF_LOADGP:
1220 case SYMBOL_64_HIGH:
1223 /* Check whether the offset is a 16- or 32-bit value. */
1224 return mips_split_p[type] ? 2 : 1;
1229 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
1232 mips_stack_address_p (rtx x, enum machine_mode mode)
1234 struct mips_address_info addr;
1236 return (mips_classify_address (&addr, x, mode, false)
1237 && addr.type == ADDRESS_REG
1238 && addr.reg == stack_pointer_rtx);
1241 /* Return true if a value at OFFSET bytes from BASE can be accessed
1242 using an unextended mips16 instruction. MODE is the mode of the
1245 Usually the offset in an unextended instruction is a 5-bit field.
1246 The offset is unsigned and shifted left once for HIs, twice
1247 for SIs, and so on. An exception is SImode accesses off the
1248 stack pointer, which have an 8-bit immediate field. */
1251 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1254 && GET_CODE (offset) == CONST_INT
1255 && INTVAL (offset) >= 0
1256 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1258 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1259 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1260 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1266 /* Return the number of instructions needed to load or store a value
1267 of mode MODE at X. Return 0 if X isn't valid for MODE.
1269 For mips16 code, count extended instructions as two instructions. */
1272 mips_address_insns (rtx x, enum machine_mode mode)
1274 struct mips_address_info addr;
1277 if (mode == BLKmode)
1278 /* BLKmode is used for single unaligned loads and stores. */
1281 /* Each word of a multi-word value will be accessed individually. */
1282 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1284 if (mips_classify_address (&addr, x, mode, false))
1289 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1293 case ADDRESS_LO_SUM:
1294 return (TARGET_MIPS16 ? factor * 2 : factor);
1296 case ADDRESS_CONST_INT:
1299 case ADDRESS_SYMBOLIC:
1300 return factor * mips_symbol_insns (addr.symbol_type);
1306 /* Likewise for constant X. */
1309 mips_const_insns (rtx x)
1311 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1312 enum mips_symbol_type symbol_type;
1313 HOST_WIDE_INT offset;
1315 switch (GET_CODE (x))
1319 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1320 || !mips_split_p[symbol_type])
1327 /* Unsigned 8-bit constants can be loaded using an unextended
1328 LI instruction. Unsigned 16-bit constants can be loaded
1329 using an extended LI. Negative constants must be loaded
1330 using LI and then negated. */
1331 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1332 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1333 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1334 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1337 return mips_build_integer (codes, INTVAL (x));
1341 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1347 /* See if we can refer to X directly. */
1348 if (mips_symbolic_constant_p (x, &symbol_type))
1349 return mips_symbol_insns (symbol_type);
1351 /* Otherwise try splitting the constant into a base and offset.
1352 16-bit offsets can be added using an extra addiu. Larger offsets
1353 must be calculated separately and then added to the base. */
1354 mips_split_const (x, &x, &offset);
1357 int n = mips_const_insns (x);
1360 if (SMALL_OPERAND (offset))
1363 return n + 1 + mips_build_integer (codes, offset);
1370 return mips_symbol_insns (mips_classify_symbol (x));
1378 /* Return the number of instructions needed for memory reference X.
1379 Count extended mips16 instructions as two instructions. */
1382 mips_fetch_insns (rtx x)
1384 gcc_assert (MEM_P (x));
1385 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1389 /* Return the number of instructions needed for an integer division. */
1392 mips_idiv_insns (void)
1397 if (TARGET_CHECK_ZERO_DIV)
1399 if (GENERATE_DIVIDE_TRAPS)
1405 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1410 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1411 returns a nonzero value if X is a legitimate address for a memory
1412 operand of the indicated MODE. STRICT is nonzero if this function
1413 is called during reload. */
1416 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1418 struct mips_address_info addr;
1420 return mips_classify_address (&addr, x, mode, strict);
1424 /* Copy VALUE to a register and return that register. If new psuedos
1425 are allowed, copy it into a new register, otherwise use DEST. */
1428 mips_force_temporary (rtx dest, rtx value)
1430 if (!no_new_pseudos)
1431 return force_reg (Pmode, value);
1434 emit_move_insn (copy_rtx (dest), value);
1440 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1441 and is used to load the high part into a register. */
1444 mips_split_symbol (rtx temp, rtx addr)
1449 high = mips16_gp_pseudo_reg ();
1451 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1452 return gen_rtx_LO_SUM (Pmode, high, addr);
1456 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1457 type SYMBOL_TYPE. */
1460 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1463 HOST_WIDE_INT offset;
1465 mips_split_const (address, &base, &offset);
1466 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1467 UNSPEC_ADDRESS_FIRST + symbol_type);
1468 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1472 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1473 high part to BASE and return the result. Just return BASE otherwise.
1474 TEMP is available as a temporary register if needed.
1476 The returned expression can be used as the first operand to a LO_SUM. */
1479 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1480 enum mips_symbol_type symbol_type)
1482 if (mips_split_p[symbol_type])
1484 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1485 addr = mips_force_temporary (temp, addr);
1486 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1492 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1493 mips_force_temporary; it is only needed when OFFSET is not a
1497 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1499 if (!SMALL_OPERAND (offset))
1504 /* Load the full offset into a register so that we can use
1505 an unextended instruction for the address itself. */
1506 high = GEN_INT (offset);
1511 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1512 high = GEN_INT (CONST_HIGH_PART (offset));
1513 offset = CONST_LOW_PART (offset);
1515 high = mips_force_temporary (temp, high);
1516 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1518 return plus_constant (reg, offset);
1522 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
1523 be legitimized in a way that the generic machinery might not expect,
1524 put the new address in *XLOC and return true. MODE is the mode of
1525 the memory being accessed. */
1528 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
1530 enum mips_symbol_type symbol_type;
1532 /* See if the address can split into a high part and a LO_SUM. */
1533 if (mips_symbolic_constant_p (*xloc, &symbol_type)
1534 && mips_symbolic_address_p (symbol_type, mode)
1535 && mips_split_p[symbol_type])
1537 *xloc = mips_split_symbol (0, *xloc);
1541 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
1543 /* Handle REG + CONSTANT using mips_add_offset. */
1546 reg = XEXP (*xloc, 0);
1547 if (!mips_valid_base_register_p (reg, mode, 0))
1548 reg = copy_to_mode_reg (Pmode, reg);
1549 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
1557 /* Subroutine of mips_build_integer (with the same interface).
1558 Assume that the final action in the sequence should be a left shift. */
1561 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1563 unsigned int i, shift;
1565 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1566 since signed numbers are easier to load than unsigned ones. */
1568 while ((value & 1) == 0)
1569 value /= 2, shift++;
1571 i = mips_build_integer (codes, value);
1572 codes[i].code = ASHIFT;
1573 codes[i].value = shift;
1578 /* As for mips_build_shift, but assume that the final action will be
1579 an IOR or PLUS operation. */
1582 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1584 unsigned HOST_WIDE_INT high;
1587 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1588 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1590 /* The constant is too complex to load with a simple lui/ori pair
1591 so our goal is to clear as many trailing zeros as possible.
1592 In this case, we know bit 16 is set and that the low 16 bits
1593 form a negative number. If we subtract that number from VALUE,
1594 we will clear at least the lowest 17 bits, maybe more. */
1595 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1596 codes[i].code = PLUS;
1597 codes[i].value = CONST_LOW_PART (value);
1601 i = mips_build_integer (codes, high);
1602 codes[i].code = IOR;
1603 codes[i].value = value & 0xffff;
1609 /* Fill CODES with a sequence of rtl operations to load VALUE.
1610 Return the number of operations needed. */
1613 mips_build_integer (struct mips_integer_op *codes,
1614 unsigned HOST_WIDE_INT value)
1616 if (SMALL_OPERAND (value)
1617 || SMALL_OPERAND_UNSIGNED (value)
1618 || LUI_OPERAND (value))
1620 /* The value can be loaded with a single instruction. */
1621 codes[0].code = UNKNOWN;
1622 codes[0].value = value;
1625 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1627 /* Either the constant is a simple LUI/ORI combination or its
1628 lowest bit is set. We don't want to shift in this case. */
1629 return mips_build_lower (codes, value);
1631 else if ((value & 0xffff) == 0)
1633 /* The constant will need at least three actions. The lowest
1634 16 bits are clear, so the final action will be a shift. */
1635 return mips_build_shift (codes, value);
1639 /* The final action could be a shift, add or inclusive OR.
1640 Rather than use a complex condition to select the best
1641 approach, try both mips_build_shift and mips_build_lower
1642 and pick the one that gives the shortest sequence.
1643 Note that this case is only used once per constant. */
1644 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1645 unsigned int cost, alt_cost;
1647 cost = mips_build_shift (codes, value);
1648 alt_cost = mips_build_lower (alt_codes, value);
1649 if (alt_cost < cost)
1651 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1659 /* Move VALUE into register DEST. */
1662 mips_move_integer (rtx dest, unsigned HOST_WIDE_INT value)
1664 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1665 enum machine_mode mode;
1666 unsigned int i, cost;
1669 mode = GET_MODE (dest);
1670 cost = mips_build_integer (codes, value);
1672 /* Apply each binary operation to X. Invariant: X is a legitimate
1673 source operand for a SET pattern. */
1674 x = GEN_INT (codes[0].value);
1675 for (i = 1; i < cost; i++)
1678 emit_move_insn (dest, x), x = dest;
1680 x = force_reg (mode, x);
1681 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
1684 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1688 /* Subroutine of mips_legitimize_move. Move constant SRC into register
1689 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1693 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
1696 HOST_WIDE_INT offset;
1697 enum mips_symbol_type symbol_type;
1699 /* Split moves of big integers into smaller pieces. In mips16 code,
1700 it's better to force the constant into memory instead. */
1701 if (GET_CODE (src) == CONST_INT && !TARGET_MIPS16)
1703 mips_move_integer (dest, INTVAL (src));
1707 /* See if the symbol can be split. For mips16, this is often worse than
1708 forcing it in the constant pool since it needs the single-register form
1709 of addiu or daddiu. */
1711 && mips_symbolic_constant_p (src, &symbol_type)
1712 && mips_split_p[symbol_type])
1714 emit_move_insn (dest, mips_split_symbol (dest, src));
1718 /* If we have (const (plus symbol offset)), load the symbol first
1719 and then add in the offset. This is usually better than forcing
1720 the constant into memory, at least in non-mips16 code. */
1721 mips_split_const (src, &base, &offset);
1724 && (!no_new_pseudos || SMALL_OPERAND (offset)))
1726 base = mips_force_temporary (dest, base);
1727 emit_move_insn (dest, mips_add_offset (0, base, offset));
1731 src = force_const_mem (mode, src);
1733 /* When using explicit relocs, constant pool references are sometimes
1734 not legitimate addresses. */
1735 if (!memory_operand (src, VOIDmode))
1736 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
1737 emit_move_insn (dest, src);
1741 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
1742 sequence that is valid. */
1745 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
1747 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
1749 emit_move_insn (dest, force_reg (mode, src));
1753 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
1754 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1755 && REG_P (src) && MD_REG_P (REGNO (src))
1756 && REG_P (dest) && GP_REG_P (REGNO (dest)))
1758 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
1759 if (GET_MODE_SIZE (mode) <= 4)
1760 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
1761 gen_rtx_REG (SImode, REGNO (src)),
1762 gen_rtx_REG (SImode, other_regno)));
1764 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
1765 gen_rtx_REG (DImode, REGNO (src)),
1766 gen_rtx_REG (DImode, other_regno)));
1770 /* We need to deal with constants that would be legitimate
1771 immediate_operands but not legitimate move_operands. */
1772 if (CONSTANT_P (src) && !move_operand (src, mode))
1774 mips_legitimize_const_move (mode, dest, src);
1775 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
1781 /* We need a lot of little routines to check constant values on the
1782 mips16. These are used to figure out how long the instruction will
1783 be. It would be much better to do this using constraints, but
1784 there aren't nearly enough letters available. */
1787 m16_check_op (rtx op, int low, int high, int mask)
1789 return (GET_CODE (op) == CONST_INT
1790 && INTVAL (op) >= low
1791 && INTVAL (op) <= high
1792 && (INTVAL (op) & mask) == 0);
1796 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1798 return m16_check_op (op, 0x1, 0x8, 0);
1802 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1804 return m16_check_op (op, - 0x8, 0x7, 0);
1808 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1810 return m16_check_op (op, - 0x7, 0x8, 0);
1814 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1816 return m16_check_op (op, - 0x10, 0xf, 0);
1820 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1822 return m16_check_op (op, - 0xf, 0x10, 0);
1826 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1828 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
1832 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1834 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
1838 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1840 return m16_check_op (op, - 0x80, 0x7f, 0);
1844 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1846 return m16_check_op (op, - 0x7f, 0x80, 0);
1850 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1852 return m16_check_op (op, 0x0, 0xff, 0);
1856 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1858 return m16_check_op (op, - 0xff, 0x0, 0);
1862 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1864 return m16_check_op (op, - 0x1, 0xfe, 0);
1868 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1870 return m16_check_op (op, 0x0, 0xff << 2, 3);
1874 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1876 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
1880 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1882 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
1886 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1888 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
1892 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
1894 enum machine_mode mode = GET_MODE (x);
1901 /* Always return 0, since we don't have different sized
1902 instructions, hence different costs according to Richard
1908 /* A number between 1 and 8 inclusive is efficient for a shift.
1909 Otherwise, we will need an extended instruction. */
1910 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
1911 || (outer_code) == LSHIFTRT)
1913 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
1916 *total = COSTS_N_INSNS (1);
1920 /* We can use cmpi for an xor with an unsigned 16 bit value. */
1921 if ((outer_code) == XOR
1922 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
1928 /* We may be able to use slt or sltu for a comparison with a
1929 signed 16 bit value. (The boundary conditions aren't quite
1930 right, but this is just a heuristic anyhow.) */
1931 if (((outer_code) == LT || (outer_code) == LE
1932 || (outer_code) == GE || (outer_code) == GT
1933 || (outer_code) == LTU || (outer_code) == LEU
1934 || (outer_code) == GEU || (outer_code) == GTU)
1935 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
1941 /* Equality comparisons with 0 are cheap. */
1942 if (((outer_code) == EQ || (outer_code) == NE)
1949 /* Constants in the range 0...255 can be loaded with an unextended
1950 instruction. They are therefore as cheap as a register move.
1952 Given the choice between "li R1,0...255" and "move R1,R2"
1953 (where R2 is a known constant), it is usually better to use "li",
1954 since we do not want to unnecessarily extend the lifetime of R2. */
1955 if (outer_code == SET
1957 && INTVAL (x) < 256)
1963 /* Otherwise fall through to the handling below. */
1969 if (LEGITIMATE_CONSTANT_P (x))
1971 *total = COSTS_N_INSNS (1);
1976 /* The value will need to be fetched from the constant pool. */
1977 *total = CONSTANT_POOL_COST;
1983 /* If the address is legitimate, return the number of
1984 instructions it needs, otherwise use the default handling. */
1985 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
1988 *total = COSTS_N_INSNS (1 + n);
1995 *total = COSTS_N_INSNS (6);
1999 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2005 if (mode == DImode && !TARGET_64BIT)
2007 *total = COSTS_N_INSNS (2);
2015 if (mode == DImode && !TARGET_64BIT)
2017 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2024 if (mode == SFmode || mode == DFmode)
2025 *total = COSTS_N_INSNS (1);
2027 *total = COSTS_N_INSNS (4);
2031 *total = COSTS_N_INSNS (1);
2036 if (mode == SFmode || mode == DFmode)
2038 if (TUNE_MIPS3000 || TUNE_MIPS3900)
2039 *total = COSTS_N_INSNS (2);
2040 else if (TUNE_MIPS6000)
2041 *total = COSTS_N_INSNS (3);
2043 *total = COSTS_N_INSNS (4);
2045 *total = COSTS_N_INSNS (6);
2048 if (mode == DImode && !TARGET_64BIT)
2050 *total = COSTS_N_INSNS (4);
2056 if (mode == DImode && !TARGET_64BIT)
2070 *total = COSTS_N_INSNS (4);
2071 else if (TUNE_MIPS6000
2074 *total = COSTS_N_INSNS (5);
2076 *total = COSTS_N_INSNS (7);
2083 *total = COSTS_N_INSNS (4);
2084 else if (TUNE_MIPS3000
2087 *total = COSTS_N_INSNS (5);
2088 else if (TUNE_MIPS6000
2091 *total = COSTS_N_INSNS (6);
2093 *total = COSTS_N_INSNS (8);
2098 *total = COSTS_N_INSNS (12);
2099 else if (TUNE_MIPS3900)
2100 *total = COSTS_N_INSNS (2);
2101 else if (TUNE_MIPS4130)
2102 *total = COSTS_N_INSNS (mode == DImode ? 6 : 4);
2103 else if (TUNE_MIPS5400 || TUNE_SB1)
2104 *total = COSTS_N_INSNS (mode == DImode ? 4 : 3);
2105 else if (TUNE_MIPS5500 || TUNE_MIPS7000)
2106 *total = COSTS_N_INSNS (mode == DImode ? 9 : 5);
2107 else if (TUNE_MIPS9000)
2108 *total = COSTS_N_INSNS (mode == DImode ? 8 : 3);
2109 else if (TUNE_MIPS6000)
2110 *total = COSTS_N_INSNS (17);
2111 else if (TUNE_MIPS5000)
2112 *total = COSTS_N_INSNS (5);
2114 *total = COSTS_N_INSNS (10);
2123 *total = COSTS_N_INSNS (12);
2124 else if (TUNE_MIPS6000)
2125 *total = COSTS_N_INSNS (15);
2127 *total = COSTS_N_INSNS (24);
2128 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2129 *total = COSTS_N_INSNS (30);
2131 *total = COSTS_N_INSNS (23);
2139 *total = COSTS_N_INSNS (19);
2140 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2141 *total = COSTS_N_INSNS (59);
2142 else if (TUNE_MIPS6000)
2143 *total = COSTS_N_INSNS (16);
2145 *total = COSTS_N_INSNS (32);
2147 *total = COSTS_N_INSNS (36);
2156 *total = COSTS_N_INSNS (35);
2157 else if (TUNE_MIPS6000)
2158 *total = COSTS_N_INSNS (38);
2159 else if (TUNE_MIPS5000)
2160 *total = COSTS_N_INSNS (36);
2162 *total = COSTS_N_INSNS ((mode == SImode) ? 36 : 68);
2163 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2164 *total = COSTS_N_INSNS ((mode == SImode) ? 42 : 74);
2166 *total = COSTS_N_INSNS (69);
2170 /* A sign extend from SImode to DImode in 64 bit mode is often
2171 zero instructions, because the result can often be used
2172 directly by another instruction; we'll call it one. */
2173 if (TARGET_64BIT && mode == DImode
2174 && GET_MODE (XEXP (x, 0)) == SImode)
2175 *total = COSTS_N_INSNS (1);
2177 *total = COSTS_N_INSNS (2);
2181 if (TARGET_64BIT && mode == DImode
2182 && GET_MODE (XEXP (x, 0)) == SImode)
2183 *total = COSTS_N_INSNS (2);
2185 *total = COSTS_N_INSNS (1);
2193 /* Provide the costs of an addressing mode that contains ADDR.
2194 If ADDR is not a valid address, its cost is irrelevant. */
2197 mips_address_cost (rtx addr)
2199 return mips_address_insns (addr, SImode);
2202 /* Return one word of double-word value OP, taking into account the fixed
2203 endianness of certain registers. HIGH_P is true to select the high part,
2204 false to select the low part. */
2207 mips_subword (rtx op, int high_p)
2210 enum machine_mode mode;
2212 mode = GET_MODE (op);
2213 if (mode == VOIDmode)
2216 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2217 byte = UNITS_PER_WORD;
2223 if (FP_REG_P (REGNO (op)))
2224 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2225 if (REGNO (op) == HI_REGNUM)
2226 return gen_rtx_REG (word_mode, high_p ? HI_REGNUM : LO_REGNUM);
2230 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2232 return simplify_gen_subreg (word_mode, op, mode, byte);
2236 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2239 mips_split_64bit_move_p (rtx dest, rtx src)
2244 /* FP->FP moves can be done in a single instruction. */
2245 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2248 /* Check for floating-point loads and stores. They can be done using
2249 ldc1 and sdc1 on MIPS II and above. */
2252 if (FP_REG_RTX_P (dest) && MEM_P (src))
2254 if (FP_REG_RTX_P (src) && MEM_P (dest))
2261 /* Split a 64-bit move from SRC to DEST assuming that
2262 mips_split_64bit_move_p holds.
2264 Moves into and out of FPRs cause some difficulty here. Such moves
2265 will always be DFmode, since paired FPRs are not allowed to store
2266 DImode values. The most natural representation would be two separate
2267 32-bit moves, such as:
2269 (set (reg:SI $f0) (mem:SI ...))
2270 (set (reg:SI $f1) (mem:SI ...))
2272 However, the second insn is invalid because odd-numbered FPRs are
2273 not allowed to store independent values. Use the patterns load_df_low,
2274 load_df_high and store_df_high instead. */
2277 mips_split_64bit_move (rtx dest, rtx src)
2279 if (FP_REG_RTX_P (dest))
2281 /* Loading an FPR from memory or from GPRs. */
2282 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2283 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2286 else if (FP_REG_RTX_P (src))
2288 /* Storing an FPR into memory or GPRs. */
2289 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2290 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2294 /* The operation can be split into two normal moves. Decide in
2295 which order to do them. */
2298 low_dest = mips_subword (dest, 0);
2299 if (REG_P (low_dest)
2300 && reg_overlap_mentioned_p (low_dest, src))
2302 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2303 emit_move_insn (low_dest, mips_subword (src, 0));
2307 emit_move_insn (low_dest, mips_subword (src, 0));
2308 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2313 /* Return the appropriate instructions to move SRC into DEST. Assume
2314 that SRC is operand 1 and DEST is operand 0. */
2317 mips_output_move (rtx dest, rtx src)
2319 enum rtx_code dest_code, src_code;
2322 dest_code = GET_CODE (dest);
2323 src_code = GET_CODE (src);
2324 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2326 if (dbl_p && mips_split_64bit_move_p (dest, src))
2329 if ((src_code == REG && GP_REG_P (REGNO (src)))
2330 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2332 if (dest_code == REG)
2334 if (GP_REG_P (REGNO (dest)))
2335 return "move\t%0,%z1";
2337 if (MD_REG_P (REGNO (dest)))
2340 if (FP_REG_P (REGNO (dest)))
2341 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2343 if (ALL_COP_REG_P (REGNO (dest)))
2345 static char retval[] = "dmtc_\t%z1,%0";
2347 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2348 return (dbl_p ? retval : retval + 1);
2351 if (dest_code == MEM)
2352 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2354 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2356 if (src_code == REG)
2358 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2359 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2361 if (FP_REG_P (REGNO (src)))
2362 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2364 if (ALL_COP_REG_P (REGNO (src)))
2366 static char retval[] = "dmfc_\t%0,%1";
2368 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2369 return (dbl_p ? retval : retval + 1);
2373 if (src_code == MEM)
2374 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2376 if (src_code == CONST_INT)
2378 /* Don't use the X format, because that will give out of
2379 range numbers for 64 bit hosts and 32 bit targets. */
2381 return "li\t%0,%1\t\t\t# %X1";
2383 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2386 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2390 if (src_code == HIGH)
2391 return "lui\t%0,%h1";
2393 if (CONST_GP_P (src))
2394 return "move\t%0,%1";
2396 if (symbolic_operand (src, VOIDmode))
2397 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2399 if (src_code == REG && FP_REG_P (REGNO (src)))
2401 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2403 if (GET_MODE (dest) == V2SFmode)
2404 return "mov.ps\t%0,%1";
2406 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2409 if (dest_code == MEM)
2410 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2412 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2414 if (src_code == MEM)
2415 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2417 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2419 static char retval[] = "l_c_\t%0,%1";
2421 retval[1] = (dbl_p ? 'd' : 'w');
2422 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2425 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2427 static char retval[] = "s_c_\t%1,%0";
2429 retval[1] = (dbl_p ? 'd' : 'w');
2430 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2436 /* Restore $gp from its save slot. Valid only when using o32 or
2440 mips_restore_gp (void)
2444 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
2446 address = mips_add_offset (pic_offset_table_rtx,
2447 frame_pointer_needed
2448 ? hard_frame_pointer_rtx
2449 : stack_pointer_rtx,
2450 current_function_outgoing_args_size);
2451 slot = gen_rtx_MEM (Pmode, address);
2453 emit_move_insn (pic_offset_table_rtx, slot);
2454 if (!TARGET_EXPLICIT_RELOCS)
2455 emit_insn (gen_blockage ());
2458 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2461 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2463 emit_insn (gen_rtx_SET (VOIDmode, target,
2464 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2467 /* Return true if CMP1 is a suitable second operand for relational
2468 operator CODE. See also the *sCC patterns in mips.md. */
2471 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2477 return reg_or_0_operand (cmp1, VOIDmode);
2481 return !TARGET_MIPS16 && cmp1 == const1_rtx;
2485 return arith_operand (cmp1, VOIDmode);
2488 return sle_operand (cmp1, VOIDmode);
2491 return sleu_operand (cmp1, VOIDmode);
2498 /* Compare CMP0 and CMP1 using relational operator CODE and store the
2499 result in TARGET. CMP0 and TARGET are register_operands that have
2500 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
2501 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
2504 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
2505 rtx target, rtx cmp0, rtx cmp1)
2507 /* First see if there is a MIPS instruction that can do this operation
2508 with CMP1 in its current form. If not, try doing the same for the
2509 inverse operation. If that also fails, force CMP1 into a register
2511 if (mips_relational_operand_ok_p (code, cmp1))
2512 mips_emit_binary (code, target, cmp0, cmp1);
2515 enum rtx_code inv_code = reverse_condition (code);
2516 if (!mips_relational_operand_ok_p (inv_code, cmp1))
2518 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
2519 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
2521 else if (invert_ptr == 0)
2523 rtx inv_target = gen_reg_rtx (GET_MODE (target));
2524 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
2525 mips_emit_binary (XOR, target, inv_target, const1_rtx);
2529 *invert_ptr = !*invert_ptr;
2530 mips_emit_binary (inv_code, target, cmp0, cmp1);
2535 /* Return a register that is zero iff CMP0 and CMP1 are equal.
2536 The register will have the same mode as CMP0. */
2539 mips_zero_if_equal (rtx cmp0, rtx cmp1)
2541 if (cmp1 == const0_rtx)
2544 if (uns_arith_operand (cmp1, VOIDmode))
2545 return expand_binop (GET_MODE (cmp0), xor_optab,
2546 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2548 return expand_binop (GET_MODE (cmp0), sub_optab,
2549 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2552 /* Convert a comparison into something that can be used in a branch or
2553 conditional move. cmp_operands[0] and cmp_operands[1] are the values
2554 being compared and *CODE is the code used to compare them.
2556 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
2557 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
2558 otherwise any standard branch condition can be used. The standard branch
2561 - EQ/NE between two registers.
2562 - any comparison between a register and zero. */
2565 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
2567 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
2569 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
2571 *op0 = cmp_operands[0];
2572 *op1 = cmp_operands[1];
2574 else if (*code == EQ || *code == NE)
2578 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2583 *op0 = cmp_operands[0];
2584 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
2589 /* The comparison needs a separate scc instruction. Store the
2590 result of the scc in *OP0 and compare it against zero. */
2591 bool invert = false;
2592 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
2594 mips_emit_int_relational (*code, &invert, *op0,
2595 cmp_operands[0], cmp_operands[1]);
2596 *code = (invert ? EQ : NE);
2601 enum rtx_code cmp_code;
2603 /* Floating-point tests use a separate c.cond.fmt comparison to
2604 set a condition code register. The branch or conditional move
2605 will then compare that register against zero.
2607 Set CMP_CODE to the code of the comparison instruction and
2608 *CODE to the code that the branch or move should use. */
2616 cmp_code = reverse_condition_maybe_unordered (*code);
2626 ? gen_reg_rtx (CCmode)
2627 : gen_rtx_REG (CCmode, FPSW_REGNUM));
2629 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
2633 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
2634 Store the result in TARGET and return true if successful.
2636 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
2639 mips_emit_scc (enum rtx_code code, rtx target)
2641 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
2644 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
2645 if (code == EQ || code == NE)
2647 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2648 mips_emit_binary (code, target, zie, const0_rtx);
2651 mips_emit_int_relational (code, 0, target,
2652 cmp_operands[0], cmp_operands[1]);
2656 /* Emit the common code for doing conditional branches.
2657 operand[0] is the label to jump to.
2658 The comparison operands are saved away by cmp{si,di,sf,df}. */
2661 gen_conditional_branch (rtx *operands, enum rtx_code code)
2663 rtx op0, op1, target;
2665 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
2666 target = gen_rtx_IF_THEN_ELSE (VOIDmode,
2667 gen_rtx_fmt_ee (code, GET_MODE (op0),
2669 gen_rtx_LABEL_REF (VOIDmode, operands[0]),
2671 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, target));
2674 /* Emit the common code for conditional moves. OPERANDS is the array
2675 of operands passed to the conditional move define_expand. */
2678 gen_conditional_move (rtx *operands)
2683 code = GET_CODE (operands[1]);
2684 mips_emit_compare (&code, &op0, &op1, true);
2685 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2686 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2687 gen_rtx_fmt_ee (code,
2690 operands[2], operands[3])));
2693 /* Emit a conditional trap. OPERANDS is the array of operands passed to
2694 the conditional_trap expander. */
2697 mips_gen_conditional_trap (rtx *operands)
2700 enum rtx_code cmp_code = GET_CODE (operands[0]);
2701 enum machine_mode mode = GET_MODE (cmp_operands[0]);
2703 /* MIPS conditional trap machine instructions don't have GT or LE
2704 flavors, so we must invert the comparison and convert to LT and
2705 GE, respectively. */
2708 case GT: cmp_code = LT; break;
2709 case LE: cmp_code = GE; break;
2710 case GTU: cmp_code = LTU; break;
2711 case LEU: cmp_code = GEU; break;
2714 if (cmp_code == GET_CODE (operands[0]))
2716 op0 = cmp_operands[0];
2717 op1 = cmp_operands[1];
2721 op0 = cmp_operands[1];
2722 op1 = cmp_operands[0];
2724 op0 = force_reg (mode, op0);
2725 if (!arith_operand (op1, mode))
2726 op1 = force_reg (mode, op1);
2728 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
2729 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
2733 /* Load function address ADDR into register DEST. SIBCALL_P is true
2734 if the address is needed for a sibling call. */
2737 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
2739 /* If we're generating PIC, and this call is to a global function,
2740 try to allow its address to be resolved lazily. This isn't
2741 possible for NewABI sibcalls since the value of $gp on entry
2742 to the stub would be our caller's gp, not ours. */
2743 if (TARGET_EXPLICIT_RELOCS
2744 && !(sibcall_p && TARGET_NEWABI)
2745 && global_got_operand (addr, VOIDmode))
2747 rtx high, lo_sum_symbol;
2749 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
2750 addr, SYMBOL_GOTOFF_CALL);
2751 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
2752 if (Pmode == SImode)
2753 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
2755 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
2758 emit_move_insn (dest, addr);
2762 /* Expand a call or call_value instruction. RESULT is where the
2763 result will go (null for calls), ADDR is the address of the
2764 function, ARGS_SIZE is the size of the arguments and AUX is
2765 the value passed to us by mips_function_arg. SIBCALL_P is true
2766 if we are expanding a sibling call, false if we're expanding
2770 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
2772 rtx orig_addr, pattern, insn;
2775 if (!call_insn_operand (addr, VOIDmode))
2777 addr = gen_reg_rtx (Pmode);
2778 mips_load_call_address (addr, orig_addr, sibcall_p);
2782 && mips16_hard_float
2783 && build_mips16_call_stub (result, addr, args_size,
2784 aux == 0 ? 0 : (int) GET_MODE (aux)))
2788 pattern = (sibcall_p
2789 ? gen_sibcall_internal (addr, args_size)
2790 : gen_call_internal (addr, args_size));
2791 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
2795 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
2796 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
2799 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
2800 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
2803 pattern = (sibcall_p
2804 ? gen_sibcall_value_internal (result, addr, args_size)
2805 : gen_call_value_internal (result, addr, args_size));
2807 insn = emit_call_insn (pattern);
2809 /* Lazy-binding stubs require $gp to be valid on entry. */
2810 if (global_got_operand (orig_addr, VOIDmode))
2811 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
2815 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
2818 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
2819 tree exp ATTRIBUTE_UNUSED)
2821 return TARGET_SIBCALLS;
2824 /* Emit code to move general operand SRC into condition-code
2825 register DEST. SCRATCH is a scratch TFmode float register.
2832 where FP1 and FP2 are single-precision float registers
2833 taken from SCRATCH. */
2836 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
2840 /* Change the source to SFmode. */
2842 src = adjust_address (src, SFmode, 0);
2843 else if (REG_P (src) || GET_CODE (src) == SUBREG)
2844 src = gen_rtx_REG (SFmode, true_regnum (src));
2846 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
2847 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
2849 emit_move_insn (copy_rtx (fp1), src);
2850 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
2851 emit_insn (gen_slt_sf (dest, fp2, fp1));
2854 /* Emit code to change the current function's return address to
2855 ADDRESS. SCRATCH is available as a scratch register, if needed.
2856 ADDRESS and SCRATCH are both word-mode GPRs. */
2859 mips_set_return_address (rtx address, rtx scratch)
2863 compute_frame_size (get_frame_size ());
2864 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
2865 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
2866 cfun->machine->frame.gp_sp_offset);
2868 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
2871 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
2872 Assume that the areas do not overlap. */
2875 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
2877 HOST_WIDE_INT offset, delta;
2878 unsigned HOST_WIDE_INT bits;
2880 enum machine_mode mode;
2883 /* Work out how many bits to move at a time. If both operands have
2884 half-word alignment, it is usually better to move in half words.
2885 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
2886 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
2887 Otherwise move word-sized chunks. */
2888 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
2889 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
2890 bits = BITS_PER_WORD / 2;
2892 bits = BITS_PER_WORD;
2894 mode = mode_for_size (bits, MODE_INT, 0);
2895 delta = bits / BITS_PER_UNIT;
2897 /* Allocate a buffer for the temporary registers. */
2898 regs = alloca (sizeof (rtx) * length / delta);
2900 /* Load as many BITS-sized chunks as possible. Use a normal load if
2901 the source has enough alignment, otherwise use left/right pairs. */
2902 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2904 regs[i] = gen_reg_rtx (mode);
2905 if (MEM_ALIGN (src) >= bits)
2906 emit_move_insn (regs[i], adjust_address (src, mode, offset));
2909 rtx part = adjust_address (src, BLKmode, offset);
2910 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
2915 /* Copy the chunks to the destination. */
2916 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
2917 if (MEM_ALIGN (dest) >= bits)
2918 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
2921 rtx part = adjust_address (dest, BLKmode, offset);
2922 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
2926 /* Mop up any left-over bytes. */
2927 if (offset < length)
2929 src = adjust_address (src, BLKmode, offset);
2930 dest = adjust_address (dest, BLKmode, offset);
2931 move_by_pieces (dest, src, length - offset,
2932 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
2936 #define MAX_MOVE_REGS 4
2937 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
2940 /* Helper function for doing a loop-based block operation on memory
2941 reference MEM. Each iteration of the loop will operate on LENGTH
2944 Create a new base register for use within the loop and point it to
2945 the start of MEM. Create a new memory reference that uses this
2946 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
2949 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
2950 rtx *loop_reg, rtx *loop_mem)
2952 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
2954 /* Although the new mem does not refer to a known location,
2955 it does keep up to LENGTH bytes of alignment. */
2956 *loop_mem = change_address (mem, BLKmode, *loop_reg);
2957 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
2961 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
2962 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
2963 memory regions do not overlap. */
2966 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
2968 rtx label, src_reg, dest_reg, final_src;
2969 HOST_WIDE_INT leftover;
2971 leftover = length % MAX_MOVE_BYTES;
2974 /* Create registers and memory references for use within the loop. */
2975 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
2976 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
2978 /* Calculate the value that SRC_REG should have after the last iteration
2980 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
2983 /* Emit the start of the loop. */
2984 label = gen_label_rtx ();
2987 /* Emit the loop body. */
2988 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
2990 /* Move on to the next block. */
2991 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
2992 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
2994 /* Emit the loop condition. */
2995 if (Pmode == DImode)
2996 emit_insn (gen_cmpdi (src_reg, final_src));
2998 emit_insn (gen_cmpsi (src_reg, final_src));
2999 emit_jump_insn (gen_bne (label));
3001 /* Mop up any left-over bytes. */
3003 mips_block_move_straight (dest, src, leftover);
3006 /* Expand a movmemsi instruction. */
3009 mips_expand_block_move (rtx dest, rtx src, rtx length)
3011 if (GET_CODE (length) == CONST_INT)
3013 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3015 mips_block_move_straight (dest, src, INTVAL (length));
3020 mips_block_move_loop (dest, src, INTVAL (length));
3027 /* Argument support functions. */
3029 /* Initialize CUMULATIVE_ARGS for a function. */
3032 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3033 rtx libname ATTRIBUTE_UNUSED)
3035 static CUMULATIVE_ARGS zero_cum;
3036 tree param, next_param;
3039 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3041 /* Determine if this function has variable arguments. This is
3042 indicated by the last argument being 'void_type_mode' if there
3043 are no variable arguments. The standard MIPS calling sequence
3044 passes all arguments in the general purpose registers in this case. */
3046 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3047 param != 0; param = next_param)
3049 next_param = TREE_CHAIN (param);
3050 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3051 cum->gp_reg_found = 1;
3056 /* Fill INFO with information about a single argument. CUM is the
3057 cumulative state for earlier arguments. MODE is the mode of this
3058 argument and TYPE is its type (if known). NAMED is true if this
3059 is a named (fixed) argument rather than a variable one. */
3062 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3063 tree type, int named, struct mips_arg_info *info)
3065 bool doubleword_aligned_p;
3066 unsigned int num_bytes, num_words, max_regs;
3068 /* Work out the size of the argument. */
3069 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3070 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3072 /* Decide whether it should go in a floating-point register, assuming
3073 one is free. Later code checks for availability.
3075 The checks against UNITS_PER_FPVALUE handle the soft-float and
3076 single-float cases. */
3080 /* The EABI conventions have traditionally been defined in terms
3081 of TYPE_MODE, regardless of the actual type. */
3082 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3083 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3084 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3089 /* Only leading floating-point scalars are passed in
3090 floating-point registers. We also handle vector floats the same
3091 say, which is OK because they are not covered by the standard ABI. */
3092 info->fpr_p = (!cum->gp_reg_found
3093 && cum->arg_number < 2
3094 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3095 || VECTOR_FLOAT_TYPE_P (type))
3096 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3097 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3098 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3103 /* Scalar and complex floating-point types are passed in
3104 floating-point registers. */
3105 info->fpr_p = (named
3106 && (type == 0 || FLOAT_TYPE_P (type))
3107 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3108 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3109 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3110 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3112 /* ??? According to the ABI documentation, the real and imaginary
3113 parts of complex floats should be passed in individual registers.
3114 The real and imaginary parts of stack arguments are supposed
3115 to be contiguous and there should be an extra word of padding
3118 This has two problems. First, it makes it impossible to use a
3119 single "void *" va_list type, since register and stack arguments
3120 are passed differently. (At the time of writing, MIPSpro cannot
3121 handle complex float varargs correctly.) Second, it's unclear
3122 what should happen when there is only one register free.
3124 For now, we assume that named complex floats should go into FPRs
3125 if there are two FPRs free, otherwise they should be passed in the
3126 same way as a struct containing two floats. */
3128 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3129 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3131 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3132 info->fpr_p = false;
3142 /* See whether the argument has doubleword alignment. */
3143 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3145 /* Set REG_OFFSET to the register count we're interested in.
3146 The EABI allocates the floating-point registers separately,
3147 but the other ABIs allocate them like integer registers. */
3148 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3152 /* Advance to an even register if the argument is doubleword-aligned. */
3153 if (doubleword_aligned_p)
3154 info->reg_offset += info->reg_offset & 1;
3156 /* Work out the offset of a stack argument. */
3157 info->stack_offset = cum->stack_words;
3158 if (doubleword_aligned_p)
3159 info->stack_offset += info->stack_offset & 1;
3161 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3163 /* Partition the argument between registers and stack. */
3164 info->reg_words = MIN (num_words, max_regs);
3165 info->stack_words = num_words - info->reg_words;
3169 /* Implement FUNCTION_ARG_ADVANCE. */
3172 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3173 tree type, int named)
3175 struct mips_arg_info info;
3177 mips_arg_info (cum, mode, type, named, &info);
3180 cum->gp_reg_found = true;
3182 /* See the comment above the cumulative args structure in mips.h
3183 for an explanation of what this code does. It assumes the O32
3184 ABI, which passes at most 2 arguments in float registers. */
3185 if (cum->arg_number < 2 && info.fpr_p)
3186 cum->fp_code += (mode == SFmode ? 1 : 2) << ((cum->arg_number - 1) * 2);
3188 if (mips_abi != ABI_EABI || !info.fpr_p)
3189 cum->num_gprs = info.reg_offset + info.reg_words;
3190 else if (info.reg_words > 0)
3191 cum->num_fprs += FP_INC;
3193 if (info.stack_words > 0)
3194 cum->stack_words = info.stack_offset + info.stack_words;
3199 /* Implement FUNCTION_ARG. */
3202 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3203 tree type, int named)
3205 struct mips_arg_info info;
3207 /* We will be called with a mode of VOIDmode after the last argument
3208 has been seen. Whatever we return will be passed to the call
3209 insn. If we need a mips16 fp_code, return a REG with the code
3210 stored as the mode. */
3211 if (mode == VOIDmode)
3213 if (TARGET_MIPS16 && cum->fp_code != 0)
3214 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3220 mips_arg_info (cum, mode, type, named, &info);
3222 /* Return straight away if the whole argument is passed on the stack. */
3223 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3227 && TREE_CODE (type) == RECORD_TYPE
3229 && TYPE_SIZE_UNIT (type)
3230 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3233 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3234 structure contains a double in its entirety, then that 64 bit
3235 chunk is passed in a floating point register. */
3238 /* First check to see if there is any such field. */
3239 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3240 if (TREE_CODE (field) == FIELD_DECL
3241 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3242 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3243 && host_integerp (bit_position (field), 0)
3244 && int_bit_position (field) % BITS_PER_WORD == 0)
3249 /* Now handle the special case by returning a PARALLEL
3250 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3251 chunks are passed in registers. */
3253 HOST_WIDE_INT bitpos;
3256 /* assign_parms checks the mode of ENTRY_PARM, so we must
3257 use the actual mode here. */
3258 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3261 field = TYPE_FIELDS (type);
3262 for (i = 0; i < info.reg_words; i++)
3266 for (; field; field = TREE_CHAIN (field))
3267 if (TREE_CODE (field) == FIELD_DECL
3268 && int_bit_position (field) >= bitpos)
3272 && int_bit_position (field) == bitpos
3273 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3274 && !TARGET_SOFT_FLOAT
3275 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3276 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3278 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3281 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3282 GEN_INT (bitpos / BITS_PER_UNIT));
3284 bitpos += BITS_PER_WORD;
3290 /* Handle the n32/n64 conventions for passing complex floating-point
3291 arguments in FPR pairs. The real part goes in the lower register
3292 and the imaginary part goes in the upper register. */
3295 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3298 enum machine_mode inner;
3301 inner = GET_MODE_INNER (mode);
3302 reg = FP_ARG_FIRST + info.reg_offset;
3303 real = gen_rtx_EXPR_LIST (VOIDmode,
3304 gen_rtx_REG (inner, reg),
3306 imag = gen_rtx_EXPR_LIST (VOIDmode,
3307 gen_rtx_REG (inner, reg + info.reg_words / 2),
3308 GEN_INT (GET_MODE_SIZE (inner)));
3309 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3313 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3314 else if (info.reg_offset == 1)
3315 /* This code handles the special o32 case in which the second word
3316 of the argument structure is passed in floating-point registers. */
3317 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3319 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3323 /* Implement TARGET_ARG_PARTIAL_BYTES. */
3326 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3327 enum machine_mode mode, tree type, bool named)
3329 struct mips_arg_info info;
3331 mips_arg_info (cum, mode, type, named, &info);
3332 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3336 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3337 PARM_BOUNDARY bits of alignment, but will be given anything up
3338 to STACK_BOUNDARY bits if the type requires it. */
3341 function_arg_boundary (enum machine_mode mode, tree type)
3343 unsigned int alignment;
3345 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3346 if (alignment < PARM_BOUNDARY)
3347 alignment = PARM_BOUNDARY;
3348 if (alignment > STACK_BOUNDARY)
3349 alignment = STACK_BOUNDARY;
3353 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3354 upward rather than downward. In other words, return true if the
3355 first byte of the stack slot has useful data, false if the last
3359 mips_pad_arg_upward (enum machine_mode mode, tree type)
3361 /* On little-endian targets, the first byte of every stack argument
3362 is passed in the first byte of the stack slot. */
3363 if (!BYTES_BIG_ENDIAN)
3366 /* Otherwise, integral types are padded downward: the last byte of a
3367 stack argument is passed in the last byte of the stack slot. */
3369 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3370 : GET_MODE_CLASS (mode) == MODE_INT)
3373 /* Big-endian o64 pads floating-point arguments downward. */
3374 if (mips_abi == ABI_O64)
3375 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3378 /* Other types are padded upward for o32, o64, n32 and n64. */
3379 if (mips_abi != ABI_EABI)
3382 /* Arguments smaller than a stack slot are padded downward. */
3383 if (mode != BLKmode)
3384 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
3386 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
3390 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3391 if the least significant byte of the register has useful data. Return
3392 the opposite if the most significant byte does. */
3395 mips_pad_reg_upward (enum machine_mode mode, tree type)
3397 /* No shifting is required for floating-point arguments. */
3398 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3399 return !BYTES_BIG_ENDIAN;
3401 /* Otherwise, apply the same padding to register arguments as we do
3402 to stack arguments. */
3403 return mips_pad_arg_upward (mode, type);
3407 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3408 tree type, int *pretend_size, int no_rtl)
3410 CUMULATIVE_ARGS local_cum;
3411 int gp_saved, fp_saved;
3413 /* The caller has advanced CUM up to, but not beyond, the last named
3414 argument. Advance a local copy of CUM past the last "real" named
3415 argument, to find out how many registers are left over. */
3418 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3420 /* Found out how many registers we need to save. */
3421 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3422 fp_saved = (EABI_FLOAT_VARARGS_P
3423 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
3432 ptr = virtual_incoming_args_rtx;
3437 ptr = plus_constant (ptr, local_cum.num_gprs * UNITS_PER_WORD);
3441 ptr = plus_constant (ptr, -gp_saved * UNITS_PER_WORD);
3444 mem = gen_rtx_MEM (BLKmode, ptr);
3445 set_mem_alias_set (mem, get_varargs_alias_set ());
3447 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
3452 /* We can't use move_block_from_reg, because it will use
3454 enum machine_mode mode;
3457 /* Set OFF to the offset from virtual_incoming_args_rtx of
3458 the first float register. The FP save area lies below
3459 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
3460 off = -gp_saved * UNITS_PER_WORD;
3461 off &= ~(UNITS_PER_FPVALUE - 1);
3462 off -= fp_saved * UNITS_PER_FPREG;
3464 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
3466 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
3470 ptr = plus_constant (virtual_incoming_args_rtx, off);
3471 mem = gen_rtx_MEM (mode, ptr);
3472 set_mem_alias_set (mem, get_varargs_alias_set ());
3473 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
3474 off += UNITS_PER_HWFPVALUE;
3480 /* No need for pretend arguments: the register parameter area was
3481 allocated by the caller. */
3485 *pretend_size = (gp_saved * UNITS_PER_WORD) + (fp_saved * UNITS_PER_FPREG);
3488 /* Create the va_list data type.
3489 We keep 3 pointers, and two offsets.
3490 Two pointers are to the overflow area, which starts at the CFA.
3491 One of these is constant, for addressing into the GPR save area below it.
3492 The other is advanced up the stack through the overflow region.
3493 The third pointer is to the GPR save area. Since the FPR save area
3494 is just below it, we can address FPR slots off this pointer.
3495 We also keep two one-byte offsets, which are to be subtracted from the
3496 constant pointers to yield addresses in the GPR and FPR save areas.
3497 These are downcounted as float or non-float arguments are used,
3498 and when they get to zero, the argument must be obtained from the
3500 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
3501 pointer is enough. It's started at the GPR save area, and is
3503 Note that the GPR save area is not constant size, due to optimization
3504 in the prologue. Hence, we can't use a design with two pointers
3505 and two offsets, although we could have designed this with two pointers
3506 and three offsets. */
3509 mips_build_builtin_va_list (void)
3511 if (EABI_FLOAT_VARARGS_P)
3513 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
3516 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3518 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
3520 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
3522 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
3524 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
3525 unsigned_char_type_node);
3526 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
3527 unsigned_char_type_node);
3528 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
3529 warn on every user file. */
3530 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
3531 array = build_array_type (unsigned_char_type_node,
3532 build_index_type (index));
3533 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
3535 DECL_FIELD_CONTEXT (f_ovfl) = record;
3536 DECL_FIELD_CONTEXT (f_gtop) = record;
3537 DECL_FIELD_CONTEXT (f_ftop) = record;
3538 DECL_FIELD_CONTEXT (f_goff) = record;
3539 DECL_FIELD_CONTEXT (f_foff) = record;
3540 DECL_FIELD_CONTEXT (f_res) = record;
3542 TYPE_FIELDS (record) = f_ovfl;
3543 TREE_CHAIN (f_ovfl) = f_gtop;
3544 TREE_CHAIN (f_gtop) = f_ftop;
3545 TREE_CHAIN (f_ftop) = f_goff;
3546 TREE_CHAIN (f_goff) = f_foff;
3547 TREE_CHAIN (f_foff) = f_res;
3549 layout_type (record);
3552 else if (TARGET_IRIX && TARGET_IRIX6)
3553 /* On IRIX 6, this type is 'char *'. */
3554 return build_pointer_type (char_type_node);
3556 /* Otherwise, we use 'void *'. */
3557 return ptr_type_node;
3560 /* Implement va_start. */
3563 mips_va_start (tree valist, rtx nextarg)
3565 const CUMULATIVE_ARGS *cum = ¤t_function_args_info;
3567 /* ARG_POINTER_REGNUM is initialized to STACK_POINTER_BOUNDARY, but
3568 since the stack is aligned for a pair of argument-passing slots,
3569 and the beginning of a variable argument list may be an odd slot,
3570 we have to decrease its alignment. */
3571 if (cfun && cfun->emit->regno_pointer_align)
3572 while (((current_function_pretend_args_size * BITS_PER_UNIT)
3573 & (REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) - 1)) != 0)
3574 REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) /= 2;
3576 if (mips_abi == ABI_EABI)
3578 int gpr_save_area_size;
3581 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
3583 if (EABI_FLOAT_VARARGS_P)
3585 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3586 tree ovfl, gtop, ftop, goff, foff;
3589 int fpr_save_area_size;
3591 f_ovfl = TYPE_FIELDS (va_list_type_node);
3592 f_gtop = TREE_CHAIN (f_ovfl);
3593 f_ftop = TREE_CHAIN (f_gtop);
3594 f_goff = TREE_CHAIN (f_ftop);
3595 f_foff = TREE_CHAIN (f_goff);
3597 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3599 gtop = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3601 ftop = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3603 goff = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3605 foff = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3608 /* Emit code to initialize OVFL, which points to the next varargs
3609 stack argument. CUM->STACK_WORDS gives the number of stack
3610 words used by named arguments. */
3611 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
3612 if (cum->stack_words > 0)
3613 t = build (PLUS_EXPR, TREE_TYPE (ovfl), t,
3614 build_int_cst (NULL_TREE,
3615 cum->stack_words * UNITS_PER_WORD));
3616 t = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3617 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3619 /* Emit code to initialize GTOP, the top of the GPR save area. */
3620 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
3621 t = build (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
3622 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3624 /* Emit code to initialize FTOP, the top of the FPR save area.
3625 This address is gpr_save_area_bytes below GTOP, rounded
3626 down to the next fp-aligned boundary. */
3627 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
3628 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
3629 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
3631 t = build (PLUS_EXPR, TREE_TYPE (ftop), t,
3632 build_int_cst (NULL_TREE, -fpr_offset));
3633 t = build (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
3634 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3636 /* Emit code to initialize GOFF, the offset from GTOP of the
3637 next GPR argument. */
3638 t = build (MODIFY_EXPR, TREE_TYPE (goff), goff,
3639 build_int_cst (NULL_TREE, gpr_save_area_size));
3640 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3642 /* Likewise emit code to initialize FOFF, the offset from FTOP
3643 of the next FPR argument. */
3645 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
3646 t = build (MODIFY_EXPR, TREE_TYPE (foff), foff,
3647 build_int_cst (NULL_TREE, fpr_save_area_size));
3648 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3652 /* Everything is in the GPR save area, or in the overflow
3653 area which is contiguous with it. */
3654 nextarg = plus_constant (nextarg, -gpr_save_area_size);
3655 std_expand_builtin_va_start (valist, nextarg);
3659 std_expand_builtin_va_start (valist, nextarg);
3662 /* Implement va_arg. */
3665 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
3667 HOST_WIDE_INT size, rsize;
3671 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
3674 type = build_pointer_type (type);
3676 size = int_size_in_bytes (type);
3677 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
3679 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
3680 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3683 /* Not a simple merged stack. */
3685 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3686 tree ovfl, top, off, align;
3687 HOST_WIDE_INT osize;
3690 f_ovfl = TYPE_FIELDS (va_list_type_node);
3691 f_gtop = TREE_CHAIN (f_ovfl);
3692 f_ftop = TREE_CHAIN (f_gtop);
3693 f_goff = TREE_CHAIN (f_ftop);
3694 f_foff = TREE_CHAIN (f_goff);
3696 /* We maintain separate pointers and offsets for floating-point
3697 and integer arguments, but we need similar code in both cases.
3700 TOP be the top of the register save area;
3701 OFF be the offset from TOP of the next register;
3702 ADDR_RTX be the address of the argument;
3703 RSIZE be the number of bytes used to store the argument
3704 when it's in the register save area;
3705 OSIZE be the number of bytes used to store it when it's
3706 in the stack overflow area; and
3707 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
3709 The code we want is:
3711 1: off &= -rsize; // round down
3714 4: addr_rtx = top - off;
3719 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
3720 10: addr_rtx = ovfl + PADDING;
3724 [1] and [9] can sometimes be optimized away. */
3726 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3729 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
3730 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
3732 top = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3734 off = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3737 /* When floating-point registers are saved to the stack,
3738 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
3739 of the float's precision. */
3740 rsize = UNITS_PER_HWFPVALUE;
3742 /* Overflow arguments are padded to UNITS_PER_WORD bytes
3743 (= PARM_BOUNDARY bits). This can be different from RSIZE
3746 (1) On 32-bit targets when TYPE is a structure such as:
3748 struct s { float f; };
3750 Such structures are passed in paired FPRs, so RSIZE
3751 will be 8 bytes. However, the structure only takes
3752 up 4 bytes of memory, so OSIZE will only be 4.
3754 (2) In combinations such as -mgp64 -msingle-float
3755 -fshort-double. Doubles passed in registers
3756 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
3757 but those passed on the stack take up
3758 UNITS_PER_WORD bytes. */
3759 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
3763 top = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3765 off = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3767 if (rsize > UNITS_PER_WORD)
3769 /* [1] Emit code for: off &= -rsize. */
3770 t = build (BIT_AND_EXPR, TREE_TYPE (off), off,
3771 build_int_cst (NULL_TREE, -rsize));
3772 t = build (MODIFY_EXPR, TREE_TYPE (off), off, t);
3773 gimplify_and_add (t, pre_p);
3778 /* [2] Emit code to branch if off == 0. */
3779 t = lang_hooks.truthvalue_conversion (off);
3780 addr = build (COND_EXPR, ptr_type_node, t, NULL, NULL);
3782 /* [5] Emit code for: off -= rsize. We do this as a form of
3783 post-increment not available to C. Also widen for the
3784 coming pointer arithmetic. */
3785 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
3786 t = build (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
3787 t = fold_convert (sizetype, t);
3788 t = fold_convert (TREE_TYPE (top), t);
3790 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
3791 the argument has RSIZE - SIZE bytes of leading padding. */
3792 t = build (MINUS_EXPR, TREE_TYPE (top), top, t);
3793 if (BYTES_BIG_ENDIAN && rsize > size)
3795 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
3797 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3799 COND_EXPR_THEN (addr) = t;
3801 if (osize > UNITS_PER_WORD)
3803 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
3804 u = fold_convert (TREE_TYPE (ovfl),
3805 build_int_cst (NULL_TREE, osize - 1));
3806 t = build (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
3807 u = fold_convert (TREE_TYPE (ovfl),
3808 build_int_cst (NULL_TREE, -osize));
3809 t = build (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
3810 align = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3815 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
3816 post-increment ovfl by osize. On big-endian machines,
3817 the argument has OSIZE - SIZE bytes of leading padding. */
3818 u = fold_convert (TREE_TYPE (ovfl),
3819 build_int_cst (NULL_TREE, osize));
3820 t = build (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
3821 if (BYTES_BIG_ENDIAN && osize > size)
3823 u = fold_convert (TREE_TYPE (t),
3824 build_int_cst (NULL_TREE, osize - size));
3825 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3828 /* String [9] and [10,11] together. */
3830 t = build (COMPOUND_EXPR, TREE_TYPE (t), align, t);
3831 COND_EXPR_ELSE (addr) = t;
3833 addr = fold_convert (build_pointer_type (type), addr);
3834 addr = build_fold_indirect_ref (addr);
3838 addr = build_fold_indirect_ref (addr);
3843 /* Return true if it is possible to use left/right accesses for a
3844 bitfield of WIDTH bits starting BITPOS bits into *OP. When
3845 returning true, update *OP, *LEFT and *RIGHT as follows:
3847 *OP is a BLKmode reference to the whole field.
3849 *LEFT is a QImode reference to the first byte if big endian or
3850 the last byte if little endian. This address can be used in the
3851 left-side instructions (lwl, swl, ldl, sdl).
3853 *RIGHT is a QImode reference to the opposite end of the field and
3854 can be used in the parterning right-side instruction. */
3857 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
3858 rtx *left, rtx *right)
3862 /* Check that the operand really is a MEM. Not all the extv and
3863 extzv predicates are checked. */
3867 /* Check that the size is valid. */
3868 if (width != 32 && (!TARGET_64BIT || width != 64))
3871 /* We can only access byte-aligned values. Since we are always passed
3872 a reference to the first byte of the field, it is not necessary to
3873 do anything with BITPOS after this check. */
3874 if (bitpos % BITS_PER_UNIT != 0)
3877 /* Reject aligned bitfields: we want to use a normal load or store
3878 instead of a left/right pair. */
3879 if (MEM_ALIGN (*op) >= width)
3882 /* Adjust *OP to refer to the whole field. This also has the effect
3883 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
3884 *op = adjust_address (*op, BLKmode, 0);
3885 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
3887 /* Get references to both ends of the field. We deliberately don't
3888 use the original QImode *OP for FIRST since the new BLKmode one
3889 might have a simpler address. */
3890 first = adjust_address (*op, QImode, 0);
3891 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
3893 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
3894 be the upper word and RIGHT the lower word. */
3895 if (TARGET_BIG_ENDIAN)
3896 *left = first, *right = last;
3898 *left = last, *right = first;
3904 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
3905 Return true on success. We only handle cases where zero_extract is
3906 equivalent to sign_extract. */
3909 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
3911 rtx left, right, temp;
3913 /* If TARGET_64BIT, the destination of a 32-bit load will be a
3914 paradoxical word_mode subreg. This is the only case in which
3915 we allow the destination to be larger than the source. */
3916 if (GET_CODE (dest) == SUBREG
3917 && GET_MODE (dest) == DImode
3918 && SUBREG_BYTE (dest) == 0
3919 && GET_MODE (SUBREG_REG (dest)) == SImode)
3920 dest = SUBREG_REG (dest);
3922 /* After the above adjustment, the destination must be the same
3923 width as the source. */
3924 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
3927 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
3930 temp = gen_reg_rtx (GET_MODE (dest));
3931 if (GET_MODE (dest) == DImode)
3933 emit_insn (gen_mov_ldl (temp, src, left));
3934 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
3938 emit_insn (gen_mov_lwl (temp, src, left));
3939 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
3945 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
3949 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
3953 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
3956 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
3958 if (GET_MODE (src) == DImode)
3960 emit_insn (gen_mov_sdl (dest, src, left));
3961 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
3965 emit_insn (gen_mov_swl (dest, src, left));
3966 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
3971 /* Set up globals to generate code for the ISA or processor
3972 described by INFO. */
3975 mips_set_architecture (const struct mips_cpu_info *info)
3979 mips_arch_info = info;
3980 mips_arch = info->cpu;
3981 mips_isa = info->isa;
3986 /* Likewise for tuning. */
3989 mips_set_tune (const struct mips_cpu_info *info)
3993 mips_tune_info = info;
3994 mips_tune = info->cpu;
3999 /* Set up the threshold for data to go into the small data area, instead
4000 of the normal data area, and detect any conflicts in the switches. */
4003 override_options (void)
4005 int i, start, regno;
4006 enum machine_mode mode;
4008 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4010 /* Interpret -mabi. */
4011 mips_abi = MIPS_ABI_DEFAULT;
4012 if (mips_abi_string != 0)
4014 if (strcmp (mips_abi_string, "32") == 0)
4016 else if (strcmp (mips_abi_string, "o64") == 0)
4018 else if (strcmp (mips_abi_string, "n32") == 0)
4020 else if (strcmp (mips_abi_string, "64") == 0)
4022 else if (strcmp (mips_abi_string, "eabi") == 0)
4023 mips_abi = ABI_EABI;
4025 fatal_error ("bad value (%s) for -mabi= switch", mips_abi_string);
4028 /* The following code determines the architecture and register size.
4029 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4030 The GAS and GCC code should be kept in sync as much as possible. */
4032 if (mips_arch_string != 0)
4033 mips_set_architecture (mips_parse_cpu ("-march", mips_arch_string));
4035 if (mips_isa_string != 0)
4037 /* Handle -mipsN. */
4038 char *whole_isa_str = concat ("mips", mips_isa_string, NULL);
4039 const struct mips_cpu_info *isa_info;
4041 isa_info = mips_parse_cpu ("-mips option", whole_isa_str);
4042 free (whole_isa_str);
4044 /* -march takes precedence over -mipsN, since it is more descriptive.
4045 There's no harm in specifying both as long as the ISA levels
4047 if (mips_arch_info != 0 && mips_isa != isa_info->isa)
4048 error ("-mips%s conflicts with the other architecture options, "
4049 "which specify a MIPS%d processor",
4050 mips_isa_string, mips_isa);
4052 /* Set architecture based on the given option. */
4053 mips_set_architecture (isa_info);
4056 if (mips_arch_info == 0)
4058 #ifdef MIPS_CPU_STRING_DEFAULT
4059 mips_set_architecture (mips_parse_cpu ("default CPU",
4060 MIPS_CPU_STRING_DEFAULT));
4062 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4066 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4067 error ("-march=%s is not compatible with the selected ABI",
4068 mips_arch_info->name);
4070 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4071 if (mips_tune_string != 0)
4072 mips_set_tune (mips_parse_cpu ("-mtune", mips_tune_string));
4074 if (mips_tune_info == 0)
4075 mips_set_tune (mips_arch_info);
4077 if ((target_flags_explicit & MASK_64BIT) != 0)
4079 /* The user specified the size of the integer registers. Make sure
4080 it agrees with the ABI and ISA. */
4081 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4082 error ("-mgp64 used with a 32-bit processor");
4083 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4084 error ("-mgp32 used with a 64-bit ABI");
4085 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4086 error ("-mgp64 used with a 32-bit ABI");
4090 /* Infer the integer register size from the ABI and processor.
4091 Restrict ourselves to 32-bit registers if that's all the
4092 processor has, or if the ABI cannot handle 64-bit registers. */
4093 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4094 target_flags &= ~MASK_64BIT;
4096 target_flags |= MASK_64BIT;
4099 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4101 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4102 only one right answer here. */
4103 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4104 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4105 else if (!TARGET_64BIT && TARGET_FLOAT64)
4106 error ("unsupported combination: %s", "-mgp32 -mfp64");
4107 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4108 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4112 /* -msingle-float selects 32-bit float registers. Otherwise the
4113 float registers should be the same size as the integer ones. */
4114 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4115 target_flags |= MASK_FLOAT64;
4117 target_flags &= ~MASK_FLOAT64;
4120 /* End of code shared with GAS. */
4122 if ((target_flags_explicit & MASK_LONG64) == 0)
4124 /* If no type size setting options (-mlong64,-mint64,-mlong32)
4125 were used, then set the type sizes. In the EABI in 64 bit mode,
4126 longs and pointers are 64 bits. Likewise for the SGI Irix6 N64
4128 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4129 target_flags |= MASK_LONG64;
4131 target_flags &= ~MASK_LONG64;
4134 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4135 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4137 /* For some configurations, it is useful to have -march control
4138 the default setting of MASK_SOFT_FLOAT. */
4139 switch ((int) mips_arch)
4141 case PROCESSOR_R4100:
4142 case PROCESSOR_R4111:
4143 case PROCESSOR_R4120:
4144 case PROCESSOR_R4130:
4145 target_flags |= MASK_SOFT_FLOAT;
4149 target_flags &= ~MASK_SOFT_FLOAT;
4155 flag_pcc_struct_return = 0;
4157 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4159 /* If neither -mbranch-likely nor -mno-branch-likely was given
4160 on the command line, set MASK_BRANCHLIKELY based on the target
4163 By default, we enable use of Branch Likely instructions on
4164 all architectures which support them with the following
4165 exceptions: when creating MIPS32 or MIPS64 code, and when
4166 tuning for architectures where their use tends to hurt
4169 The MIPS32 and MIPS64 architecture specifications say "Software
4170 is strongly encouraged to avoid use of Branch Likely
4171 instructions, as they will be removed from a future revision
4172 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4173 issue those instructions unless instructed to do so by
4175 if (ISA_HAS_BRANCHLIKELY
4176 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4177 && !(TUNE_MIPS5500 || TUNE_SB1))
4178 target_flags |= MASK_BRANCHLIKELY;
4180 target_flags &= ~MASK_BRANCHLIKELY;
4182 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4183 warning ("generation of Branch Likely instructions enabled, but not supported by architecture");
4185 /* The effect of -mabicalls isn't defined for the EABI. */
4186 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4188 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4189 target_flags &= ~MASK_ABICALLS;
4192 /* -fpic (-KPIC) is the default when TARGET_ABICALLS is defined. We need
4193 to set flag_pic so that the LEGITIMATE_PIC_OPERAND_P macro will work. */
4194 /* ??? -non_shared turns off pic code generation, but this is not
4196 if (TARGET_ABICALLS)
4199 if (mips_section_threshold > 0)
4200 warning ("-G is incompatible with PIC code which is the default");
4203 /* mips_split_addresses is a half-way house between explicit
4204 relocations and the traditional assembler macros. It can
4205 split absolute 32-bit symbolic constants into a high/lo_sum
4206 pair but uses macros for other sorts of access.
4208 Like explicit relocation support for REL targets, it relies
4209 on GNU extensions in the assembler and the linker.
4211 Although this code should work for -O0, it has traditionally
4212 been treated as an optimization. */
4213 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4214 && optimize && !flag_pic
4215 && !ABI_HAS_64BIT_SYMBOLS)
4216 mips_split_addresses = 1;
4218 mips_split_addresses = 0;
4220 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4221 faster code, but at the expense of more nops. Enable it at -O3 and
4223 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4224 target_flags |= MASK_VR4130_ALIGN;
4226 /* When compiling for the mips16, we cannot use floating point. We
4227 record the original hard float value in mips16_hard_float. */
4230 if (TARGET_SOFT_FLOAT)
4231 mips16_hard_float = 0;
4233 mips16_hard_float = 1;
4234 target_flags |= MASK_SOFT_FLOAT;
4236 /* Don't run the scheduler before reload, since it tends to
4237 increase register pressure. */
4238 flag_schedule_insns = 0;
4240 /* Don't do hot/cold partitioning. The constant layout code expects
4241 the whole function to be in a single section. */
4242 flag_reorder_blocks_and_partition = 0;
4244 /* Silently disable -mexplicit-relocs since it doesn't apply
4245 to mips16 code. Even so, it would overly pedantic to warn
4246 about "-mips16 -mexplicit-relocs", especially given that
4247 we use a %gprel() operator. */
4248 target_flags &= ~MASK_EXPLICIT_RELOCS;
4251 /* When using explicit relocs, we call dbr_schedule from within
4253 if (TARGET_EXPLICIT_RELOCS)
4255 mips_flag_delayed_branch = flag_delayed_branch;
4256 flag_delayed_branch = 0;
4259 #ifdef MIPS_TFMODE_FORMAT
4260 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4263 /* Make sure that the user didn't turn off paired single support when
4264 MIPS-3D support is requested. */
4265 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE)
4266 && !TARGET_PAIRED_SINGLE_FLOAT)
4267 error ("-mips3d requires -mpaired-single");
4269 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE. */
4271 target_flags |= MASK_PAIRED_SINGLE;
4273 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4274 and TARGET_HARD_FLOAT are both true. */
4275 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4276 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4278 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4280 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4281 error ("-mips3d/-mpaired-single must be used with -mips64");
4283 mips_print_operand_punct['?'] = 1;
4284 mips_print_operand_punct['#'] = 1;
4285 mips_print_operand_punct['/'] = 1;
4286 mips_print_operand_punct['&'] = 1;
4287 mips_print_operand_punct['!'] = 1;
4288 mips_print_operand_punct['*'] = 1;
4289 mips_print_operand_punct['@'] = 1;
4290 mips_print_operand_punct['.'] = 1;
4291 mips_print_operand_punct['('] = 1;
4292 mips_print_operand_punct[')'] = 1;
4293 mips_print_operand_punct['['] = 1;
4294 mips_print_operand_punct[']'] = 1;
4295 mips_print_operand_punct['<'] = 1;
4296 mips_print_operand_punct['>'] = 1;
4297 mips_print_operand_punct['{'] = 1;
4298 mips_print_operand_punct['}'] = 1;
4299 mips_print_operand_punct['^'] = 1;
4300 mips_print_operand_punct['$'] = 1;
4301 mips_print_operand_punct['+'] = 1;
4302 mips_print_operand_punct['~'] = 1;
4304 mips_char_to_class['d'] = TARGET_MIPS16 ? M16_REGS : GR_REGS;
4305 mips_char_to_class['t'] = T_REG;
4306 mips_char_to_class['f'] = (TARGET_HARD_FLOAT ? FP_REGS : NO_REGS);
4307 mips_char_to_class['h'] = HI_REG;
4308 mips_char_to_class['l'] = LO_REG;
4309 mips_char_to_class['x'] = MD_REGS;
4310 mips_char_to_class['b'] = ALL_REGS;
4311 mips_char_to_class['c'] = (TARGET_ABICALLS ? PIC_FN_ADDR_REG :
4312 TARGET_MIPS16 ? M16_NA_REGS :
4314 mips_char_to_class['e'] = LEA_REGS;
4315 mips_char_to_class['j'] = PIC_FN_ADDR_REG;
4316 mips_char_to_class['y'] = GR_REGS;
4317 mips_char_to_class['z'] = ST_REGS;
4318 mips_char_to_class['B'] = COP0_REGS;
4319 mips_char_to_class['C'] = COP2_REGS;
4320 mips_char_to_class['D'] = COP3_REGS;
4322 /* Set up array to map GCC register number to debug register number.
4323 Ignore the special purpose register numbers. */
4325 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4326 mips_dbx_regno[i] = -1;
4328 start = GP_DBX_FIRST - GP_REG_FIRST;
4329 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4330 mips_dbx_regno[i] = i + start;
4332 start = FP_DBX_FIRST - FP_REG_FIRST;
4333 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4334 mips_dbx_regno[i] = i + start;
4336 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4337 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4339 /* Set up array giving whether a given register can hold a given mode. */
4341 for (mode = VOIDmode;
4342 mode != MAX_MACHINE_MODE;
4343 mode = (enum machine_mode) ((int)mode + 1))
4345 register int size = GET_MODE_SIZE (mode);
4346 register enum mode_class class = GET_MODE_CLASS (mode);
4348 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4352 if (mode == CCV2mode)
4355 && (regno - ST_REG_FIRST) % 2 == 0);
4357 else if (mode == CCV4mode)
4360 && (regno - ST_REG_FIRST) % 4 == 0);
4362 else if (mode == CCmode)
4365 temp = (regno == FPSW_REGNUM);
4367 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4368 || FP_REG_P (regno));
4371 else if (GP_REG_P (regno))
4372 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4374 else if (FP_REG_P (regno))
4375 temp = ((regno % FP_INC) == 0)
4376 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
4377 || class == MODE_VECTOR_FLOAT)
4378 && size <= UNITS_PER_FPVALUE)
4379 /* Allow integer modes that fit into a single
4380 register. We need to put integers into FPRs
4381 when using instructions like cvt and trunc. */
4382 || (class == MODE_INT && size <= UNITS_PER_FPREG)
4383 /* Allow TFmode for CCmode reloads. */
4384 || (ISA_HAS_8CC && mode == TFmode));
4386 else if (MD_REG_P (regno))
4387 temp = (INTEGRAL_MODE_P (mode)
4388 && (size <= UNITS_PER_WORD
4389 || (regno == MD_REG_FIRST
4390 && size == 2 * UNITS_PER_WORD)));
4392 else if (ALL_COP_REG_P (regno))
4393 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4397 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4401 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4402 initialized yet, so we can't use that here. */
4403 gpr_mode = TARGET_64BIT ? DImode : SImode;
4405 /* Provide default values for align_* for 64-bit targets. */
4406 if (TARGET_64BIT && !TARGET_MIPS16)
4408 if (align_loops == 0)
4410 if (align_jumps == 0)
4412 if (align_functions == 0)
4413 align_functions = 8;
4416 /* Function to allocate machine-dependent function status. */
4417 init_machine_status = &mips_init_machine_status;
4419 if (ABI_HAS_64BIT_SYMBOLS)
4421 if (TARGET_EXPLICIT_RELOCS)
4423 mips_split_p[SYMBOL_64_HIGH] = true;
4424 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4425 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4427 mips_split_p[SYMBOL_64_MID] = true;
4428 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4429 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4431 mips_split_p[SYMBOL_64_LOW] = true;
4432 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
4433 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
4435 mips_split_p[SYMBOL_GENERAL] = true;
4436 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4441 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
4443 mips_split_p[SYMBOL_GENERAL] = true;
4444 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
4445 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4451 /* The high part is provided by a pseudo copy of $gp. */
4452 mips_split_p[SYMBOL_SMALL_DATA] = true;
4453 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
4456 if (TARGET_EXPLICIT_RELOCS)
4458 /* Small data constants are kept whole until after reload,
4459 then lowered by mips_rewrite_small_data. */
4460 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
4462 mips_split_p[SYMBOL_GOT_LOCAL] = true;
4465 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
4466 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
4470 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
4471 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
4476 /* The HIGH and LO_SUM are matched by special .md patterns. */
4477 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
4479 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
4480 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
4481 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
4483 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
4484 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
4485 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
4490 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
4492 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
4493 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
4499 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
4500 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
4501 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
4504 /* Default to working around R4000 errata only if the processor
4505 was selected explicitly. */
4506 if ((target_flags_explicit & MASK_FIX_R4000) == 0
4507 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
4508 target_flags |= MASK_FIX_R4000;
4510 /* Default to working around R4400 errata only if the processor
4511 was selected explicitly. */
4512 if ((target_flags_explicit & MASK_FIX_R4400) == 0
4513 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
4514 target_flags |= MASK_FIX_R4400;
4517 /* Implement CONDITIONAL_REGISTER_USAGE. */
4520 mips_conditional_register_usage (void)
4522 if (!TARGET_HARD_FLOAT)
4526 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4527 fixed_regs[regno] = call_used_regs[regno] = 1;
4528 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4529 fixed_regs[regno] = call_used_regs[regno] = 1;
4531 else if (! ISA_HAS_8CC)
4535 /* We only have a single condition code register. We
4536 implement this by hiding all the condition code registers,
4537 and generating RTL that refers directly to ST_REG_FIRST. */
4538 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4539 fixed_regs[regno] = call_used_regs[regno] = 1;
4541 /* In mips16 mode, we permit the $t temporary registers to be used
4542 for reload. We prohibit the unused $s registers, since they
4543 are caller saved, and saving them via a mips16 register would
4544 probably waste more time than just reloading the value. */
4547 fixed_regs[18] = call_used_regs[18] = 1;
4548 fixed_regs[19] = call_used_regs[19] = 1;
4549 fixed_regs[20] = call_used_regs[20] = 1;
4550 fixed_regs[21] = call_used_regs[21] = 1;
4551 fixed_regs[22] = call_used_regs[22] = 1;
4552 fixed_regs[23] = call_used_regs[23] = 1;
4553 fixed_regs[26] = call_used_regs[26] = 1;
4554 fixed_regs[27] = call_used_regs[27] = 1;
4555 fixed_regs[30] = call_used_regs[30] = 1;
4557 /* fp20-23 are now caller saved. */
4558 if (mips_abi == ABI_64)
4561 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
4562 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4564 /* Odd registers from fp21 to fp31 are now caller saved. */
4565 if (mips_abi == ABI_N32)
4568 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
4569 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4573 /* Allocate a chunk of memory for per-function machine-dependent data. */
4574 static struct machine_function *
4575 mips_init_machine_status (void)
4577 return ((struct machine_function *)
4578 ggc_alloc_cleared (sizeof (struct machine_function)));
4581 /* On the mips16, we want to allocate $24 (T_REG) before other
4582 registers for instructions for which it is possible. This helps
4583 avoid shuffling registers around in order to set up for an xor,
4584 encouraging the compiler to use a cmp instead. */
4587 mips_order_regs_for_local_alloc (void)
4591 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4592 reg_alloc_order[i] = i;
4596 /* It really doesn't matter where we put register 0, since it is
4597 a fixed register anyhow. */
4598 reg_alloc_order[0] = 24;
4599 reg_alloc_order[24] = 0;
4604 /* The MIPS debug format wants all automatic variables and arguments
4605 to be in terms of the virtual frame pointer (stack pointer before
4606 any adjustment in the function), while the MIPS 3.0 linker wants
4607 the frame pointer to be the stack pointer after the initial
4608 adjustment. So, we do the adjustment here. The arg pointer (which
4609 is eliminated) points to the virtual frame pointer, while the frame
4610 pointer (which may be eliminated) points to the stack pointer after
4611 the initial adjustments. */
4614 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
4616 rtx offset2 = const0_rtx;
4617 rtx reg = eliminate_constant_term (addr, &offset2);
4620 offset = INTVAL (offset2);
4622 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
4623 || reg == hard_frame_pointer_rtx)
4625 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
4626 ? compute_frame_size (get_frame_size ())
4627 : cfun->machine->frame.total_size;
4629 /* MIPS16 frame is smaller */
4630 if (frame_pointer_needed && TARGET_MIPS16)
4631 frame_size -= cfun->machine->frame.args_size;
4633 offset = offset - frame_size;
4636 /* sdbout_parms does not want this to crash for unrecognized cases. */
4638 else if (reg != arg_pointer_rtx)
4639 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
4646 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
4648 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
4649 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
4650 'h' OP is HIGH, prints %hi(X),
4651 'd' output integer constant in decimal,
4652 'z' if the operand is 0, use $0 instead of normal operand.
4653 'D' print second part of double-word register or memory operand.
4654 'L' print low-order register of double-word register operand.
4655 'M' print high-order register of double-word register operand.
4656 'C' print part of opcode for a branch condition.
4657 'F' print part of opcode for a floating-point branch condition.
4658 'N' print part of opcode for a branch condition, inverted.
4659 'W' print part of opcode for a floating-point branch condition, inverted.
4660 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
4661 'z' for (eq:?I ...), 'n' for (ne:?I ...).
4662 't' like 'T', but with the EQ/NE cases reversed
4663 'Y' for a CONST_INT X, print mips_fp_conditions[X]
4664 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
4665 'R' print the reloc associated with LO_SUM
4667 The punctuation characters are:
4669 '(' Turn on .set noreorder
4670 ')' Turn on .set reorder
4671 '[' Turn on .set noat
4673 '<' Turn on .set nomacro
4674 '>' Turn on .set macro
4675 '{' Turn on .set volatile (not GAS)
4676 '}' Turn on .set novolatile (not GAS)
4677 '&' Turn on .set noreorder if filling delay slots
4678 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
4679 '!' Turn on .set nomacro if filling delay slots
4680 '#' Print nop if in a .set noreorder section.
4681 '/' Like '#', but does nothing within a delayed branch sequence
4682 '?' Print 'l' if we are to use a branch likely instead of normal branch.
4683 '@' Print the name of the assembler temporary register (at or $1).
4684 '.' Print the name of the register with a hard-wired zero (zero or $0).
4685 '^' Print the name of the pic call-through register (t9 or $25).
4686 '$' Print the name of the stack pointer register (sp or $29).
4687 '+' Print the name of the gp register (usually gp or $28).
4688 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
4691 print_operand (FILE *file, rtx op, int letter)
4693 register enum rtx_code code;
4695 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
4700 if (mips_branch_likely)
4705 fputs (reg_names [GP_REG_FIRST + 1], file);
4709 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
4713 fputs (reg_names [GP_REG_FIRST + 0], file);
4717 fputs (reg_names[STACK_POINTER_REGNUM], file);
4721 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
4725 if (final_sequence != 0 && set_noreorder++ == 0)
4726 fputs (".set\tnoreorder\n\t", file);
4730 if (final_sequence != 0)
4732 if (set_noreorder++ == 0)
4733 fputs (".set\tnoreorder\n\t", file);
4735 if (set_nomacro++ == 0)
4736 fputs (".set\tnomacro\n\t", file);
4741 if (final_sequence != 0 && set_nomacro++ == 0)
4742 fputs ("\n\t.set\tnomacro", file);
4746 if (set_noreorder != 0)
4747 fputs ("\n\tnop", file);
4751 /* Print an extra newline so that the delayed insn is separated
4752 from the following ones. This looks neater and is consistent
4753 with non-nop delayed sequences. */
4754 if (set_noreorder != 0 && final_sequence == 0)
4755 fputs ("\n\tnop\n", file);
4759 if (set_noreorder++ == 0)
4760 fputs (".set\tnoreorder\n\t", file);
4764 if (set_noreorder == 0)
4765 error ("internal error: %%) found without a %%( in assembler pattern");
4767 else if (--set_noreorder == 0)
4768 fputs ("\n\t.set\treorder", file);
4773 if (set_noat++ == 0)
4774 fputs (".set\tnoat\n\t", file);
4779 error ("internal error: %%] found without a %%[ in assembler pattern");
4780 else if (--set_noat == 0)
4781 fputs ("\n\t.set\tat", file);
4786 if (set_nomacro++ == 0)
4787 fputs (".set\tnomacro\n\t", file);
4791 if (set_nomacro == 0)
4792 error ("internal error: %%> found without a %%< in assembler pattern");
4793 else if (--set_nomacro == 0)
4794 fputs ("\n\t.set\tmacro", file);
4799 if (set_volatile++ == 0)
4800 fputs ("#.set\tvolatile\n\t", file);
4804 if (set_volatile == 0)
4805 error ("internal error: %%} found without a %%{ in assembler pattern");
4806 else if (--set_volatile == 0)
4807 fputs ("\n\t#.set\tnovolatile", file);
4813 if (align_labels_log > 0)
4814 ASM_OUTPUT_ALIGN (file, align_labels_log);
4819 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
4828 error ("PRINT_OPERAND null pointer");
4832 code = GET_CODE (op);
4837 case EQ: fputs ("eq", file); break;
4838 case NE: fputs ("ne", file); break;
4839 case GT: fputs ("gt", file); break;
4840 case GE: fputs ("ge", file); break;
4841 case LT: fputs ("lt", file); break;
4842 case LE: fputs ("le", file); break;
4843 case GTU: fputs ("gtu", file); break;
4844 case GEU: fputs ("geu", file); break;
4845 case LTU: fputs ("ltu", file); break;
4846 case LEU: fputs ("leu", file); break;
4848 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
4851 else if (letter == 'N')
4854 case EQ: fputs ("ne", file); break;
4855 case NE: fputs ("eq", file); break;
4856 case GT: fputs ("le", file); break;
4857 case GE: fputs ("lt", file); break;
4858 case LT: fputs ("ge", file); break;
4859 case LE: fputs ("gt", file); break;
4860 case GTU: fputs ("leu", file); break;
4861 case GEU: fputs ("ltu", file); break;
4862 case LTU: fputs ("geu", file); break;
4863 case LEU: fputs ("gtu", file); break;
4865 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
4868 else if (letter == 'F')
4871 case EQ: fputs ("c1f", file); break;
4872 case NE: fputs ("c1t", file); break;
4874 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
4877 else if (letter == 'W')
4880 case EQ: fputs ("c1t", file); break;
4881 case NE: fputs ("c1f", file); break;
4883 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
4886 else if (letter == 'h')
4888 if (GET_CODE (op) == HIGH)
4891 print_operand_reloc (file, op, mips_hi_relocs);
4894 else if (letter == 'R')
4895 print_operand_reloc (file, op, mips_lo_relocs);
4897 else if (letter == 'Y')
4899 if (GET_CODE (op) == CONST_INT
4900 && ((unsigned HOST_WIDE_INT) INTVAL (op)
4901 < ARRAY_SIZE (mips_fp_conditions)))
4902 fputs (mips_fp_conditions[INTVAL (op)], file);
4904 output_operand_lossage ("invalid %%Y value");
4907 else if (letter == 'Z')
4911 print_operand (file, op, 0);
4916 else if (code == REG || code == SUBREG)
4918 register int regnum;
4921 regnum = REGNO (op);
4923 regnum = true_regnum (op);
4925 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
4926 || (letter == 'L' && WORDS_BIG_ENDIAN)
4930 fprintf (file, "%s", reg_names[regnum]);
4933 else if (code == MEM)
4936 output_address (plus_constant (XEXP (op, 0), 4));
4938 output_address (XEXP (op, 0));
4941 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
4942 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
4944 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
4945 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
4947 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
4948 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
4950 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
4951 fputs (reg_names[GP_REG_FIRST], file);
4953 else if (letter == 'd' || letter == 'x' || letter == 'X')
4954 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
4956 else if (letter == 'T' || letter == 't')
4958 int truth = (code == NE) == (letter == 'T');
4959 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
4962 else if (CONST_GP_P (op))
4963 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
4966 output_addr_const (file, op);
4970 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
4971 RELOCS is the array of relocations to use. */
4974 print_operand_reloc (FILE *file, rtx op, const char **relocs)
4976 enum mips_symbol_type symbol_type;
4979 HOST_WIDE_INT offset;
4981 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
4982 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
4984 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
4985 mips_split_const (op, &base, &offset);
4986 if (UNSPEC_ADDRESS_P (base))
4987 op = plus_constant (UNSPEC_ADDRESS (base), offset);
4989 fputs (relocs[symbol_type], file);
4990 output_addr_const (file, op);
4991 for (p = relocs[symbol_type]; *p != 0; p++)
4996 /* Output address operand X to FILE. */
4999 print_operand_address (FILE *file, rtx x)
5001 struct mips_address_info addr;
5003 if (mips_classify_address (&addr, x, word_mode, true))
5007 print_operand (file, addr.offset, 0);
5008 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5011 case ADDRESS_LO_SUM:
5012 print_operand (file, addr.offset, 'R');
5013 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5016 case ADDRESS_CONST_INT:
5017 output_addr_const (file, x);
5018 fprintf (file, "(%s)", reg_names[0]);
5021 case ADDRESS_SYMBOLIC:
5022 output_addr_const (file, x);
5028 /* When using assembler macros, keep track of all of small-data externs
5029 so that mips_file_end can emit the appropriate declarations for them.
5031 In most cases it would be safe (though pointless) to emit .externs
5032 for other symbols too. One exception is when an object is within
5033 the -G limit but declared by the user to be in a section other
5034 than .sbss or .sdata. */
5037 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5039 register struct extern_list *p;
5041 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5043 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5044 p->next = extern_head;
5046 p->size = int_size_in_bytes (TREE_TYPE (decl));
5050 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5052 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5053 p->next = extern_head;
5064 irix_output_external_libcall (rtx fun)
5066 register struct extern_list *p;
5068 if (mips_abi == ABI_32)
5070 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5071 p->next = extern_head;
5072 p->name = XSTR (fun, 0);
5079 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5080 put out a MIPS ECOFF file and a stab. */
5083 mips_output_filename (FILE *stream, const char *name)
5086 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5088 if (write_symbols == DWARF2_DEBUG)
5090 else if (mips_output_filename_first_time)
5092 mips_output_filename_first_time = 0;
5093 num_source_filenames += 1;
5094 current_function_file = name;
5095 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5096 output_quoted_string (stream, name);
5097 putc ('\n', stream);
5100 /* If we are emitting stabs, let dbxout.c handle this (except for
5101 the mips_output_filename_first_time case). */
5102 else if (write_symbols == DBX_DEBUG)
5105 else if (name != current_function_file
5106 && strcmp (name, current_function_file) != 0)
5108 num_source_filenames += 1;
5109 current_function_file = name;
5110 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5111 output_quoted_string (stream, name);
5112 putc ('\n', stream);
5116 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5117 that should be written before the opening quote, such as "\t.ascii\t"
5118 for real string data or "\t# " for a comment. */
5121 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5126 register const unsigned char *string =
5127 (const unsigned char *)string_param;
5129 fprintf (stream, "%s\"", prefix);
5130 for (i = 0; i < len; i++)
5132 register int c = string[i];
5138 putc ('\\', stream);
5143 case TARGET_NEWLINE:
5144 fputs ("\\n", stream);
5146 && (((c = string[i+1]) >= '\040' && c <= '~')
5147 || c == TARGET_TAB))
5148 cur_pos = 32767; /* break right here */
5154 fputs ("\\t", stream);
5159 fputs ("\\f", stream);
5164 fputs ("\\b", stream);
5169 fputs ("\\r", stream);
5174 if (c >= ' ' && c < 0177)
5181 fprintf (stream, "\\%03o", c);
5186 if (cur_pos > 72 && i+1 < len)
5189 fprintf (stream, "\"\n%s\"", prefix);
5192 fprintf (stream, "\"\n");
5195 /* Implement TARGET_ASM_FILE_START. */
5198 mips_file_start (void)
5200 default_file_start ();
5204 /* Generate a special section to describe the ABI switches used to
5205 produce the resultant binary. This used to be done by the assembler
5206 setting bits in the ELF header's flags field, but we have run out of
5207 bits. GDB needs this information in order to be able to correctly
5208 debug these binaries. See the function mips_gdbarch_init() in
5209 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5210 causes unnecessary IRIX 6 ld warnings. */
5211 const char * abi_string = NULL;
5215 case ABI_32: abi_string = "abi32"; break;
5216 case ABI_N32: abi_string = "abiN32"; break;
5217 case ABI_64: abi_string = "abi64"; break;
5218 case ABI_O64: abi_string = "abiO64"; break;
5219 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5223 /* Note - we use fprintf directly rather than called named_section()
5224 because in this way we can avoid creating an allocated section. We
5225 do not want this section to take up any space in the running
5227 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5229 /* There is no ELF header flag to distinguish long32 forms of the
5230 EABI from long64 forms. Emit a special section to help tools
5232 if (mips_abi == ABI_EABI)
5233 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5234 TARGET_LONG64 ? 64 : 32);
5236 /* Restore the default section. */
5237 fprintf (asm_out_file, "\t.previous\n");
5240 /* Generate the pseudo ops that System V.4 wants. */
5241 if (TARGET_ABICALLS)
5242 /* ??? but do not want this (or want pic0) if -non-shared? */
5243 fprintf (asm_out_file, "\t.abicalls\n");
5246 fprintf (asm_out_file, "\t.set\tmips16\n");
5248 if (flag_verbose_asm)
5249 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5251 mips_section_threshold, mips_arch_info->name, mips_isa);
5254 #ifdef BSS_SECTION_ASM_OP
5255 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5256 in the use of sbss. */
5259 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5260 unsigned HOST_WIDE_INT size, int align)
5262 extern tree last_assemble_variable_decl;
5264 if (mips_in_small_data_p (decl))
5265 named_section (0, ".sbss", 0);
5268 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5269 last_assemble_variable_decl = decl;
5270 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5271 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5275 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5276 .externs for any small-data variables that turned out to be external. */
5279 mips_file_end (void)
5282 struct extern_list *p;
5286 fputs ("\n", asm_out_file);
5288 for (p = extern_head; p != 0; p = p->next)
5290 name_tree = get_identifier (p->name);
5292 /* Positively ensure only one .extern for any given symbol. */
5293 if (!TREE_ASM_WRITTEN (name_tree)
5294 && TREE_SYMBOL_REFERENCED (name_tree))
5296 TREE_ASM_WRITTEN (name_tree) = 1;
5297 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5298 `.global name .text' directive for every used but
5299 undefined function. If we don't, the linker may perform
5300 an optimization (skipping over the insns that set $gp)
5301 when it is unsafe. */
5302 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5304 fputs ("\t.globl ", asm_out_file);
5305 assemble_name (asm_out_file, p->name);
5306 fputs (" .text\n", asm_out_file);
5310 fputs ("\t.extern\t", asm_out_file);
5311 assemble_name (asm_out_file, p->name);
5312 fprintf (asm_out_file, ", %d\n", p->size);
5319 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5320 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5323 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5324 unsigned HOST_WIDE_INT size,
5327 /* If the target wants uninitialized const declarations in
5328 .rdata then don't put them in .comm. */
5329 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5330 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5331 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5333 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5334 targetm.asm_out.globalize_label (stream, name);
5336 readonly_data_section ();
5337 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5338 mips_declare_object (stream, name, "",
5339 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
5343 mips_declare_common_object (stream, name, "\n\t.comm\t",
5347 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5348 NAME is the name of the object and ALIGN is the required alignment
5349 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
5350 alignment argument. */
5353 mips_declare_common_object (FILE *stream, const char *name,
5354 const char *init_string,
5355 unsigned HOST_WIDE_INT size,
5356 unsigned int align, bool takes_alignment_p)
5358 if (!takes_alignment_p)
5360 size += (align / BITS_PER_UNIT) - 1;
5361 size -= size % (align / BITS_PER_UNIT);
5362 mips_declare_object (stream, name, init_string,
5363 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
5366 mips_declare_object (stream, name, init_string,
5367 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5368 size, align / BITS_PER_UNIT);
5371 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5372 macros, mark the symbol as written so that mips_file_end won't emit an
5373 .extern for it. STREAM is the output file, NAME is the name of the
5374 symbol, INIT_STRING is the string that should be written before the
5375 symbol and FINAL_STRING is the string that should be written after it.
5376 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5379 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5380 const char *final_string, ...)
5384 fputs (init_string, stream);
5385 assemble_name (stream, name);
5386 va_start (ap, final_string);
5387 vfprintf (stream, final_string, ap);
5390 if (!TARGET_EXPLICIT_RELOCS)
5392 tree name_tree = get_identifier (name);
5393 TREE_ASM_WRITTEN (name_tree) = 1;
5397 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5398 extern int size_directive_output;
5400 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5401 definitions except that it uses mips_declare_object() to emit the label. */
5404 mips_declare_object_name (FILE *stream, const char *name,
5405 tree decl ATTRIBUTE_UNUSED)
5407 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5408 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
5411 size_directive_output = 0;
5412 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5416 size_directive_output = 1;
5417 size = int_size_in_bytes (TREE_TYPE (decl));
5418 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5421 mips_declare_object (stream, name, "", ":\n", 0);
5424 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
5427 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5431 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
5432 if (!flag_inhibit_size_directive
5433 && DECL_SIZE (decl) != 0
5434 && !at_end && top_level
5435 && DECL_INITIAL (decl) == error_mark_node
5436 && !size_directive_output)
5440 size_directive_output = 1;
5441 size = int_size_in_bytes (TREE_TYPE (decl));
5442 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5447 /* Return true if X is a small data address that can be rewritten
5451 mips_rewrite_small_data_p (rtx x)
5453 enum mips_symbol_type symbol_type;
5455 return (TARGET_EXPLICIT_RELOCS
5456 && mips_symbolic_constant_p (x, &symbol_type)
5457 && symbol_type == SYMBOL_SMALL_DATA);
5461 /* A for_each_rtx callback for mips_small_data_pattern_p. */
5464 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5466 if (GET_CODE (*loc) == LO_SUM)
5469 return mips_rewrite_small_data_p (*loc);
5472 /* Return true if OP refers to small data symbols directly, not through
5476 mips_small_data_pattern_p (rtx op)
5478 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
5481 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
5484 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5486 if (mips_rewrite_small_data_p (*loc))
5487 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
5489 if (GET_CODE (*loc) == LO_SUM)
5495 /* If possible, rewrite OP so that it refers to small data using
5496 explicit relocations. */
5499 mips_rewrite_small_data (rtx op)
5501 op = copy_insn (op);
5502 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
5506 /* Return true if the current function has an insn that implicitly
5510 mips_function_has_gp_insn (void)
5512 /* Don't bother rechecking if we found one last time. */
5513 if (!cfun->machine->has_gp_insn_p)
5517 push_topmost_sequence ();
5518 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5520 && GET_CODE (PATTERN (insn)) != USE
5521 && GET_CODE (PATTERN (insn)) != CLOBBER
5522 && (get_attr_got (insn) != GOT_UNSET
5523 || small_data_pattern (PATTERN (insn), VOIDmode)))
5525 pop_topmost_sequence ();
5527 cfun->machine->has_gp_insn_p = (insn != 0);
5529 return cfun->machine->has_gp_insn_p;
5533 /* Return the register that should be used as the global pointer
5534 within this function. Return 0 if the function doesn't need
5535 a global pointer. */
5538 mips_global_pointer (void)
5542 /* $gp is always available in non-abicalls code. */
5543 if (!TARGET_ABICALLS)
5544 return GLOBAL_POINTER_REGNUM;
5546 /* We must always provide $gp when it is used implicitly. */
5547 if (!TARGET_EXPLICIT_RELOCS)
5548 return GLOBAL_POINTER_REGNUM;
5550 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
5552 if (current_function_profile)
5553 return GLOBAL_POINTER_REGNUM;
5555 /* If the function has a nonlocal goto, $gp must hold the correct
5556 global pointer for the target function. */
5557 if (current_function_has_nonlocal_goto)
5558 return GLOBAL_POINTER_REGNUM;
5560 /* If the gp is never referenced, there's no need to initialize it.
5561 Note that reload can sometimes introduce constant pool references
5562 into a function that otherwise didn't need them. For example,
5563 suppose we have an instruction like:
5565 (set (reg:DF R1) (float:DF (reg:SI R2)))
5567 If R2 turns out to be constant such as 1, the instruction may have a
5568 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
5569 using this constant if R2 doesn't get allocated to a register.
5571 In cases like these, reload will have added the constant to the pool
5572 but no instruction will yet refer to it. */
5573 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
5574 && !current_function_uses_const_pool
5575 && !mips_function_has_gp_insn ())
5578 /* We need a global pointer, but perhaps we can use a call-clobbered
5579 register instead of $gp. */
5580 if (TARGET_NEWABI && current_function_is_leaf)
5581 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5582 if (!regs_ever_live[regno]
5583 && call_used_regs[regno]
5584 && !fixed_regs[regno]
5585 && regno != PIC_FUNCTION_ADDR_REGNUM)
5588 return GLOBAL_POINTER_REGNUM;
5592 /* Return true if the current function must save REGNO. */
5595 mips_save_reg_p (unsigned int regno)
5597 /* We only need to save $gp for NewABI PIC. */
5598 if (regno == GLOBAL_POINTER_REGNUM)
5599 return (TARGET_ABICALLS && TARGET_NEWABI
5600 && cfun->machine->global_pointer == regno);
5602 /* Check call-saved registers. */
5603 if (regs_ever_live[regno] && !call_used_regs[regno])
5606 /* We need to save the old frame pointer before setting up a new one. */
5607 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5610 /* We need to save the incoming return address if it is ever clobbered
5611 within the function. */
5612 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
5619 return_type = DECL_RESULT (current_function_decl);
5621 /* $18 is a special case in mips16 code. It may be used to call
5622 a function which returns a floating point value, but it is
5623 marked in call_used_regs. */
5624 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
5627 /* $31 is also a special case. It will be used to copy a return
5628 value into the floating point registers if the return value is
5630 if (regno == GP_REG_FIRST + 31
5631 && mips16_hard_float
5632 && !aggregate_value_p (return_type, current_function_decl)
5633 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
5634 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
5642 /* Return the bytes needed to compute the frame pointer from the current
5643 stack pointer. SIZE is the size (in bytes) of the local variables.
5645 Mips stack frames look like:
5647 Before call After call
5648 +-----------------------+ +-----------------------+
5651 | caller's temps. | | caller's temps. |
5653 +-----------------------+ +-----------------------+
5655 | arguments on stack. | | arguments on stack. |
5657 +-----------------------+ +-----------------------+
5658 | 4 words to save | | 4 words to save |
5659 | arguments passed | | arguments passed |
5660 | in registers, even | | in registers, even |
5661 SP->| if not passed. | VFP->| if not passed. |
5662 +-----------------------+ +-----------------------+
5664 | fp register save |
5666 +-----------------------+
5668 | gp register save |
5670 +-----------------------+
5674 +-----------------------+
5676 | alloca allocations |
5678 +-----------------------+
5680 | GP save for V.4 abi |
5682 +-----------------------+
5684 | arguments on stack |
5686 +-----------------------+
5688 | arguments passed |
5689 | in registers, even |
5690 low SP->| if not passed. |
5691 memory +-----------------------+
5696 compute_frame_size (HOST_WIDE_INT size)
5699 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
5700 HOST_WIDE_INT var_size; /* # bytes that variables take up */
5701 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
5702 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
5703 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
5704 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
5705 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
5706 unsigned int mask; /* mask of saved gp registers */
5707 unsigned int fmask; /* mask of saved fp registers */
5709 cfun->machine->global_pointer = mips_global_pointer ();
5715 var_size = MIPS_STACK_ALIGN (size);
5716 args_size = current_function_outgoing_args_size;
5717 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
5719 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
5720 functions. If the function has local variables, we're committed
5721 to allocating it anyway. Otherwise reclaim it here. */
5722 if (var_size == 0 && current_function_is_leaf)
5723 cprestore_size = args_size = 0;
5725 /* The MIPS 3.0 linker does not like functions that dynamically
5726 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
5727 looks like we are trying to create a second frame pointer to the
5728 function, so allocate some stack space to make it happy. */
5730 if (args_size == 0 && current_function_calls_alloca)
5731 args_size = 4 * UNITS_PER_WORD;
5733 total_size = var_size + args_size + cprestore_size;
5735 /* Calculate space needed for gp registers. */
5736 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5737 if (mips_save_reg_p (regno))
5739 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5740 mask |= 1 << (regno - GP_REG_FIRST);
5743 /* We need to restore these for the handler. */
5744 if (current_function_calls_eh_return)
5749 regno = EH_RETURN_DATA_REGNO (i);
5750 if (regno == INVALID_REGNUM)
5752 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5753 mask |= 1 << (regno - GP_REG_FIRST);
5757 /* This loop must iterate over the same space as its companion in
5758 save_restore_insns. */
5759 for (regno = (FP_REG_LAST - FP_INC + 1);
5760 regno >= FP_REG_FIRST;
5763 if (mips_save_reg_p (regno))
5765 fp_reg_size += FP_INC * UNITS_PER_FPREG;
5766 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
5770 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
5771 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
5773 /* Add in space reserved on the stack by the callee for storing arguments
5774 passed in registers. */
5776 total_size += MIPS_STACK_ALIGN (current_function_pretend_args_size);
5778 /* Save other computed information. */
5779 cfun->machine->frame.total_size = total_size;
5780 cfun->machine->frame.var_size = var_size;
5781 cfun->machine->frame.args_size = args_size;
5782 cfun->machine->frame.cprestore_size = cprestore_size;
5783 cfun->machine->frame.gp_reg_size = gp_reg_size;
5784 cfun->machine->frame.fp_reg_size = fp_reg_size;
5785 cfun->machine->frame.mask = mask;
5786 cfun->machine->frame.fmask = fmask;
5787 cfun->machine->frame.initialized = reload_completed;
5788 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
5789 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
5793 HOST_WIDE_INT offset;
5795 offset = (args_size + cprestore_size + var_size
5796 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
5797 cfun->machine->frame.gp_sp_offset = offset;
5798 cfun->machine->frame.gp_save_offset = offset - total_size;
5802 cfun->machine->frame.gp_sp_offset = 0;
5803 cfun->machine->frame.gp_save_offset = 0;
5808 HOST_WIDE_INT offset;
5810 offset = (args_size + cprestore_size + var_size
5811 + gp_reg_rounded + fp_reg_size
5812 - FP_INC * UNITS_PER_FPREG);
5813 cfun->machine->frame.fp_sp_offset = offset;
5814 cfun->machine->frame.fp_save_offset = offset - total_size;
5818 cfun->machine->frame.fp_sp_offset = 0;
5819 cfun->machine->frame.fp_save_offset = 0;
5822 /* Ok, we're done. */
5826 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
5827 pointer or argument pointer. TO is either the stack pointer or
5828 hard frame pointer. */
5831 mips_initial_elimination_offset (int from, int to)
5833 HOST_WIDE_INT offset;
5835 compute_frame_size (get_frame_size ());
5837 /* Set OFFSET to the offset from the stack pointer. */
5840 case FRAME_POINTER_REGNUM:
5844 case ARG_POINTER_REGNUM:
5845 offset = cfun->machine->frame.total_size;
5847 offset -= current_function_pretend_args_size;
5854 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
5855 offset -= cfun->machine->frame.args_size;
5860 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
5861 back to a previous frame. */
5863 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
5868 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
5871 /* Use FN to save or restore register REGNO. MODE is the register's
5872 mode and OFFSET is the offset of its save slot from the current
5876 mips_save_restore_reg (enum machine_mode mode, int regno,
5877 HOST_WIDE_INT offset, mips_save_restore_fn fn)
5881 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
5883 fn (gen_rtx_REG (mode, regno), mem);
5887 /* Call FN for each register that is saved by the current function.
5888 SP_OFFSET is the offset of the current stack pointer from the start
5892 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
5894 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
5896 enum machine_mode fpr_mode;
5897 HOST_WIDE_INT offset;
5900 /* Save registers starting from high to low. The debuggers prefer at least
5901 the return register be stored at func+4, and also it allows us not to
5902 need a nop in the epilog if at least one register is reloaded in
5903 addition to return address. */
5904 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
5905 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
5906 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
5908 mips_save_restore_reg (gpr_mode, regno, offset, fn);
5909 offset -= GET_MODE_SIZE (gpr_mode);
5912 /* This loop must iterate over the same space as its companion in
5913 compute_frame_size. */
5914 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
5915 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
5916 for (regno = (FP_REG_LAST - FP_INC + 1);
5917 regno >= FP_REG_FIRST;
5919 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
5921 mips_save_restore_reg (fpr_mode, regno, offset, fn);
5922 offset -= GET_MODE_SIZE (fpr_mode);
5927 /* If we're generating n32 or n64 abicalls, and the current function
5928 does not use $28 as its global pointer, emit a cplocal directive.
5929 Use pic_offset_table_rtx as the argument to the directive. */
5932 mips_output_cplocal (void)
5934 if (!TARGET_EXPLICIT_RELOCS
5935 && cfun->machine->global_pointer > 0
5936 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
5937 output_asm_insn (".cplocal %+", 0);
5940 /* If we're generating n32 or n64 abicalls, emit instructions
5941 to set up the global pointer. */
5944 mips_emit_loadgp (void)
5946 if (TARGET_ABICALLS && TARGET_NEWABI && cfun->machine->global_pointer > 0)
5948 rtx addr, offset, incoming_address;
5950 addr = XEXP (DECL_RTL (current_function_decl), 0);
5951 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
5952 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
5953 emit_insn (gen_loadgp (offset, incoming_address));
5954 if (!TARGET_EXPLICIT_RELOCS)
5955 emit_insn (gen_loadgp_blockage ());
5959 /* Set up the stack and frame (if desired) for the function. */
5962 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
5965 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
5967 #ifdef SDB_DEBUGGING_INFO
5968 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
5969 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
5972 /* In mips16 mode, we may need to generate a 32 bit to handle
5973 floating point arguments. The linker will arrange for any 32 bit
5974 functions to call this stub, which will then jump to the 16 bit
5976 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
5977 && current_function_args_info.fp_code != 0)
5978 build_mips16_function_stub (file);
5980 if (!FUNCTION_NAME_ALREADY_DECLARED)
5982 /* Get the function name the same way that toplev.c does before calling
5983 assemble_start_function. This is needed so that the name used here
5984 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
5985 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
5987 if (!flag_inhibit_size_directive)
5989 fputs ("\t.ent\t", file);
5990 assemble_name (file, fnname);
5994 assemble_name (file, fnname);
5995 fputs (":\n", file);
5998 /* Stop mips_file_end from treating this function as external. */
5999 if (TARGET_IRIX && mips_abi == ABI_32)
6000 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6002 if (!flag_inhibit_size_directive)
6004 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6006 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6007 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6008 ", args= " HOST_WIDE_INT_PRINT_DEC
6009 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6010 (reg_names[(frame_pointer_needed)
6011 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6012 ((frame_pointer_needed && TARGET_MIPS16)
6013 ? tsize - cfun->machine->frame.args_size
6015 reg_names[GP_REG_FIRST + 31],
6016 cfun->machine->frame.var_size,
6017 cfun->machine->frame.num_gp,
6018 cfun->machine->frame.num_fp,
6019 cfun->machine->frame.args_size,
6020 cfun->machine->frame.cprestore_size);
6022 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6023 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6024 cfun->machine->frame.mask,
6025 cfun->machine->frame.gp_save_offset);
6026 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6027 cfun->machine->frame.fmask,
6028 cfun->machine->frame.fp_save_offset);
6031 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6032 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6035 if (TARGET_ABICALLS && !TARGET_NEWABI && cfun->machine->global_pointer > 0)
6037 /* Handle the initialization of $gp for SVR4 PIC. */
6038 if (!cfun->machine->all_noreorder_p)
6039 output_asm_insn ("%(.cpload\t%^%)", 0);
6041 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6043 else if (cfun->machine->all_noreorder_p)
6044 output_asm_insn ("%(%<", 0);
6046 /* Tell the assembler which register we're using as the global
6047 pointer. This is needed for thunks, since they can use either
6048 explicit relocs or assembler macros. */
6049 mips_output_cplocal ();
6052 /* Make the last instruction frame related and note that it performs
6053 the operation described by FRAME_PATTERN. */
6056 mips_set_frame_expr (rtx frame_pattern)
6060 insn = get_last_insn ();
6061 RTX_FRAME_RELATED_P (insn) = 1;
6062 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6068 /* Return a frame-related rtx that stores REG at MEM.
6069 REG must be a single register. */
6072 mips_frame_set (rtx mem, rtx reg)
6074 rtx set = gen_rtx_SET (VOIDmode, mem, reg);
6075 RTX_FRAME_RELATED_P (set) = 1;
6080 /* Save register REG to MEM. Make the instruction frame-related. */
6083 mips_save_reg (rtx reg, rtx mem)
6085 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6089 if (mips_split_64bit_move_p (mem, reg))
6090 mips_split_64bit_move (mem, reg);
6092 emit_move_insn (mem, reg);
6094 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6095 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6096 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6101 && REGNO (reg) != GP_REG_FIRST + 31
6102 && !M16_REG_P (REGNO (reg)))
6104 /* Save a non-mips16 register by moving it through a temporary.
6105 We don't need to do this for $31 since there's a special
6106 instruction for it. */
6107 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6108 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6111 emit_move_insn (mem, reg);
6113 mips_set_frame_expr (mips_frame_set (mem, reg));
6118 /* Expand the prologue into a bunch of separate insns. */
6121 mips_expand_prologue (void)
6125 if (cfun->machine->global_pointer > 0)
6126 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6128 size = compute_frame_size (get_frame_size ());
6130 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6131 bytes beforehand; this is enough to cover the register save area
6132 without going out of range. */
6133 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6135 HOST_WIDE_INT step1;
6137 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6138 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6140 GEN_INT (-step1)))) = 1;
6142 mips_for_each_saved_reg (size, mips_save_reg);
6145 /* Allocate the rest of the frame. */
6148 if (SMALL_OPERAND (-size))
6149 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6151 GEN_INT (-size)))) = 1;
6154 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6157 /* There are no instructions to add or subtract registers
6158 from the stack pointer, so use the frame pointer as a
6159 temporary. We should always be using a frame pointer
6160 in this case anyway. */
6161 gcc_assert (frame_pointer_needed);
6162 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6163 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6164 hard_frame_pointer_rtx,
6165 MIPS_PROLOGUE_TEMP (Pmode)));
6166 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6169 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6171 MIPS_PROLOGUE_TEMP (Pmode)));
6173 /* Describe the combined effect of the previous instructions. */
6175 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6176 plus_constant (stack_pointer_rtx, -size)));
6180 /* Set up the frame pointer, if we're using one. In mips16 code,
6181 we point the frame pointer ahead of the outgoing argument area.
6182 This should allow more variables & incoming arguments to be
6183 accessed with unextended instructions. */
6184 if (frame_pointer_needed)
6186 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6188 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6190 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6195 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6196 stack_pointer_rtx)) = 1;
6199 /* If generating o32/o64 abicalls, save $gp on the stack. */
6200 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6201 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6203 mips_emit_loadgp ();
6205 /* If we are profiling, make sure no instructions are scheduled before
6206 the call to mcount. */
6208 if (current_function_profile)
6209 emit_insn (gen_blockage ());
6212 /* Do any necessary cleanup after a function to restore stack, frame,
6215 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6218 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6219 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6221 /* Reinstate the normal $gp. */
6222 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6223 mips_output_cplocal ();
6225 if (cfun->machine->all_noreorder_p)
6227 /* Avoid using %>%) since it adds excess whitespace. */
6228 output_asm_insn (".set\tmacro", 0);
6229 output_asm_insn (".set\treorder", 0);
6230 set_noreorder = set_nomacro = 0;
6233 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6237 /* Get the function name the same way that toplev.c does before calling
6238 assemble_start_function. This is needed so that the name used here
6239 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6240 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6241 fputs ("\t.end\t", file);
6242 assemble_name (file, fnname);
6247 /* Emit instructions to restore register REG from slot MEM. */
6250 mips_restore_reg (rtx reg, rtx mem)
6252 /* There's no mips16 instruction to load $31 directly. Load into
6253 $7 instead and adjust the return insn appropriately. */
6254 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6255 reg = gen_rtx_REG (GET_MODE (reg), 7);
6257 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6259 /* Can't restore directly; move through a temporary. */
6260 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6261 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6264 emit_move_insn (reg, mem);
6268 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6269 if this epilogue precedes a sibling call, false if it is for a normal
6270 "epilogue" pattern. */
6273 mips_expand_epilogue (int sibcall_p)
6275 HOST_WIDE_INT step1, step2;
6278 if (!sibcall_p && mips_can_use_return_insn ())
6280 emit_jump_insn (gen_return ());
6284 /* Split the frame into two. STEP1 is the amount of stack we should
6285 deallocate before restoring the registers. STEP2 is the amount we
6286 should deallocate afterwards.
6288 Start off by assuming that no registers need to be restored. */
6289 step1 = cfun->machine->frame.total_size;
6292 /* Work out which register holds the frame address. Account for the
6293 frame pointer offset used by mips16 code. */
6294 if (!frame_pointer_needed)
6295 base = stack_pointer_rtx;
6298 base = hard_frame_pointer_rtx;
6300 step1 -= cfun->machine->frame.args_size;
6303 /* If we need to restore registers, deallocate as much stack as
6304 possible in the second step without going out of range. */
6305 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6307 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6311 /* Set TARGET to BASE + STEP1. */
6317 /* Get an rtx for STEP1 that we can add to BASE. */
6318 adjust = GEN_INT (step1);
6319 if (!SMALL_OPERAND (step1))
6321 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6322 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6325 /* Normal mode code can copy the result straight into $sp. */
6327 target = stack_pointer_rtx;
6329 emit_insn (gen_add3_insn (target, base, adjust));
6332 /* Copy TARGET into the stack pointer. */
6333 if (target != stack_pointer_rtx)
6334 emit_move_insn (stack_pointer_rtx, target);
6336 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6337 implicitly used by all SYMBOL_REFs. We must emit a blockage
6338 insn before restoring it. */
6339 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6340 emit_insn (gen_blockage ());
6342 /* Restore the registers. */
6343 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
6346 /* Deallocate the final bit of the frame. */
6348 emit_insn (gen_add3_insn (stack_pointer_rtx,
6352 /* Add in the __builtin_eh_return stack adjustment. We need to
6353 use a temporary in mips16 code. */
6354 if (current_function_calls_eh_return)
6358 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6359 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6360 MIPS_EPILOGUE_TEMP (Pmode),
6361 EH_RETURN_STACKADJ_RTX));
6362 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6365 emit_insn (gen_add3_insn (stack_pointer_rtx,
6367 EH_RETURN_STACKADJ_RTX));
6372 /* The mips16 loads the return address into $7, not $31. */
6373 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6374 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6375 GP_REG_FIRST + 7)));
6377 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6378 GP_REG_FIRST + 31)));
6382 /* Return nonzero if this function is known to have a null epilogue.
6383 This allows the optimizer to omit jumps to jumps if no stack
6387 mips_can_use_return_insn (void)
6391 if (! reload_completed)
6394 if (regs_ever_live[31] || current_function_profile)
6397 return_type = DECL_RESULT (current_function_decl);
6399 /* In mips16 mode, a function which returns a floating point value
6400 needs to arrange to copy the return value into the floating point
6403 && mips16_hard_float
6404 && ! aggregate_value_p (return_type, current_function_decl)
6405 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6406 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6409 if (cfun->machine->frame.initialized)
6410 return cfun->machine->frame.total_size == 0;
6412 return compute_frame_size (get_frame_size ()) == 0;
6415 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
6416 in order to avoid duplicating too much logic from elsewhere. */
6419 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
6420 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
6423 rtx this, temp1, temp2, insn, fnaddr;
6425 /* Pretend to be a post-reload pass while generating rtl. */
6427 reload_completed = 1;
6428 reset_block_changes ();
6430 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
6431 for TARGET_NEWABI since the latter is a call-saved register. */
6432 if (TARGET_ABICALLS)
6433 cfun->machine->global_pointer
6434 = REGNO (pic_offset_table_rtx)
6435 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
6437 /* Set up the global pointer for n32 or n64 abicalls. */
6438 mips_emit_loadgp ();
6440 /* We need two temporary registers in some cases. */
6441 temp1 = gen_rtx_REG (Pmode, 2);
6442 temp2 = gen_rtx_REG (Pmode, 3);
6444 /* Find out which register contains the "this" pointer. */
6445 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
6446 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
6448 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
6450 /* Add DELTA to THIS. */
6453 rtx offset = GEN_INT (delta);
6454 if (!SMALL_OPERAND (delta))
6456 emit_move_insn (temp1, offset);
6459 emit_insn (gen_add3_insn (this, this, offset));
6462 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6463 if (vcall_offset != 0)
6467 /* Set TEMP1 to *THIS. */
6468 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
6470 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
6471 addr = mips_add_offset (temp2, temp1, vcall_offset);
6473 /* Load the offset and add it to THIS. */
6474 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
6475 emit_insn (gen_add3_insn (this, this, temp1));
6478 /* Jump to the target function. Use a sibcall if direct jumps are
6479 allowed, otherwise load the address into a register first. */
6480 fnaddr = XEXP (DECL_RTL (function), 0);
6481 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
6483 /* This is messy. gas treats "la $25,foo" as part of a call
6484 sequence and may allow a global "foo" to be lazily bound.
6485 The general move patterns therefore reject this combination.
6487 In this context, lazy binding would actually be OK for o32 and o64,
6488 but it's still wrong for n32 and n64; see mips_load_call_address.
6489 We must therefore load the address via a temporary register if
6490 mips_dangerous_for_la25_p.
6492 If we jump to the temporary register rather than $25, the assembler
6493 can use the move insn to fill the jump's delay slot. */
6494 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
6495 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6496 mips_load_call_address (temp1, fnaddr, true);
6498 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
6499 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
6500 emit_jump_insn (gen_indirect_jump (temp1));
6504 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
6505 SIBLING_CALL_P (insn) = 1;
6508 /* Run just enough of rest_of_compilation. This sequence was
6509 "borrowed" from alpha.c. */
6510 insn = get_insns ();
6511 insn_locators_initialize ();
6512 split_all_insns_noflow ();
6514 mips16_lay_out_constants ();
6515 shorten_branches (insn);
6516 final_start_function (insn, file, 1);
6517 final (insn, file, 1, 0);
6518 final_end_function ();
6520 /* Clean up the vars set above. Note that final_end_function resets
6521 the global pointer for us. */
6522 reload_completed = 0;
6526 /* Returns nonzero if X contains a SYMBOL_REF. */
6529 symbolic_expression_p (rtx x)
6531 if (GET_CODE (x) == SYMBOL_REF)
6534 if (GET_CODE (x) == CONST)
6535 return symbolic_expression_p (XEXP (x, 0));
6538 return symbolic_expression_p (XEXP (x, 0));
6540 if (ARITHMETIC_P (x))
6541 return (symbolic_expression_p (XEXP (x, 0))
6542 || symbolic_expression_p (XEXP (x, 1)));
6547 /* Choose the section to use for the constant rtx expression X that has
6551 mips_select_rtx_section (enum machine_mode mode, rtx x,
6552 unsigned HOST_WIDE_INT align)
6556 /* In mips16 mode, the constant table always goes in the same section
6557 as the function, so that constants can be loaded using PC relative
6559 function_section (current_function_decl);
6561 else if (TARGET_EMBEDDED_DATA)
6563 /* For embedded applications, always put constants in read-only data,
6564 in order to reduce RAM usage. */
6565 mergeable_constant_section (mode, align, 0);
6569 /* For hosted applications, always put constants in small data if
6570 possible, as this gives the best performance. */
6571 /* ??? Consider using mergeable small data sections. */
6573 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
6574 && mips_section_threshold > 0)
6575 named_section (0, ".sdata", 0);
6576 else if (flag_pic && symbolic_expression_p (x))
6577 named_section (0, ".data.rel.ro", 3);
6579 mergeable_constant_section (mode, align, 0);
6583 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6585 The complication here is that, with the combination TARGET_ABICALLS
6586 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6587 therefore not be included in the read-only part of a DSO. Handle such
6588 cases by selecting a normal data section instead of a read-only one.
6589 The logic apes that in default_function_rodata_section. */
6592 mips_function_rodata_section (tree decl)
6594 if (!TARGET_ABICALLS || TARGET_GPWORD)
6595 default_function_rodata_section (decl);
6596 else if (decl && DECL_SECTION_NAME (decl))
6598 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6599 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6601 char *rname = ASTRDUP (name);
6603 named_section_real (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6605 else if (flag_function_sections && flag_data_sections
6606 && strncmp (name, ".text.", 6) == 0)
6608 char *rname = ASTRDUP (name);
6609 memcpy (rname + 1, "data", 4);
6610 named_section_flags (rname, SECTION_WRITE);
6619 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
6620 access DECL using %gp_rel(...)($gp). */
6623 mips_in_small_data_p (tree decl)
6627 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6630 /* We don't yet generate small-data references for -mabicalls. See related
6631 -G handling in override_options. */
6632 if (TARGET_ABICALLS)
6635 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6639 /* Reject anything that isn't in a known small-data section. */
6640 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6641 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6644 /* If a symbol is defined externally, the assembler will use the
6645 usual -G rules when deciding how to implement macros. */
6646 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
6649 else if (TARGET_EMBEDDED_DATA)
6651 /* Don't put constants into the small data section: we want them
6652 to be in ROM rather than RAM. */
6653 if (TREE_CODE (decl) != VAR_DECL)
6656 if (TREE_READONLY (decl)
6657 && !TREE_SIDE_EFFECTS (decl)
6658 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
6662 size = int_size_in_bytes (TREE_TYPE (decl));
6663 return (size > 0 && size <= mips_section_threshold);
6666 /* See whether VALTYPE is a record whose fields should be returned in
6667 floating-point registers. If so, return the number of fields and
6668 list them in FIELDS (which should have two elements). Return 0
6671 For n32 & n64, a structure with one or two fields is returned in
6672 floating-point registers as long as every field has a floating-point
6676 mips_fpr_return_fields (tree valtype, tree *fields)
6684 if (TREE_CODE (valtype) != RECORD_TYPE)
6688 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
6690 if (TREE_CODE (field) != FIELD_DECL)
6693 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
6699 fields[i++] = field;
6705 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
6706 a value in the most significant part of $2/$3 if:
6708 - the target is big-endian;
6710 - the value has a structure or union type (we generalize this to
6711 cover aggregates from other languages too); and
6713 - the structure is not returned in floating-point registers. */
6716 mips_return_in_msb (tree valtype)
6720 return (TARGET_NEWABI
6721 && TARGET_BIG_ENDIAN
6722 && AGGREGATE_TYPE_P (valtype)
6723 && mips_fpr_return_fields (valtype, fields) == 0);
6727 /* Return a composite value in a pair of floating-point registers.
6728 MODE1 and OFFSET1 are the mode and byte offset for the first value,
6729 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
6732 For n32 & n64, $f0 always holds the first value and $f2 the second.
6733 Otherwise the values are packed together as closely as possible. */
6736 mips_return_fpr_pair (enum machine_mode mode,
6737 enum machine_mode mode1, HOST_WIDE_INT offset1,
6738 enum machine_mode mode2, HOST_WIDE_INT offset2)
6742 inc = (TARGET_NEWABI ? 2 : FP_INC);
6743 return gen_rtx_PARALLEL
6746 gen_rtx_EXPR_LIST (VOIDmode,
6747 gen_rtx_REG (mode1, FP_RETURN),
6749 gen_rtx_EXPR_LIST (VOIDmode,
6750 gen_rtx_REG (mode2, FP_RETURN + inc),
6751 GEN_INT (offset2))));
6756 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
6757 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
6758 VALTYPE is null and MODE is the mode of the return value. */
6761 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
6762 enum machine_mode mode)
6769 mode = TYPE_MODE (valtype);
6770 unsignedp = TYPE_UNSIGNED (valtype);
6772 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
6773 true, we must promote the mode just as PROMOTE_MODE does. */
6774 mode = promote_mode (valtype, mode, &unsignedp, 1);
6776 /* Handle structures whose fields are returned in $f0/$f2. */
6777 switch (mips_fpr_return_fields (valtype, fields))
6780 return gen_rtx_REG (mode, FP_RETURN);
6783 return mips_return_fpr_pair (mode,
6784 TYPE_MODE (TREE_TYPE (fields[0])),
6785 int_byte_position (fields[0]),
6786 TYPE_MODE (TREE_TYPE (fields[1])),
6787 int_byte_position (fields[1]));
6790 /* If a value is passed in the most significant part of a register, see
6791 whether we have to round the mode up to a whole number of words. */
6792 if (mips_return_in_msb (valtype))
6794 HOST_WIDE_INT size = int_size_in_bytes (valtype);
6795 if (size % UNITS_PER_WORD != 0)
6797 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
6798 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
6802 /* For EABI, the class of return register depends entirely on MODE.
6803 For example, "struct { some_type x; }" and "union { some_type x; }"
6804 are returned in the same way as a bare "some_type" would be.
6805 Other ABIs only use FPRs for scalar, complex or vector types. */
6806 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
6807 return gen_rtx_REG (mode, GP_RETURN);
6810 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
6811 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
6812 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
6813 return gen_rtx_REG (mode, FP_RETURN);
6815 /* Handle long doubles for n32 & n64. */
6817 return mips_return_fpr_pair (mode,
6819 DImode, GET_MODE_SIZE (mode) / 2);
6821 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
6822 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
6823 return mips_return_fpr_pair (mode,
6824 GET_MODE_INNER (mode), 0,
6825 GET_MODE_INNER (mode),
6826 GET_MODE_SIZE (mode) / 2);
6828 return gen_rtx_REG (mode, GP_RETURN);
6831 /* Return nonzero when an argument must be passed by reference. */
6834 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6835 enum machine_mode mode, tree type,
6836 bool named ATTRIBUTE_UNUSED)
6838 if (mips_abi == ABI_EABI)
6842 /* ??? How should SCmode be handled? */
6843 if (type == NULL_TREE || mode == DImode || mode == DFmode)
6846 size = int_size_in_bytes (type);
6847 return size == -1 || size > UNITS_PER_WORD;
6851 /* If we have a variable-sized parameter, we have no choice. */
6852 return targetm.calls.must_pass_in_stack (mode, type);
6857 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
6858 enum machine_mode mode ATTRIBUTE_UNUSED,
6859 tree type ATTRIBUTE_UNUSED, bool named)
6861 return mips_abi == ABI_EABI && named;
6864 /* Return true if registers of class CLASS cannot change from mode FROM
6868 mips_cannot_change_mode_class (enum machine_mode from,
6869 enum machine_mode to, enum reg_class class)
6871 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
6872 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
6874 if (TARGET_BIG_ENDIAN)
6876 /* When a multi-word value is stored in paired floating-point
6877 registers, the first register always holds the low word.
6878 We therefore can't allow FPRs to change between single-word
6879 and multi-word modes. */
6880 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
6885 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
6886 in LO and HI, the high word always comes first. We therefore
6887 can't allow values stored in HI to change between single-word
6888 and multi-word modes. */
6889 if (reg_classes_intersect_p (HI_REG, class))
6893 /* Loading a 32-bit value into a 64-bit floating-point register
6894 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
6895 We can't allow 64-bit float registers to change from SImode to
6899 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
6900 && reg_classes_intersect_p (FP_REGS, class))
6905 /* Return true if X should not be moved directly into register $25.
6906 We need this because many versions of GAS will treat "la $25,foo" as
6907 part of a call sequence and so allow a global "foo" to be lazily bound. */
6910 mips_dangerous_for_la25_p (rtx x)
6912 HOST_WIDE_INT offset;
6914 if (TARGET_EXPLICIT_RELOCS)
6917 mips_split_const (x, &x, &offset);
6918 return global_got_operand (x, VOIDmode);
6921 /* Implement PREFERRED_RELOAD_CLASS. */
6924 mips_preferred_reload_class (rtx x, enum reg_class class)
6926 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
6929 if (TARGET_HARD_FLOAT
6930 && FLOAT_MODE_P (GET_MODE (x))
6931 && reg_class_subset_p (FP_REGS, class))
6934 if (reg_class_subset_p (GR_REGS, class))
6937 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
6943 /* This function returns the register class required for a secondary
6944 register when copying between one of the registers in CLASS, and X,
6945 using MODE. If IN_P is nonzero, the copy is going from X to the
6946 register, otherwise the register is the source. A return value of
6947 NO_REGS means that no secondary register is required. */
6950 mips_secondary_reload_class (enum reg_class class,
6951 enum machine_mode mode, rtx x, int in_p)
6953 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
6957 if (REG_P (x)|| GET_CODE (x) == SUBREG)
6958 regno = true_regnum (x);
6960 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
6962 if (mips_dangerous_for_la25_p (x))
6965 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
6969 /* Copying from HI or LO to anywhere other than a general register
6970 requires a general register. */
6971 if (class == HI_REG || class == LO_REG || class == MD_REGS)
6973 if (TARGET_MIPS16 && in_p)
6975 /* We can't really copy to HI or LO at all in mips16 mode. */
6978 return gp_reg_p ? NO_REGS : gr_regs;
6980 if (MD_REG_P (regno))
6982 if (TARGET_MIPS16 && ! in_p)
6984 /* We can't really copy to HI or LO at all in mips16 mode. */
6987 return class == gr_regs ? NO_REGS : gr_regs;
6990 /* We can only copy a value to a condition code register from a
6991 floating point register, and even then we require a scratch
6992 floating point register. We can only copy a value out of a
6993 condition code register into a general register. */
6994 if (class == ST_REGS)
6998 return gp_reg_p ? NO_REGS : gr_regs;
7000 if (ST_REG_P (regno))
7004 return class == gr_regs ? NO_REGS : gr_regs;
7007 if (class == FP_REGS)
7011 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7014 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7016 /* We can use the l.s and l.d macros to load floating-point
7017 constants. ??? For l.s, we could probably get better
7018 code by returning GR_REGS here. */
7021 else if (gp_reg_p || x == CONST0_RTX (mode))
7023 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7026 else if (FP_REG_P (regno))
7028 /* In this case we can use mov.s or mov.d. */
7033 /* Otherwise, we need to reload through an integer register. */
7038 /* In mips16 mode, going between memory and anything but M16_REGS
7039 requires an M16_REG. */
7042 if (class != M16_REGS && class != M16_NA_REGS)
7050 if (class == M16_REGS || class == M16_NA_REGS)
7059 /* Implement CLASS_MAX_NREGS.
7061 Usually all registers are word-sized. The only supported exception
7062 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7063 registers. A word-based calculation is correct even in that case,
7064 since -msingle-float disallows multi-FPR values.
7066 The FP status registers are an exception to this rule. They are always
7067 4 bytes wide as they only hold condition code modes, and CCmode is always
7068 considered to be 4 bytes wide. */
7071 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7072 enum machine_mode mode)
7074 if (class == ST_REGS)
7075 return (GET_MODE_SIZE (mode) + 3) / 4;
7077 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7081 mips_valid_pointer_mode (enum machine_mode mode)
7083 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7086 /* Define this so that we can deal with a testcase like:
7088 char foo __attribute__ ((mode (SI)));
7090 then compiled with -mabi=64 and -mint64. We have no
7091 32-bit type at that point and so the default case
7095 mips_scalar_mode_supported_p (enum machine_mode mode)
7105 /* Handled via optabs.c. */
7107 return TARGET_64BIT;
7113 /* LONG_DOUBLE_TYPE_SIZE is 128 for TARGET_NEWABI only. */
7115 return TARGET_NEWABI;
7123 /* Target hook for vector_mode_supported_p. */
7125 mips_vector_mode_supported_p (enum machine_mode mode)
7127 if (mode == V2SFmode && TARGET_PAIRED_SINGLE_FLOAT)
7133 /* If we can access small data directly (using gp-relative relocation
7134 operators) return the small data pointer, otherwise return null.
7136 For each mips16 function which refers to GP relative symbols, we
7137 use a pseudo register, initialized at the start of the function, to
7138 hold the $gp value. */
7141 mips16_gp_pseudo_reg (void)
7143 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7148 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7150 /* We want to initialize this to a value which gcc will believe
7153 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7154 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7155 gen_rtx_CONST (Pmode, unspec));
7156 insn = get_insns ();
7159 push_topmost_sequence ();
7160 /* We need to emit the initialization after the FUNCTION_BEG
7161 note, so that it will be integrated. */
7162 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7164 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7166 if (scan == NULL_RTX)
7167 scan = get_insns ();
7168 insn = emit_insn_after (insn, scan);
7169 pop_topmost_sequence ();
7172 return cfun->machine->mips16_gp_pseudo_rtx;
7175 /* Write out code to move floating point arguments in or out of
7176 general registers. Output the instructions to FILE. FP_CODE is
7177 the code describing which arguments are present (see the comment at
7178 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7179 we are copying from the floating point registers. */
7182 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7188 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7189 gcc_assert (TARGET_OLDABI);
7195 gparg = GP_ARG_FIRST;
7196 fparg = FP_ARG_FIRST;
7197 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7201 if ((fparg & 1) != 0)
7203 fprintf (file, "\t%s\t%s,%s\n", s,
7204 reg_names[gparg], reg_names[fparg]);
7206 else if ((f & 3) == 2)
7209 fprintf (file, "\td%s\t%s,%s\n", s,
7210 reg_names[gparg], reg_names[fparg]);
7213 if ((fparg & 1) != 0)
7215 if (TARGET_BIG_ENDIAN)
7216 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7217 reg_names[gparg], reg_names[fparg + 1], s,
7218 reg_names[gparg + 1], reg_names[fparg]);
7220 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7221 reg_names[gparg], reg_names[fparg], s,
7222 reg_names[gparg + 1], reg_names[fparg + 1]);
7235 /* Build a mips16 function stub. This is used for functions which
7236 take arguments in the floating point registers. It is 32 bit code
7237 that moves the floating point args into the general registers, and
7238 then jumps to the 16 bit code. */
7241 build_mips16_function_stub (FILE *file)
7244 char *secname, *stubname;
7245 tree stubid, stubdecl;
7249 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7250 secname = (char *) alloca (strlen (fnname) + 20);
7251 sprintf (secname, ".mips16.fn.%s", fnname);
7252 stubname = (char *) alloca (strlen (fnname) + 20);
7253 sprintf (stubname, "__fn_stub_%s", fnname);
7254 stubid = get_identifier (stubname);
7255 stubdecl = build_decl (FUNCTION_DECL, stubid,
7256 build_function_type (void_type_node, NULL_TREE));
7257 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7259 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7261 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7263 fprintf (file, "%s%s",
7264 need_comma ? ", " : "",
7265 (f & 3) == 1 ? "float" : "double");
7268 fprintf (file, ")\n");
7270 fprintf (file, "\t.set\tnomips16\n");
7271 function_section (stubdecl);
7272 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7274 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7275 within a .ent, and we cannot emit another .ent. */
7276 if (!FUNCTION_NAME_ALREADY_DECLARED)
7278 fputs ("\t.ent\t", file);
7279 assemble_name (file, stubname);
7283 assemble_name (file, stubname);
7284 fputs (":\n", file);
7286 /* We don't want the assembler to insert any nops here. */
7287 fprintf (file, "\t.set\tnoreorder\n");
7289 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7291 fprintf (asm_out_file, "\t.set\tnoat\n");
7292 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7293 assemble_name (file, fnname);
7294 fprintf (file, "\n");
7295 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7296 fprintf (asm_out_file, "\t.set\tat\n");
7298 /* Unfortunately, we can't fill the jump delay slot. We can't fill
7299 with one of the mfc1 instructions, because the result is not
7300 available for one instruction, so if the very first instruction
7301 in the function refers to the register, it will see the wrong
7303 fprintf (file, "\tnop\n");
7305 fprintf (file, "\t.set\treorder\n");
7307 if (!FUNCTION_NAME_ALREADY_DECLARED)
7309 fputs ("\t.end\t", file);
7310 assemble_name (file, stubname);
7314 fprintf (file, "\t.set\tmips16\n");
7316 function_section (current_function_decl);
7319 /* We keep a list of functions for which we have already built stubs
7320 in build_mips16_call_stub. */
7324 struct mips16_stub *next;
7329 static struct mips16_stub *mips16_stubs;
7331 /* Build a call stub for a mips16 call. A stub is needed if we are
7332 passing any floating point values which should go into the floating
7333 point registers. If we are, and the call turns out to be to a 32
7334 bit function, the stub will be used to move the values into the
7335 floating point registers before calling the 32 bit function. The
7336 linker will magically adjust the function call to either the 16 bit
7337 function or the 32 bit stub, depending upon where the function call
7338 is actually defined.
7340 Similarly, we need a stub if the return value might come back in a
7341 floating point register.
7343 RETVAL is the location of the return value, or null if this is
7344 a call rather than a call_value. FN is the address of the
7345 function and ARG_SIZE is the size of the arguments. FP_CODE
7346 is the code built by function_arg. This function returns a nonzero
7347 value if it builds the call instruction itself. */
7350 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7354 char *secname, *stubname;
7355 struct mips16_stub *l;
7356 tree stubid, stubdecl;
7360 /* We don't need to do anything if we aren't in mips16 mode, or if
7361 we were invoked with the -msoft-float option. */
7362 if (! TARGET_MIPS16 || ! mips16_hard_float)
7365 /* Figure out whether the value might come back in a floating point
7367 fpret = (retval != 0
7368 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7369 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7371 /* We don't need to do anything if there were no floating point
7372 arguments and the value will not be returned in a floating point
7374 if (fp_code == 0 && ! fpret)
7377 /* We don't need to do anything if this is a call to a special
7378 mips16 support function. */
7379 if (GET_CODE (fn) == SYMBOL_REF
7380 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7383 /* This code will only work for o32 and o64 abis. The other ABI's
7384 require more sophisticated support. */
7385 gcc_assert (TARGET_OLDABI);
7387 /* We can only handle SFmode and DFmode floating point return
7390 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
7392 /* If we're calling via a function pointer, then we must always call
7393 via a stub. There are magic stubs provided in libgcc.a for each
7394 of the required cases. Each of them expects the function address
7395 to arrive in register $2. */
7397 if (GET_CODE (fn) != SYMBOL_REF)
7403 /* ??? If this code is modified to support other ABI's, we need
7404 to handle PARALLEL return values here. */
7406 sprintf (buf, "__mips16_call_stub_%s%d",
7408 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
7411 id = get_identifier (buf);
7412 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7414 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
7416 if (retval == NULL_RTX)
7417 insn = gen_call_internal (stub_fn, arg_size);
7419 insn = gen_call_value_internal (retval, stub_fn, arg_size);
7420 insn = emit_call_insn (insn);
7422 /* Put the register usage information on the CALL. */
7423 CALL_INSN_FUNCTION_USAGE (insn) =
7424 gen_rtx_EXPR_LIST (VOIDmode,
7425 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
7426 CALL_INSN_FUNCTION_USAGE (insn));
7428 /* If we are handling a floating point return value, we need to
7429 save $18 in the function prologue. Putting a note on the
7430 call will mean that regs_ever_live[$18] will be true if the
7431 call is not eliminated, and we can check that in the prologue
7434 CALL_INSN_FUNCTION_USAGE (insn) =
7435 gen_rtx_EXPR_LIST (VOIDmode,
7436 gen_rtx_USE (VOIDmode,
7437 gen_rtx_REG (word_mode, 18)),
7438 CALL_INSN_FUNCTION_USAGE (insn));
7440 /* Return 1 to tell the caller that we've generated the call
7445 /* We know the function we are going to call. If we have already
7446 built a stub, we don't need to do anything further. */
7448 fnname = XSTR (fn, 0);
7449 for (l = mips16_stubs; l != NULL; l = l->next)
7450 if (strcmp (l->name, fnname) == 0)
7455 /* Build a special purpose stub. When the linker sees a
7456 function call in mips16 code, it will check where the target
7457 is defined. If the target is a 32 bit call, the linker will
7458 search for the section defined here. It can tell which
7459 symbol this section is associated with by looking at the
7460 relocation information (the name is unreliable, since this
7461 might be a static function). If such a section is found, the
7462 linker will redirect the call to the start of the magic
7465 If the function does not return a floating point value, the
7466 special stub section is named
7469 If the function does return a floating point value, the stub
7471 .mips16.call.fp.FNNAME
7474 secname = (char *) alloca (strlen (fnname) + 40);
7475 sprintf (secname, ".mips16.call.%s%s",
7478 stubname = (char *) alloca (strlen (fnname) + 20);
7479 sprintf (stubname, "__call_stub_%s%s",
7482 stubid = get_identifier (stubname);
7483 stubdecl = build_decl (FUNCTION_DECL, stubid,
7484 build_function_type (void_type_node, NULL_TREE));
7485 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7487 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
7489 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
7493 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7495 fprintf (asm_out_file, "%s%s",
7496 need_comma ? ", " : "",
7497 (f & 3) == 1 ? "float" : "double");
7500 fprintf (asm_out_file, ")\n");
7502 fprintf (asm_out_file, "\t.set\tnomips16\n");
7503 assemble_start_function (stubdecl, stubname);
7505 if (!FUNCTION_NAME_ALREADY_DECLARED)
7507 fputs ("\t.ent\t", asm_out_file);
7508 assemble_name (asm_out_file, stubname);
7509 fputs ("\n", asm_out_file);
7511 assemble_name (asm_out_file, stubname);
7512 fputs (":\n", asm_out_file);
7515 /* We build the stub code by hand. That's the only way we can
7516 do it, since we can't generate 32 bit code during a 16 bit
7519 /* We don't want the assembler to insert any nops here. */
7520 fprintf (asm_out_file, "\t.set\tnoreorder\n");
7522 mips16_fp_args (asm_out_file, fp_code, 0);
7526 fprintf (asm_out_file, "\t.set\tnoat\n");
7527 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
7529 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7530 fprintf (asm_out_file, "\t.set\tat\n");
7531 /* Unfortunately, we can't fill the jump delay slot. We
7532 can't fill with one of the mtc1 instructions, because the
7533 result is not available for one instruction, so if the
7534 very first instruction in the function refers to the
7535 register, it will see the wrong value. */
7536 fprintf (asm_out_file, "\tnop\n");
7540 fprintf (asm_out_file, "\tmove\t%s,%s\n",
7541 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
7542 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
7543 /* As above, we can't fill the delay slot. */
7544 fprintf (asm_out_file, "\tnop\n");
7545 if (GET_MODE (retval) == SFmode)
7546 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7547 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
7550 if (TARGET_BIG_ENDIAN)
7552 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7553 reg_names[GP_REG_FIRST + 2],
7554 reg_names[FP_REG_FIRST + 1]);
7555 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7556 reg_names[GP_REG_FIRST + 3],
7557 reg_names[FP_REG_FIRST + 0]);
7561 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7562 reg_names[GP_REG_FIRST + 2],
7563 reg_names[FP_REG_FIRST + 0]);
7564 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7565 reg_names[GP_REG_FIRST + 3],
7566 reg_names[FP_REG_FIRST + 1]);
7569 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
7570 /* As above, we can't fill the delay slot. */
7571 fprintf (asm_out_file, "\tnop\n");
7574 fprintf (asm_out_file, "\t.set\treorder\n");
7576 #ifdef ASM_DECLARE_FUNCTION_SIZE
7577 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
7580 if (!FUNCTION_NAME_ALREADY_DECLARED)
7582 fputs ("\t.end\t", asm_out_file);
7583 assemble_name (asm_out_file, stubname);
7584 fputs ("\n", asm_out_file);
7587 fprintf (asm_out_file, "\t.set\tmips16\n");
7589 /* Record this stub. */
7590 l = (struct mips16_stub *) xmalloc (sizeof *l);
7591 l->name = xstrdup (fnname);
7593 l->next = mips16_stubs;
7597 /* If we expect a floating point return value, but we've built a
7598 stub which does not expect one, then we're in trouble. We can't
7599 use the existing stub, because it won't handle the floating point
7600 value. We can't build a new stub, because the linker won't know
7601 which stub to use for the various calls in this object file.
7602 Fortunately, this case is illegal, since it means that a function
7603 was declared in two different ways in a single compilation. */
7604 if (fpret && ! l->fpret)
7605 error ("cannot handle inconsistent calls to %qs", fnname);
7607 /* If we are calling a stub which handles a floating point return
7608 value, we need to arrange to save $18 in the prologue. We do
7609 this by marking the function call as using the register. The
7610 prologue will later see that it is used, and emit code to save
7617 if (retval == NULL_RTX)
7618 insn = gen_call_internal (fn, arg_size);
7620 insn = gen_call_value_internal (retval, fn, arg_size);
7621 insn = emit_call_insn (insn);
7623 CALL_INSN_FUNCTION_USAGE (insn) =
7624 gen_rtx_EXPR_LIST (VOIDmode,
7625 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
7626 CALL_INSN_FUNCTION_USAGE (insn));
7628 /* Return 1 to tell the caller that we've generated the call
7633 /* Return 0 to let the caller generate the call insn. */
7637 /* An entry in the mips16 constant pool. VALUE is the pool constant,
7638 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
7640 struct mips16_constant {
7641 struct mips16_constant *next;
7644 enum machine_mode mode;
7647 /* Information about an incomplete mips16 constant pool. FIRST is the
7648 first constant, HIGHEST_ADDRESS is the highest address that the first
7649 byte of the pool can have, and INSN_ADDRESS is the current instruction
7652 struct mips16_constant_pool {
7653 struct mips16_constant *first;
7654 int highest_address;
7658 /* Add constant VALUE to POOL and return its label. MODE is the
7659 value's mode (used for CONST_INTs, etc.). */
7662 add_constant (struct mips16_constant_pool *pool,
7663 rtx value, enum machine_mode mode)
7665 struct mips16_constant **p, *c;
7666 bool first_of_size_p;
7668 /* See whether the constant is already in the pool. If so, return the
7669 existing label, otherwise leave P pointing to the place where the
7670 constant should be added.
7672 Keep the pool sorted in increasing order of mode size so that we can
7673 reduce the number of alignments needed. */
7674 first_of_size_p = true;
7675 for (p = &pool->first; *p != 0; p = &(*p)->next)
7677 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
7679 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
7681 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
7682 first_of_size_p = false;
7685 /* In the worst case, the constant needed by the earliest instruction
7686 will end up at the end of the pool. The entire pool must then be
7687 accessible from that instruction.
7689 When adding the first constant, set the pool's highest address to
7690 the address of the first out-of-range byte. Adjust this address
7691 downwards each time a new constant is added. */
7692 if (pool->first == 0)
7693 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
7694 is the address of the instruction with the lowest two bits clear.
7695 The base PC value for ld has the lowest three bits clear. Assume
7696 the worst case here. */
7697 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
7698 pool->highest_address -= GET_MODE_SIZE (mode);
7699 if (first_of_size_p)
7700 /* Take into account the worst possible padding due to alignment. */
7701 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
7703 /* Create a new entry. */
7704 c = (struct mips16_constant *) xmalloc (sizeof *c);
7707 c->label = gen_label_rtx ();
7714 /* Output constant VALUE after instruction INSN and return the last
7715 instruction emitted. MODE is the mode of the constant. */
7718 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
7720 switch (GET_MODE_CLASS (mode))
7724 rtx size = GEN_INT (GET_MODE_SIZE (mode));
7725 return emit_insn_after (gen_consttable_int (value, size), insn);
7729 return emit_insn_after (gen_consttable_float (value), insn);
7731 case MODE_VECTOR_FLOAT:
7732 case MODE_VECTOR_INT:
7735 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
7736 insn = dump_constants_1 (GET_MODE_INNER (mode),
7737 CONST_VECTOR_ELT (value, i), insn);
7747 /* Dump out the constants in CONSTANTS after INSN. */
7750 dump_constants (struct mips16_constant *constants, rtx insn)
7752 struct mips16_constant *c, *next;
7756 for (c = constants; c != NULL; c = next)
7758 /* If necessary, increase the alignment of PC. */
7759 if (align < GET_MODE_SIZE (c->mode))
7761 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
7762 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
7764 align = GET_MODE_SIZE (c->mode);
7766 insn = emit_label_after (c->label, insn);
7767 insn = dump_constants_1 (c->mode, c->value, insn);
7773 emit_barrier_after (insn);
7776 /* Return the length of instruction INSN.
7778 ??? MIPS16 switch tables go in .text, but we don't define
7779 JUMP_TABLES_IN_TEXT_SECTION, so get_attr_length will not
7780 compute their lengths correctly. */
7783 mips16_insn_length (rtx insn)
7787 rtx body = PATTERN (insn);
7788 if (GET_CODE (body) == ADDR_VEC)
7789 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
7790 if (GET_CODE (body) == ADDR_DIFF_VEC)
7791 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
7793 return get_attr_length (insn);
7796 /* Rewrite *X so that constant pool references refer to the constant's
7797 label instead. DATA points to the constant pool structure. */
7800 mips16_rewrite_pool_refs (rtx *x, void *data)
7802 struct mips16_constant_pool *pool = data;
7803 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
7804 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
7805 get_pool_constant (*x),
7806 get_pool_mode (*x)));
7810 /* Build MIPS16 constant pools. */
7813 mips16_lay_out_constants (void)
7815 struct mips16_constant_pool pool;
7819 memset (&pool, 0, sizeof (pool));
7820 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7822 /* Rewrite constant pool references in INSN. */
7824 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
7826 pool.insn_address += mips16_insn_length (insn);
7828 if (pool.first != NULL)
7830 /* If there are no natural barriers between the first user of
7831 the pool and the highest acceptable address, we'll need to
7832 create a new instruction to jump around the constant pool.
7833 In the worst case, this instruction will be 4 bytes long.
7835 If it's too late to do this transformation after INSN,
7836 do it immediately before INSN. */
7837 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
7841 label = gen_label_rtx ();
7843 jump = emit_jump_insn_before (gen_jump (label), insn);
7844 JUMP_LABEL (jump) = label;
7845 LABEL_NUSES (label) = 1;
7846 barrier = emit_barrier_after (jump);
7848 emit_label_after (label, barrier);
7849 pool.insn_address += 4;
7852 /* See whether the constant pool is now out of range of the first
7853 user. If so, output the constants after the previous barrier.
7854 Note that any instructions between BARRIER and INSN (inclusive)
7855 will use negative offsets to refer to the pool. */
7856 if (pool.insn_address > pool.highest_address)
7858 dump_constants (pool.first, barrier);
7862 else if (BARRIER_P (insn))
7866 dump_constants (pool.first, get_last_insn ());
7869 /* A temporary variable used by for_each_rtx callbacks, etc. */
7870 static rtx mips_sim_insn;
7872 /* A structure representing the state of the processor pipeline.
7873 Used by the mips_sim_* family of functions. */
7875 /* The maximum number of instructions that can be issued in a cycle.
7876 (Caches mips_issue_rate.) */
7877 unsigned int issue_rate;
7879 /* The current simulation time. */
7882 /* How many more instructions can be issued in the current cycle. */
7883 unsigned int insns_left;
7885 /* LAST_SET[X].INSN is the last instruction to set register X.
7886 LAST_SET[X].TIME is the time at which that instruction was issued.
7887 INSN is null if no instruction has yet set register X. */
7891 } last_set[FIRST_PSEUDO_REGISTER];
7893 /* The pipeline's current DFA state. */
7897 /* Reset STATE to the initial simulation state. */
7900 mips_sim_reset (struct mips_sim *state)
7903 state->insns_left = state->issue_rate;
7904 memset (&state->last_set, 0, sizeof (state->last_set));
7905 state_reset (state->dfa_state);
7908 /* Initialize STATE before its first use. DFA_STATE points to an
7909 allocated but uninitialized DFA state. */
7912 mips_sim_init (struct mips_sim *state, state_t dfa_state)
7914 state->issue_rate = mips_issue_rate ();
7915 state->dfa_state = dfa_state;
7916 mips_sim_reset (state);
7919 /* Advance STATE by one clock cycle. */
7922 mips_sim_next_cycle (struct mips_sim *state)
7925 state->insns_left = state->issue_rate;
7926 state_transition (state->dfa_state, 0);
7929 /* Advance simulation state STATE until instruction INSN can read
7933 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
7937 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
7938 if (state->last_set[REGNO (reg) + i].insn != 0)
7942 t = state->last_set[REGNO (reg) + i].time;
7943 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
7944 while (state->time < t)
7945 mips_sim_next_cycle (state);
7949 /* A for_each_rtx callback. If *X is a register, advance simulation state
7950 DATA until mips_sim_insn can read the register's value. */
7953 mips_sim_wait_regs_2 (rtx *x, void *data)
7956 mips_sim_wait_reg (data, mips_sim_insn, *x);
7960 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
7963 mips_sim_wait_regs_1 (rtx *x, void *data)
7965 for_each_rtx (x, mips_sim_wait_regs_2, data);
7968 /* Advance simulation state STATE until all of INSN's register
7969 dependencies are satisfied. */
7972 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
7974 mips_sim_insn = insn;
7975 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
7978 /* Advance simulation state STATE until the units required by
7979 instruction INSN are available. */
7982 mips_sim_wait_units (struct mips_sim *state, rtx insn)
7986 tmp_state = alloca (state_size ());
7987 while (state->insns_left == 0
7988 || (memcpy (tmp_state, state->dfa_state, state_size ()),
7989 state_transition (tmp_state, insn) >= 0))
7990 mips_sim_next_cycle (state);
7993 /* Advance simulation state STATE until INSN is ready to issue. */
7996 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
7998 mips_sim_wait_regs (state, insn);
7999 mips_sim_wait_units (state, insn);
8002 /* mips_sim_insn has just set X. Update the LAST_SET array
8003 in simulation state DATA. */
8006 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8008 struct mips_sim *state;
8013 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8015 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8016 state->last_set[REGNO (x) + i].time = state->time;
8020 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8021 can issue immediately (i.e., that mips_sim_wait_insn has already
8025 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8027 state_transition (state->dfa_state, insn);
8028 state->insns_left--;
8030 mips_sim_insn = insn;
8031 note_stores (PATTERN (insn), mips_sim_record_set, state);
8034 /* Simulate issuing a NOP in state STATE. */
8037 mips_sim_issue_nop (struct mips_sim *state)
8039 if (state->insns_left == 0)
8040 mips_sim_next_cycle (state);
8041 state->insns_left--;
8044 /* Update simulation state STATE so that it's ready to accept the instruction
8045 after INSN. INSN should be part of the main rtl chain, not a member of a
8049 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8051 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8053 mips_sim_issue_nop (state);
8055 switch (GET_CODE (SEQ_BEGIN (insn)))
8059 /* We can't predict the processor state after a call or label. */
8060 mips_sim_reset (state);
8064 /* The delay slots of branch likely instructions are only executed
8065 when the branch is taken. Therefore, if the caller has simulated
8066 the delay slot instruction, STATE does not really reflect the state
8067 of the pipeline for the instruction after the delay slot. Also,
8068 branch likely instructions tend to incur a penalty when not taken,
8069 so there will probably be an extra delay between the branch and
8070 the instruction after the delay slot. */
8071 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8072 mips_sim_reset (state);
8080 /* The VR4130 pipeline issues aligned pairs of instructions together,
8081 but it stalls the second instruction if it depends on the first.
8082 In order to cut down the amount of logic required, this dependence
8083 check is not based on a full instruction decode. Instead, any non-SPECIAL
8084 instruction is assumed to modify the register specified by bits 20-16
8085 (which is usually the "rt" field).
8087 In beq, beql, bne and bnel instructions, the rt field is actually an
8088 input, so we can end up with a false dependence between the branch
8089 and its delay slot. If this situation occurs in instruction INSN,
8090 try to avoid it by swapping rs and rt. */
8093 vr4130_avoid_branch_rt_conflict (rtx insn)
8097 first = SEQ_BEGIN (insn);
8098 second = SEQ_END (insn);
8100 && NONJUMP_INSN_P (second)
8101 && GET_CODE (PATTERN (first)) == SET
8102 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8103 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8105 /* Check for the right kind of condition. */
8106 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8107 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8108 && REG_P (XEXP (cond, 0))
8109 && REG_P (XEXP (cond, 1))
8110 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8111 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8113 /* SECOND mentions the rt register but not the rs register. */
8114 rtx tmp = XEXP (cond, 0);
8115 XEXP (cond, 0) = XEXP (cond, 1);
8116 XEXP (cond, 1) = tmp;
8121 /* Implement -mvr4130-align. Go through each basic block and simulate the
8122 processor pipeline. If we find that a pair of instructions could execute
8123 in parallel, and the first of those instruction is not 8-byte aligned,
8124 insert a nop to make it aligned. */
8127 vr4130_align_insns (void)
8129 struct mips_sim state;
8130 rtx insn, subinsn, last, last2, next;
8135 /* LAST is the last instruction before INSN to have a nonzero length.
8136 LAST2 is the last such instruction before LAST. */
8140 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8143 mips_sim_init (&state, alloca (state_size ()));
8144 for (insn = get_insns (); insn != 0; insn = next)
8146 unsigned int length;
8148 next = NEXT_INSN (insn);
8150 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8151 This isn't really related to the alignment pass, but we do it on
8152 the fly to avoid a separate instruction walk. */
8153 vr4130_avoid_branch_rt_conflict (insn);
8155 if (USEFUL_INSN_P (insn))
8156 FOR_EACH_SUBINSN (subinsn, insn)
8158 mips_sim_wait_insn (&state, subinsn);
8160 /* If we want this instruction to issue in parallel with the
8161 previous one, make sure that the previous instruction is
8162 aligned. There are several reasons why this isn't worthwhile
8163 when the second instruction is a call:
8165 - Calls are less likely to be performance critical,
8166 - There's a good chance that the delay slot can execute
8167 in parallel with the call.
8168 - The return address would then be unaligned.
8170 In general, if we're going to insert a nop between instructions
8171 X and Y, it's better to insert it immediately after X. That
8172 way, if the nop makes Y aligned, it will also align any labels
8174 if (state.insns_left != state.issue_rate
8175 && !CALL_P (subinsn))
8177 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8179 /* SUBINSN is the first instruction in INSN and INSN is
8180 aligned. We want to align the previous instruction
8181 instead, so insert a nop between LAST2 and LAST.
8183 Note that LAST could be either a single instruction
8184 or a branch with a delay slot. In the latter case,
8185 LAST, like INSN, is already aligned, but the delay
8186 slot must have some extra delay that stops it from
8187 issuing at the same time as the branch. We therefore
8188 insert a nop before the branch in order to align its
8190 emit_insn_after (gen_nop (), last2);
8193 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8195 /* SUBINSN is the delay slot of INSN, but INSN is
8196 currently unaligned. Insert a nop between
8197 LAST and INSN to align it. */
8198 emit_insn_after (gen_nop (), last);
8202 mips_sim_issue_insn (&state, subinsn);
8204 mips_sim_finish_insn (&state, insn);
8206 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8207 length = get_attr_length (insn);
8210 /* If the instruction is an asm statement or multi-instruction
8211 mips.md patern, the length is only an estimate. Insert an
8212 8 byte alignment after it so that the following instructions
8213 can be handled correctly. */
8214 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8215 && (recog_memoized (insn) < 0 || length >= 8))
8217 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8218 next = NEXT_INSN (next);
8219 mips_sim_next_cycle (&state);
8222 else if (length & 4)
8223 aligned_p = !aligned_p;
8228 /* See whether INSN is an aligned label. */
8229 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8235 /* Subroutine of mips_reorg. If there is a hazard between INSN
8236 and a previous instruction, avoid it by inserting nops after
8239 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8240 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8241 before using the value of that register. *HILO_DELAY counts the
8242 number of instructions since the last hilo hazard (that is,
8243 the number of instructions since the last mflo or mfhi).
8245 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8246 for the next instruction.
8248 LO_REG is an rtx for the LO register, used in dependence checking. */
8251 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8252 rtx *delayed_reg, rtx lo_reg)
8260 pattern = PATTERN (insn);
8262 /* Do not put the whole function in .set noreorder if it contains
8263 an asm statement. We don't know whether there will be hazards
8264 between the asm statement and the gcc-generated code. */
8265 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8266 cfun->machine->all_noreorder_p = false;
8268 /* Ignore zero-length instructions (barriers and the like). */
8269 ninsns = get_attr_length (insn) / 4;
8273 /* Work out how many nops are needed. Note that we only care about
8274 registers that are explicitly mentioned in the instruction's pattern.
8275 It doesn't matter that calls use the argument registers or that they
8276 clobber hi and lo. */
8277 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8278 nops = 2 - *hilo_delay;
8279 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8284 /* Insert the nops between this instruction and the previous one.
8285 Each new nop takes us further from the last hilo hazard. */
8286 *hilo_delay += nops;
8288 emit_insn_after (gen_hazard_nop (), after);
8290 /* Set up the state for the next instruction. */
8291 *hilo_delay += ninsns;
8293 if (INSN_CODE (insn) >= 0)
8294 switch (get_attr_hazard (insn))
8304 set = single_set (insn);
8305 gcc_assert (set != 0);
8306 *delayed_reg = SET_DEST (set);
8312 /* Go through the instruction stream and insert nops where necessary.
8313 See if the whole function can then be put into .set noreorder &
8317 mips_avoid_hazards (void)
8319 rtx insn, last_insn, lo_reg, delayed_reg;
8322 /* Force all instructions to be split into their final form. */
8323 split_all_insns_noflow ();
8325 /* Recalculate instruction lengths without taking nops into account. */
8326 cfun->machine->ignore_hazard_length_p = true;
8327 shorten_branches (get_insns ());
8329 /* The profiler code uses assembler macros. -mfix-vr4120 relies on
8330 assembler nop insertion. */
8331 cfun->machine->all_noreorder_p = (!current_function_profile
8332 && !TARGET_FIX_VR4120);
8337 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8339 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
8342 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8343 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8344 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8345 &hilo_delay, &delayed_reg, lo_reg);
8347 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8348 &delayed_reg, lo_reg);
8355 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
8361 mips16_lay_out_constants ();
8362 else if (TARGET_EXPLICIT_RELOCS)
8364 if (mips_flag_delayed_branch)
8365 dbr_schedule (get_insns (), dump_file);
8366 mips_avoid_hazards ();
8367 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8368 vr4130_align_insns ();
8372 /* This function does three things:
8374 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
8375 - Register the mips16 hardware floating point stubs.
8376 - Register the gofast functions if selected using --enable-gofast. */
8378 #include "config/gofast.h"
8381 mips_init_libfuncs (void)
8383 if (TARGET_FIX_VR4120)
8385 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
8386 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
8389 if (TARGET_MIPS16 && mips16_hard_float)
8391 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
8392 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
8393 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
8394 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
8396 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
8397 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
8398 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
8399 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
8400 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
8401 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
8403 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
8404 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
8406 if (TARGET_DOUBLE_FLOAT)
8408 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
8409 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
8410 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
8411 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
8413 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
8414 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
8415 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
8416 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
8417 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
8418 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
8420 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
8421 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
8423 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
8424 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
8428 gofast_maybe_init_libfuncs ();
8431 /* Return a number assessing the cost of moving a register in class
8432 FROM to class TO. The classes are expressed using the enumeration
8433 values such as `GENERAL_REGS'. A value of 2 is the default; other
8434 values are interpreted relative to that.
8436 It is not required that the cost always equal 2 when FROM is the
8437 same as TO; on some machines it is expensive to move between
8438 registers if they are not general registers.
8440 If reload sees an insn consisting of a single `set' between two
8441 hard registers, and if `REGISTER_MOVE_COST' applied to their
8442 classes returns a value of 2, reload does not check to ensure that
8443 the constraints of the insn are met. Setting a cost of other than
8444 2 will allow reload to verify that the constraints are met. You
8445 should do this if the `movM' pattern's constraints do not allow
8448 ??? We make the cost of moving from HI/LO into general
8449 registers the same as for one of moving general registers to
8450 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8451 pseudo to HI/LO. This might hurt optimizations though, it
8452 isn't clear if it is wise. And it might not work in all cases. We
8453 could solve the DImode LO reg problem by using a multiply, just
8454 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8455 problem by using divide instructions. divu puts the remainder in
8456 the HI reg, so doing a divide by -1 will move the value in the HI
8457 reg for all values except -1. We could handle that case by using a
8458 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8459 a compare/branch to test the input value to see which instruction
8460 we need to use. This gets pretty messy, but it is feasible. */
8463 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
8464 enum reg_class to, enum reg_class from)
8466 if (from == M16_REGS && GR_REG_CLASS_P (to))
8468 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
8470 else if (GR_REG_CLASS_P (from))
8474 else if (to == M16_NA_REGS)
8476 else if (GR_REG_CLASS_P (to))
8483 else if (to == FP_REGS)
8485 else if (to == HI_REG || to == LO_REG || to == MD_REGS)
8492 else if (COP_REG_CLASS_P (to))
8496 } /* GR_REG_CLASS_P (from) */
8497 else if (from == FP_REGS)
8499 if (GR_REG_CLASS_P (to))
8501 else if (to == FP_REGS)
8503 else if (to == ST_REGS)
8505 } /* from == FP_REGS */
8506 else if (from == HI_REG || from == LO_REG || from == MD_REGS)
8508 if (GR_REG_CLASS_P (to))
8515 } /* from == HI_REG, etc. */
8516 else if (from == ST_REGS && GR_REG_CLASS_P (to))
8518 else if (COP_REG_CLASS_P (from))
8521 } /* COP_REG_CLASS_P (from) */
8528 /* Return the length of INSN. LENGTH is the initial length computed by
8529 attributes in the machine-description file. */
8532 mips_adjust_insn_length (rtx insn, int length)
8534 /* A unconditional jump has an unfilled delay slot if it is not part
8535 of a sequence. A conditional jump normally has a delay slot, but
8536 does not on MIPS16. */
8537 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
8540 /* See how many nops might be needed to avoid hardware hazards. */
8541 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
8542 switch (get_attr_hazard (insn))
8556 /* All MIPS16 instructions are a measly two bytes. */
8564 /* Return an asm sequence to start a noat block and load the address
8565 of a label into $1. */
8568 mips_output_load_label (void)
8570 if (TARGET_EXPLICIT_RELOCS)
8574 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
8577 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
8580 if (ISA_HAS_LOAD_DELAY)
8581 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
8582 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
8586 if (Pmode == DImode)
8587 return "%[dla\t%@,%0";
8589 return "%[la\t%@,%0";
8594 /* Output assembly instructions to peform a conditional branch.
8596 INSN is the branch instruction. OPERANDS[0] is the condition.
8597 OPERANDS[1] is the target of the branch. OPERANDS[2] is the target
8598 of the first operand to the condition. If TWO_OPERANDS_P is
8599 nonzero the comparison takes two operands; OPERANDS[3] will be the
8602 If INVERTED_P is nonzero we are to branch if the condition does
8603 not hold. If FLOAT_P is nonzero this is a floating-point comparison.
8605 LENGTH is the length (in bytes) of the sequence we are to generate.
8606 That tells us whether to generate a simple conditional branch, or a
8607 reversed conditional branch around a `jr' instruction. */
8609 mips_output_conditional_branch (rtx insn, rtx *operands, int two_operands_p,
8610 int float_p, int inverted_p, int length)
8612 static char buffer[200];
8613 /* The kind of comparison we are doing. */
8614 enum rtx_code code = GET_CODE (operands[0]);
8615 /* Nonzero if the opcode for the comparison needs a `z' indicating
8616 that it is a comparison against zero. */
8618 /* A string to use in the assembly output to represent the first
8620 const char *op1 = "%z2";
8621 /* A string to use in the assembly output to represent the second
8622 operand. Use the hard-wired zero register if there's no second
8624 const char *op2 = (two_operands_p ? ",%z3" : ",%.");
8625 /* The operand-printing string for the comparison. */
8626 const char *const comp = (float_p ? "%F0" : "%C0");
8627 /* The operand-printing string for the inverted comparison. */
8628 const char *const inverted_comp = (float_p ? "%W0" : "%N0");
8630 /* The MIPS processors (for levels of the ISA at least two), have
8631 "likely" variants of each branch instruction. These instructions
8632 annul the instruction in the delay slot if the branch is not
8634 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
8636 if (!two_operands_p)
8638 /* To compute whether than A > B, for example, we normally
8639 subtract B from A and then look at the sign bit. But, if we
8640 are doing an unsigned comparison, and B is zero, we don't
8641 have to do the subtraction. Instead, we can just check to
8642 see if A is nonzero. Thus, we change the CODE here to
8643 reflect the simpler comparison operation. */
8655 /* A condition which will always be true. */
8661 /* A condition which will always be false. */
8667 /* Not a special case. */
8672 /* Relative comparisons are always done against zero. But
8673 equality comparisons are done between two operands, and therefore
8674 do not require a `z' in the assembly language output. */
8675 need_z_p = (!float_p && code != EQ && code != NE);
8676 /* For comparisons against zero, the zero is not provided
8681 /* Begin by terminating the buffer. That way we can always use
8682 strcat to add to it. */
8689 /* Just a simple conditional branch. */
8691 sprintf (buffer, "%%*b%s%%?\t%%Z2%%1%%/",
8692 inverted_p ? inverted_comp : comp);
8694 sprintf (buffer, "%%*b%s%s%%?\t%s%s,%%1%%/",
8695 inverted_p ? inverted_comp : comp,
8696 need_z_p ? "z" : "",
8706 /* Generate a reversed conditional branch around ` j'
8719 If the original branch was a likely branch, the delay slot
8720 must be executed only if the branch is taken, so generate:
8732 When generating PIC, instead of:
8745 rtx target = gen_label_rtx ();
8747 orig_target = operands[1];
8748 operands[1] = target;
8749 /* Generate the reversed comparison. This takes four
8752 sprintf (buffer, "%%*b%s\t%%Z2%%1",
8753 inverted_p ? comp : inverted_comp);
8755 sprintf (buffer, "%%*b%s%s\t%s%s,%%1",
8756 inverted_p ? comp : inverted_comp,
8757 need_z_p ? "z" : "",
8760 output_asm_insn (buffer, operands);
8762 if (length != 16 && length != 28 && ! mips_branch_likely)
8764 /* Output delay slot instruction. */
8765 rtx insn = final_sequence;
8766 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8767 optimize, 0, 1, NULL);
8768 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8771 output_asm_insn ("%#", 0);
8774 output_asm_insn ("j\t%0", &orig_target);
8777 output_asm_insn (mips_output_load_label (), &orig_target);
8778 output_asm_insn ("jr\t%@%]", 0);
8781 if (length != 16 && length != 28 && mips_branch_likely)
8783 /* Output delay slot instruction. */
8784 rtx insn = final_sequence;
8785 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
8786 optimize, 0, 1, NULL);
8787 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8790 output_asm_insn ("%#", 0);
8792 (*targetm.asm_out.internal_label) (asm_out_file, "L",
8793 CODE_LABEL_NUMBER (target));
8806 /* Used to output div or ddiv instruction DIVISION, which has the operands
8807 given by OPERANDS. Add in a divide-by-zero check if needed.
8809 When working around R4000 and R4400 errata, we need to make sure that
8810 the division is not immediately followed by a shift[1][2]. We also
8811 need to stop the division from being put into a branch delay slot[3].
8812 The easiest way to avoid both problems is to add a nop after the
8813 division. When a divide-by-zero check is needed, this nop can be
8814 used to fill the branch delay slot.
8816 [1] If a double-word or a variable shift executes immediately
8817 after starting an integer division, the shift may give an
8818 incorrect result. See quotations of errata #16 and #28 from
8819 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8820 in mips.md for details.
8822 [2] A similar bug to [1] exists for all revisions of the
8823 R4000 and the R4400 when run in an MC configuration.
8824 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
8826 "19. In this following sequence:
8828 ddiv (or ddivu or div or divu)
8829 dsll32 (or dsrl32, dsra32)
8831 if an MPT stall occurs, while the divide is slipping the cpu
8832 pipeline, then the following double shift would end up with an
8835 Workaround: The compiler needs to avoid generating any
8836 sequence with divide followed by extended double shift."
8838 This erratum is also present in "MIPS R4400MC Errata, Processor
8839 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
8840 & 3.0" as errata #10 and #4, respectively.
8842 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8843 (also valid for MIPS R4000MC processors):
8845 "52. R4000SC: This bug does not apply for the R4000PC.
8847 There are two flavors of this bug:
8849 1) If the instruction just after divide takes an RF exception
8850 (tlb-refill, tlb-invalid) and gets an instruction cache
8851 miss (both primary and secondary) and the line which is
8852 currently in secondary cache at this index had the first
8853 data word, where the bits 5..2 are set, then R4000 would
8854 get a wrong result for the div.
8859 ------------------- # end-of page. -tlb-refill
8864 ------------------- # end-of page. -tlb-invalid
8867 2) If the divide is in the taken branch delay slot, where the
8868 target takes RF exception and gets an I-cache miss for the
8869 exception vector or where I-cache miss occurs for the
8870 target address, under the above mentioned scenarios, the
8871 div would get wrong results.
8874 j r2 # to next page mapped or unmapped
8875 div r8,r9 # this bug would be there as long
8876 # as there is an ICache miss and
8877 nop # the "data pattern" is present
8880 beq r0, r0, NextPage # to Next page
8884 This bug is present for div, divu, ddiv, and ddivu
8887 Workaround: For item 1), OS could make sure that the next page
8888 after the divide instruction is also mapped. For item 2), the
8889 compiler could make sure that the divide instruction is not in
8890 the branch delay slot."
8892 These processors have PRId values of 0x00004220 and 0x00004300 for
8893 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
8896 mips_output_division (const char *division, rtx *operands)
8901 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
8903 output_asm_insn (s, operands);
8906 if (TARGET_CHECK_ZERO_DIV)
8910 output_asm_insn (s, operands);
8911 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
8913 else if (GENERATE_DIVIDE_TRAPS)
8915 output_asm_insn (s, operands);
8920 output_asm_insn ("%(bne\t%2,%.,1f", operands);
8921 output_asm_insn (s, operands);
8922 s = "break\t7%)\n1:";
8928 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
8929 with a final "000" replaced by "k". Ignore case.
8931 Note: this function is shared between GCC and GAS. */
8934 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
8936 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
8937 given++, canonical++;
8939 return ((*given == 0 && *canonical == 0)
8940 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
8944 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
8945 CPU name. We've traditionally allowed a lot of variation here.
8947 Note: this function is shared between GCC and GAS. */
8950 mips_matching_cpu_name_p (const char *canonical, const char *given)
8952 /* First see if the name matches exactly, or with a final "000"
8954 if (mips_strict_matching_cpu_name_p (canonical, given))
8957 /* If not, try comparing based on numerical designation alone.
8958 See if GIVEN is an unadorned number, or 'r' followed by a number. */
8959 if (TOLOWER (*given) == 'r')
8961 if (!ISDIGIT (*given))
8964 /* Skip over some well-known prefixes in the canonical name,
8965 hoping to find a number there too. */
8966 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
8968 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
8970 else if (TOLOWER (canonical[0]) == 'r')
8973 return mips_strict_matching_cpu_name_p (canonical, given);
8977 /* Parse an option that takes the name of a processor as its argument.
8978 OPTION is the name of the option and CPU_STRING is the argument.
8979 Return the corresponding processor enumeration if the CPU_STRING is
8980 recognized, otherwise report an error and return null.
8982 A similar function exists in GAS. */
8984 static const struct mips_cpu_info *
8985 mips_parse_cpu (const char *option, const char *cpu_string)
8987 const struct mips_cpu_info *p;
8990 /* In the past, we allowed upper-case CPU names, but it doesn't
8991 work well with the multilib machinery. */
8992 for (s = cpu_string; *s != 0; s++)
8995 warning ("the cpu name must be lower case");
8999 /* 'from-abi' selects the most compatible architecture for the given
9000 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9001 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9002 version. Look first at the -mgp options, if given, otherwise base
9003 the choice on MASK_64BIT in TARGET_DEFAULT. */
9004 if (strcasecmp (cpu_string, "from-abi") == 0)
9005 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9006 : ABI_NEEDS_64BIT_REGS ? 3
9007 : (TARGET_64BIT ? 3 : 1));
9009 /* 'default' has traditionally been a no-op. Probably not very useful. */
9010 if (strcasecmp (cpu_string, "default") == 0)
9013 for (p = mips_cpu_info_table; p->name != 0; p++)
9014 if (mips_matching_cpu_name_p (p->name, cpu_string))
9017 error ("bad value (%s) for %s", cpu_string, option);
9022 /* Return the processor associated with the given ISA level, or null
9023 if the ISA isn't valid. */
9025 static const struct mips_cpu_info *
9026 mips_cpu_info_from_isa (int isa)
9028 const struct mips_cpu_info *p;
9030 for (p = mips_cpu_info_table; p->name != 0; p++)
9037 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9038 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9039 they only hold condition code modes, and CCmode is always considered to
9040 be 4 bytes wide. All other registers are word sized. */
9043 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9045 if (ST_REG_P (regno))
9046 return ((GET_MODE_SIZE (mode) + 3) / 4);
9047 else if (! FP_REG_P (regno))
9048 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9050 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9053 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
9054 all BLKmode objects are returned in memory. Under the new (N32 and
9055 64-bit MIPS ABIs) small structures are returned in a register.
9056 Objects with varying size must still be returned in memory, of
9060 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9063 return (TYPE_MODE (type) == BLKmode);
9065 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9066 || (int_size_in_bytes (type) == -1));
9070 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9072 return !TARGET_OLDABI;
9075 /* Return true if INSN is a multiply-add or multiply-subtract
9076 instruction and PREV assigns to the accumulator operand. */
9079 mips_linked_madd_p (rtx prev, rtx insn)
9083 x = single_set (insn);
9089 if (GET_CODE (x) == PLUS
9090 && GET_CODE (XEXP (x, 0)) == MULT
9091 && reg_set_p (XEXP (x, 1), prev))
9094 if (GET_CODE (x) == MINUS
9095 && GET_CODE (XEXP (x, 1)) == MULT
9096 && reg_set_p (XEXP (x, 0), prev))
9102 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9103 that may clobber hi or lo. */
9105 static rtx mips_macc_chains_last_hilo;
9107 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9108 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9111 mips_macc_chains_record (rtx insn)
9113 if (get_attr_may_clobber_hilo (insn))
9114 mips_macc_chains_last_hilo = insn;
9117 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9118 has NREADY elements, looking for a multiply-add or multiply-subtract
9119 instruction that is cumulative with mips_macc_chains_last_hilo.
9120 If there is one, promote it ahead of anything else that might
9121 clobber hi or lo. */
9124 mips_macc_chains_reorder (rtx *ready, int nready)
9128 if (mips_macc_chains_last_hilo != 0)
9129 for (i = nready - 1; i >= 0; i--)
9130 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9132 for (j = nready - 1; j > i; j--)
9133 if (recog_memoized (ready[j]) >= 0
9134 && get_attr_may_clobber_hilo (ready[j]))
9136 mips_promote_ready (ready, i, j);
9143 /* The last instruction to be scheduled. */
9145 static rtx vr4130_last_insn;
9147 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9148 points to an rtx that is initially an instruction. Nullify the rtx
9149 if the instruction uses the value of register X. */
9152 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9154 rtx *insn_ptr = data;
9157 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9161 /* Return true if there is true register dependence between vr4130_last_insn
9165 vr4130_true_reg_dependence_p (rtx insn)
9167 note_stores (PATTERN (vr4130_last_insn),
9168 vr4130_true_reg_dependence_p_1, &insn);
9172 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9173 the ready queue and that INSN2 is the instruction after it, return
9174 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9175 in which INSN1 and INSN2 can probably issue in parallel, but for
9176 which (INSN2, INSN1) should be less sensitive to instruction
9177 alignment than (INSN1, INSN2). See 4130.md for more details. */
9180 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9184 /* Check for the following case:
9186 1) there is some other instruction X with an anti dependence on INSN1;
9187 2) X has a higher priority than INSN2; and
9188 3) X is an arithmetic instruction (and thus has no unit restrictions).
9190 If INSN1 is the last instruction blocking X, it would better to
9191 choose (INSN1, X) over (INSN2, INSN1). */
9192 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9193 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9194 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9195 && recog_memoized (XEXP (dep, 0)) >= 0
9196 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9199 if (vr4130_last_insn != 0
9200 && recog_memoized (insn1) >= 0
9201 && recog_memoized (insn2) >= 0)
9203 /* See whether INSN1 and INSN2 use different execution units,
9204 or if they are both ALU-type instructions. If so, they can
9205 probably execute in parallel. */
9206 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9207 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9208 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9210 /* If only one of the instructions has a dependence on
9211 vr4130_last_insn, prefer to schedule the other one first. */
9212 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9213 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9217 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9218 is not an ALU-type instruction and if INSN1 uses the same
9219 execution unit. (Note that if this condition holds, we already
9220 know that INSN2 uses a different execution unit.) */
9221 if (class1 != VR4130_CLASS_ALU
9222 && recog_memoized (vr4130_last_insn) >= 0
9223 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9230 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9231 queue with at least two instructions. Swap the first two if
9232 vr4130_swap_insns_p says that it could be worthwhile. */
9235 vr4130_reorder (rtx *ready, int nready)
9237 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9238 mips_promote_ready (ready, nready - 2, nready - 1);
9241 /* Remove the instruction at index LOWER from ready queue READY and
9242 reinsert it in front of the instruction at index HIGHER. LOWER must
9246 mips_promote_ready (rtx *ready, int lower, int higher)
9251 new_head = ready[lower];
9252 for (i = lower; i < higher; i++)
9253 ready[i] = ready[i + 1];
9254 ready[i] = new_head;
9257 /* Implement TARGET_SCHED_REORDER. */
9260 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9261 rtx *ready, int *nreadyp, int cycle)
9263 if (!reload_completed && TUNE_MACC_CHAINS)
9266 mips_macc_chains_last_hilo = 0;
9268 mips_macc_chains_reorder (ready, *nreadyp);
9270 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9273 vr4130_last_insn = 0;
9275 vr4130_reorder (ready, *nreadyp);
9277 return mips_issue_rate ();
9280 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9283 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9286 switch (GET_CODE (PATTERN (insn)))
9290 /* Don't count USEs and CLOBBERs against the issue rate. */
9295 if (!reload_completed && TUNE_MACC_CHAINS)
9296 mips_macc_chains_record (insn);
9297 vr4130_last_insn = insn;
9303 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9304 dependencies have no cost. */
9307 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9308 rtx dep ATTRIBUTE_UNUSED, int cost)
9310 if (REG_NOTE_KIND (link) != 0)
9315 /* Return the number of instructions that can be issued per cycle. */
9318 mips_issue_rate (void)
9322 case PROCESSOR_R4130:
9323 case PROCESSOR_R5400:
9324 case PROCESSOR_R5500:
9325 case PROCESSOR_R7000:
9326 case PROCESSOR_R9000:
9330 /* This is actually 4, but we get better performance if we claim 3.
9331 This is partly because of unwanted speculative code motion with the
9332 larger number, and partly because in most common cases we can't
9333 reach the theoretical max of 4. */
9341 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9342 be as wide as the scheduling freedom in the DFA. */
9345 mips_multipass_dfa_lookahead (void)
9347 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9348 if (mips_tune == PROCESSOR_SB1)
9354 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9355 return the first operand of the associated "pref" or "prefx" insn. */
9358 mips_prefetch_cookie (rtx write, rtx locality)
9360 /* store_streamed / load_streamed. */
9361 if (INTVAL (locality) <= 0)
9362 return GEN_INT (INTVAL (write) + 4);
9365 if (INTVAL (locality) <= 2)
9368 /* store_retained / load_retained. */
9369 return GEN_INT (INTVAL (write) + 6);
9372 /* MIPS builtin function support. */
9374 struct builtin_description
9376 /* The code of the main .md file instruction. See mips_builtin_type
9377 for more information. */
9378 enum insn_code icode;
9380 /* The floating-point comparison code to use with ICODE, if any. */
9381 enum mips_fp_condition cond;
9383 /* The name of the builtin function. */
9386 /* Specifies how the function should be expanded. */
9387 enum mips_builtin_type builtin_type;
9389 /* The function's prototype. */
9390 enum mips_function_type function_type;
9392 /* The target flags required for this function. */
9396 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9397 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9398 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9399 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9400 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
9402 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9404 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9405 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9406 "__builtin_mips_" #INSN "_" #COND "_s", \
9407 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9408 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9409 "__builtin_mips_" #INSN "_" #COND "_d", \
9410 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9412 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9413 The lower and upper forms require TARGET_FLAGS while the any and all
9414 forms require MASK_MIPS3D. */
9415 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9416 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9417 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9418 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9419 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9420 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9421 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9422 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9423 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9424 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9425 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9426 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9427 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9429 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9430 require MASK_MIPS3D. */
9431 #define CMP_4S_BUILTINS(INSN, COND) \
9432 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9433 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9434 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9436 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9437 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9438 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9441 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9442 instruction requires TARGET_FLAGS. */
9443 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9444 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9445 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9446 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9448 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9449 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9450 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9453 /* Define all the builtins related to c.cond.fmt condition COND. */
9454 #define CMP_BUILTINS(COND) \
9455 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE), \
9456 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9457 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9458 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE), \
9459 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9460 CMP_4S_BUILTINS (c, COND), \
9461 CMP_4S_BUILTINS (cabs, COND)
9463 /* __builtin_mips_abs_ps() maps to the standard absM2 pattern. */
9464 #define CODE_FOR_mips_abs_ps CODE_FOR_absv2sf2
9466 static const struct builtin_description mips_bdesc[] =
9468 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9469 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9470 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9471 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE),
9472 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE),
9473 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9474 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9475 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE),
9477 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT, MASK_PAIRED_SINGLE),
9478 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9479 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9480 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9481 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9483 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9484 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9485 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9486 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9487 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9488 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9490 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9491 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9492 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9493 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9494 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9495 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9497 MIPS_FP_CONDITIONS (CMP_BUILTINS)
9500 /* Builtin functions for the SB-1 processor. */
9502 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
9504 static const struct builtin_description sb1_bdesc[] =
9506 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE)
9509 /* This helps provide a mapping from builtin function codes to bdesc
9514 /* The builtin function table that this entry describes. */
9515 const struct builtin_description *bdesc;
9517 /* The number of entries in the builtin function table. */
9520 /* The target processor that supports these builtin functions.
9521 PROCESSOR_DEFAULT means we enable them for all processors. */
9522 enum processor_type proc;
9525 static const struct bdesc_map bdesc_arrays[] =
9527 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_DEFAULT },
9528 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 }
9531 /* Take the head of argument list *ARGLIST and convert it into a form
9532 suitable for input operand OP of instruction ICODE. Return the value
9533 and point *ARGLIST at the next element of the list. */
9536 mips_prepare_builtin_arg (enum insn_code icode,
9537 unsigned int op, tree *arglist)
9540 enum machine_mode mode;
9542 value = expand_expr (TREE_VALUE (*arglist), NULL_RTX, VOIDmode, 0);
9543 mode = insn_data[icode].operand[op].mode;
9544 if (!insn_data[icode].operand[op].predicate (value, mode))
9545 value = copy_to_mode_reg (mode, value);
9547 *arglist = TREE_CHAIN (*arglist);
9551 /* Return an rtx suitable for output operand OP of instruction ICODE.
9552 If TARGET is non-null, try to use it where possible. */
9555 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
9557 enum machine_mode mode;
9559 mode = insn_data[icode].operand[op].mode;
9560 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
9561 target = gen_reg_rtx (mode);
9566 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
9569 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9570 enum machine_mode mode ATTRIBUTE_UNUSED,
9571 int ignore ATTRIBUTE_UNUSED)
9573 enum insn_code icode;
9574 enum mips_builtin_type type;
9575 tree fndecl, arglist;
9577 const struct builtin_description *bdesc;
9578 const struct bdesc_map *m;
9580 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9581 arglist = TREE_OPERAND (exp, 1);
9582 fcode = DECL_FUNCTION_CODE (fndecl);
9585 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9587 if (fcode < m->size)
9590 icode = bdesc[fcode].icode;
9591 type = bdesc[fcode].builtin_type;
9601 case MIPS_BUILTIN_DIRECT:
9602 return mips_expand_builtin_direct (icode, target, arglist);
9604 case MIPS_BUILTIN_MOVT:
9605 case MIPS_BUILTIN_MOVF:
9606 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
9609 case MIPS_BUILTIN_CMP_ANY:
9610 case MIPS_BUILTIN_CMP_ALL:
9611 case MIPS_BUILTIN_CMP_UPPER:
9612 case MIPS_BUILTIN_CMP_LOWER:
9613 case MIPS_BUILTIN_CMP_SINGLE:
9614 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
9622 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
9625 mips_init_builtins (void)
9627 const struct builtin_description *d;
9628 const struct bdesc_map *m;
9629 tree types[(int) MIPS_MAX_FTYPE_MAX];
9630 tree V2SF_type_node;
9631 unsigned int offset;
9633 /* We have only builtins for -mpaired-single and -mips3d. */
9634 if (!TARGET_PAIRED_SINGLE_FLOAT)
9637 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
9639 types[MIPS_V2SF_FTYPE_V2SF]
9640 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
9642 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
9643 = build_function_type_list (V2SF_type_node,
9644 V2SF_type_node, V2SF_type_node, NULL_TREE);
9646 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
9647 = build_function_type_list (V2SF_type_node,
9648 V2SF_type_node, V2SF_type_node,
9649 integer_type_node, NULL_TREE);
9651 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
9652 = build_function_type_list (V2SF_type_node,
9653 V2SF_type_node, V2SF_type_node,
9654 V2SF_type_node, V2SF_type_node, NULL_TREE);
9656 types[MIPS_V2SF_FTYPE_SF_SF]
9657 = build_function_type_list (V2SF_type_node,
9658 float_type_node, float_type_node, NULL_TREE);
9660 types[MIPS_INT_FTYPE_V2SF_V2SF]
9661 = build_function_type_list (integer_type_node,
9662 V2SF_type_node, V2SF_type_node, NULL_TREE);
9664 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
9665 = build_function_type_list (integer_type_node,
9666 V2SF_type_node, V2SF_type_node,
9667 V2SF_type_node, V2SF_type_node, NULL_TREE);
9669 types[MIPS_INT_FTYPE_SF_SF]
9670 = build_function_type_list (integer_type_node,
9671 float_type_node, float_type_node, NULL_TREE);
9673 types[MIPS_INT_FTYPE_DF_DF]
9674 = build_function_type_list (integer_type_node,
9675 double_type_node, double_type_node, NULL_TREE);
9677 types[MIPS_SF_FTYPE_V2SF]
9678 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
9680 types[MIPS_SF_FTYPE_SF]
9681 = build_function_type_list (float_type_node,
9682 float_type_node, NULL_TREE);
9684 types[MIPS_SF_FTYPE_SF_SF]
9685 = build_function_type_list (float_type_node,
9686 float_type_node, float_type_node, NULL_TREE);
9688 types[MIPS_DF_FTYPE_DF]
9689 = build_function_type_list (double_type_node,
9690 double_type_node, NULL_TREE);
9692 types[MIPS_DF_FTYPE_DF_DF]
9693 = build_function_type_list (double_type_node,
9694 double_type_node, double_type_node, NULL_TREE);
9696 /* Iterate through all of the bdesc arrays, initializing all of the
9697 builtin functions. */
9700 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9702 if (m->proc == PROCESSOR_DEFAULT || (m->proc == mips_arch))
9703 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
9704 if ((d->target_flags & target_flags) == d->target_flags)
9705 lang_hooks.builtin_function (d->name, types[d->function_type],
9706 d - m->bdesc + offset,
9707 BUILT_IN_MD, NULL, NULL);
9712 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
9713 .md pattern and ARGLIST is the list of function arguments. TARGET,
9714 if nonnull, suggests a good place to put the result. */
9717 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist)
9719 rtx ops[MAX_RECOG_OPERANDS];
9722 target = mips_prepare_builtin_target (icode, 0, target);
9723 for (i = 1; i < insn_data[icode].n_operands; i++)
9724 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
9726 switch (insn_data[icode].n_operands)
9729 emit_insn (GEN_FCN (icode) (target, ops[1]));
9733 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2]));
9737 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2], ops[3]));
9746 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
9747 function (TYPE says which). ARGLIST is the list of arguments to the
9748 function, ICODE is the instruction that should be used to compare
9749 the first two arguments, and COND is the condition it should test.
9750 TARGET, if nonnull, suggests a good place to put the result. */
9753 mips_expand_builtin_movtf (enum mips_builtin_type type,
9754 enum insn_code icode, enum mips_fp_condition cond,
9755 rtx target, tree arglist)
9757 rtx cmp_result, op0, op1;
9759 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9760 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9761 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9762 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
9764 icode = CODE_FOR_mips_cond_move_tf_ps;
9765 target = mips_prepare_builtin_target (icode, 0, target);
9766 if (type == MIPS_BUILTIN_MOVT)
9768 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9769 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9773 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9774 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9776 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
9780 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
9781 of the comparison instruction and COND is the condition it should test.
9782 ARGLIST is the list of function arguments and TARGET, if nonnull,
9783 suggests a good place to put the boolean result. */
9786 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
9787 enum insn_code icode, enum mips_fp_condition cond,
9788 rtx target, tree arglist)
9790 rtx label1, label2, if_then_else;
9791 rtx pat, cmp_result, ops[MAX_RECOG_OPERANDS];
9792 rtx target_if_equal, target_if_unequal;
9795 if (target == 0 || GET_MODE (target) != SImode)
9796 target = gen_reg_rtx (SImode);
9798 /* Prepare the operands to the comparison. */
9799 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9800 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
9801 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
9803 switch (insn_data[icode].n_operands)
9806 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond));
9810 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2],
9811 ops[3], ops[4], GEN_INT (cond));
9818 /* If the comparison sets more than one register, we define the result
9819 to be 0 if all registers are false and -1 if all registers are true.
9820 The value of the complete result is indeterminate otherwise. It is
9821 possible to test individual registers using SUBREGs.
9823 Set up CMP_RESULT, CMP_VALUE, TARGET_IF_EQUAL and TARGET_IF_UNEQUAL so
9824 that the result should be TARGET_IF_EQUAL if (EQ CMP_RESULT CMP_VALUE)
9825 and TARGET_IF_UNEQUAL otherwise. */
9826 if (builtin_type == MIPS_BUILTIN_CMP_ALL)
9829 target_if_equal = const1_rtx;
9830 target_if_unequal = const0_rtx;
9835 target_if_equal = const0_rtx;
9836 target_if_unequal = const1_rtx;
9837 if (builtin_type == MIPS_BUILTIN_CMP_UPPER)
9838 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 4);
9839 else if (builtin_type == MIPS_BUILTIN_CMP_LOWER)
9840 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 0);
9843 /* First assume that CMP_RESULT == CMP_VALUE. */
9844 emit_move_insn (target, target_if_equal);
9846 /* Branch to LABEL1 if CMP_RESULT != CMP_VALUE. */
9848 label1 = gen_label_rtx ();
9849 label2 = gen_label_rtx ();
9851 = gen_rtx_IF_THEN_ELSE (VOIDmode,
9852 gen_rtx_fmt_ee (NE, GET_MODE (cmp_result),
9853 cmp_result, GEN_INT (cmp_value)),
9854 gen_rtx_LABEL_REF (VOIDmode, label1), pc_rtx);
9855 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, if_then_else));
9856 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
9857 gen_rtx_LABEL_REF (VOIDmode, label2)));
9859 emit_label (label1);
9861 /* Fix TARGET for CMP_RESULT != CMP_VALUE. */
9862 emit_move_insn (target, target_if_unequal);
9863 emit_label (label2);
9868 #include "gt-mips.h"