1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option) any later version.
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 59 Temple Place - Suite 330,
24 Boston, MA 02111-1307, USA. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   The UNSPEC number encodes the address's mips_symbol_type as an
   offset from UNSPEC_ADDRESS_FIRST (see UNSPEC_ADDRESS_TYPE).  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66 /* Extract the symbol or label from UNSPEC wrapper X. */
67 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  The type is encoded
   as the offset of X's UNSPEC number from UNSPEC_ADDRESS_FIRST.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save & restore registers.

   Use a maximum gap of 0x100 in the mips16 case.  We can then use
   unextended instructions to save and restore registers, and to
   allocate and deallocate the top part of the frame.

   The value in the !mips16 case must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  (0x7ff0 fits in a signed
   16-bit immediate and is a multiple of 16.)  */
#define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
85 /* True if INSN is a mips.md pattern or asm statement. */
86 #define USEFUL_INSN_P(INSN) \
88 && GET_CODE (PATTERN (INSN)) != USE \
89 && GET_CODE (PATTERN (INSN)) != CLOBBER \
90 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
91 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93 /* If INSN is a delayed branch sequence, return the first instruction
94 in the sequence, otherwise return INSN itself. */
95 #define SEQ_BEGIN(INSN) \
96 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
97 ? XVECEXP (PATTERN (INSN), 0, 0) \
100 /* Likewise for the last instruction in a delayed branch sequence. */
101 #define SEQ_END(INSN) \
102 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
103 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  For an insn
   that is not a delayed-branch SEQUENCE, this visits just INSN itself
   (SEQ_BEGIN and SEQ_END both return INSN in that case).  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
113 /* Classifies an address.
116 A natural register + offset address. The register satisfies
117 mips_valid_base_register_p and the offset is a const_arith_operand.
120 A LO_SUM rtx. The first operand is a valid base register and
121 the second operand is a symbolic address.
124 A signed 16-bit constant address.
127 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
128 enum mips_address_type {
135 /* Classifies the prototype of a builtin function. */
136 enum mips_function_type
138 MIPS_V2SF_FTYPE_V2SF,
139 MIPS_V2SF_FTYPE_V2SF_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
142 MIPS_V2SF_FTYPE_SF_SF,
143 MIPS_INT_FTYPE_V2SF_V2SF,
144 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
145 MIPS_INT_FTYPE_SF_SF,
146 MIPS_INT_FTYPE_DF_DF,
157 /* Specifies how a builtin function should be converted into rtl. */
158 enum mips_builtin_type
160 /* The builtin corresponds directly to an .md pattern. The return
161 value is mapped to operand 0 and the arguments are mapped to
162 operands 1 and above. */
165 /* The builtin corresponds to a comparison instruction followed by
166 a mips_cond_move_tf_ps pattern. The first two arguments are the
167 values to compare and the second two arguments are the vector
168 operands for the movt.ps or movf.ps instruction (in assembly order). */
172 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
173 of this instruction is the result of the comparison, which has mode
174 CCV2 or CCV4. The function arguments are mapped to operands 1 and
175 above. The function's return value is an SImode boolean that is
176 true under the following conditions:
178 MIPS_BUILTIN_CMP_ANY: one of the registers is true
179 MIPS_BUILTIN_CMP_ALL: all of the registers are true
180 MIPS_BUILTIN_CMP_LOWER: the first register is true
181 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
182 MIPS_BUILTIN_CMP_ANY,
183 MIPS_BUILTIN_CMP_ALL,
184 MIPS_BUILTIN_CMP_UPPER,
185 MIPS_BUILTIN_CMP_LOWER,
187 /* As above, but the instruction only sets a single $fcc register. */
188 MIPS_BUILTIN_CMP_SINGLE
191 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
192 #define MIPS_FP_CONDITIONS(MACRO) \
/* Enumerates the codes above as MIPS_FP_COND_<X>.  Applied via
   MIPS_FP_CONDITIONS to build enum mips_fp_condition below.  */
#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
212 enum mips_fp_condition {
213 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
/* Index X provides the string representation of MIPS_FP_COND_<X>.
   Stringizes its argument; applied via MIPS_FP_CONDITIONS to build
   the mips_fp_conditions[] table below.  */
#define STRINGIFY(X) #X
218 static const char *const mips_fp_conditions[] = {
219 MIPS_FP_CONDITIONS (STRINGIFY)
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  Used as the callback
   type for mips_save_restore_reg and mips_for_each_saved_reg.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);
226 struct mips16_constant;
227 struct mips_arg_info;
228 struct mips_address_info;
229 struct mips_integer_op;
232 static enum mips_symbol_type mips_classify_symbol (rtx);
233 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
234 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
235 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
236 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
237 static bool mips_classify_address (struct mips_address_info *, rtx,
238 enum machine_mode, int);
239 static bool mips_cannot_force_const_mem (rtx);
240 static int mips_symbol_insns (enum mips_symbol_type);
241 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
242 static rtx mips_force_temporary (rtx, rtx);
243 static rtx mips_split_symbol (rtx, rtx);
244 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
245 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
246 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
247 static unsigned int mips_build_lower (struct mips_integer_op *,
248 unsigned HOST_WIDE_INT);
249 static unsigned int mips_build_integer (struct mips_integer_op *,
250 unsigned HOST_WIDE_INT);
251 static void mips_move_integer (rtx, unsigned HOST_WIDE_INT);
252 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
253 static int m16_check_op (rtx, int, int, int);
254 static bool mips_rtx_costs (rtx, int, int, int *);
255 static int mips_address_cost (rtx);
256 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
257 static void mips_load_call_address (rtx, rtx, int);
258 static bool mips_function_ok_for_sibcall (tree, tree);
259 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
260 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
261 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
262 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
263 tree, int, struct mips_arg_info *);
264 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
265 static void mips_set_architecture (const struct mips_cpu_info *);
266 static void mips_set_tune (const struct mips_cpu_info *);
267 static bool mips_handle_option (size_t, const char *, int);
268 static struct machine_function *mips_init_machine_status (void);
269 static void print_operand_reloc (FILE *, rtx, const char **);
271 static void irix_output_external_libcall (rtx);
273 static void mips_file_start (void);
274 static void mips_file_end (void);
275 static bool mips_rewrite_small_data_p (rtx);
276 static int mips_small_data_pattern_1 (rtx *, void *);
277 static int mips_rewrite_small_data_1 (rtx *, void *);
278 static bool mips_function_has_gp_insn (void);
279 static unsigned int mips_global_pointer (void);
280 static bool mips_save_reg_p (unsigned int);
281 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
282 mips_save_restore_fn);
283 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
284 static void mips_output_cplocal (void);
285 static void mips_emit_loadgp (void);
286 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
287 static void mips_set_frame_expr (rtx);
288 static rtx mips_frame_set (rtx, rtx);
289 static void mips_save_reg (rtx, rtx);
290 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
291 static void mips_restore_reg (rtx, rtx);
292 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
293 HOST_WIDE_INT, tree);
294 static int symbolic_expression_p (rtx);
295 static void mips_select_rtx_section (enum machine_mode, rtx,
296 unsigned HOST_WIDE_INT);
297 static void mips_function_rodata_section (tree);
298 static bool mips_in_small_data_p (tree);
299 static int mips_fpr_return_fields (tree, tree *);
300 static bool mips_return_in_msb (tree);
301 static rtx mips_return_fpr_pair (enum machine_mode mode,
302 enum machine_mode mode1, HOST_WIDE_INT,
303 enum machine_mode mode2, HOST_WIDE_INT);
304 static rtx mips16_gp_pseudo_reg (void);
305 static void mips16_fp_args (FILE *, int, int);
306 static void build_mips16_function_stub (FILE *);
307 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
308 static void dump_constants (struct mips16_constant *, rtx);
309 static int mips16_insn_length (rtx);
310 static int mips16_rewrite_pool_refs (rtx *, void *);
311 static void mips16_lay_out_constants (void);
312 static void mips_sim_reset (struct mips_sim *);
313 static void mips_sim_init (struct mips_sim *, state_t);
314 static void mips_sim_next_cycle (struct mips_sim *);
315 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
316 static int mips_sim_wait_regs_2 (rtx *, void *);
317 static void mips_sim_wait_regs_1 (rtx *, void *);
318 static void mips_sim_wait_regs (struct mips_sim *, rtx);
319 static void mips_sim_wait_units (struct mips_sim *, rtx);
320 static void mips_sim_wait_insn (struct mips_sim *, rtx);
321 static void mips_sim_record_set (rtx, rtx, void *);
322 static void mips_sim_issue_insn (struct mips_sim *, rtx);
323 static void mips_sim_issue_nop (struct mips_sim *);
324 static void mips_sim_finish_insn (struct mips_sim *, rtx);
325 static void vr4130_avoid_branch_rt_conflict (rtx);
326 static void vr4130_align_insns (void);
327 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
328 static void mips_avoid_hazards (void);
329 static void mips_reorg (void);
330 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
331 static bool mips_matching_cpu_name_p (const char *, const char *);
332 static const struct mips_cpu_info *mips_parse_cpu (const char *);
333 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
334 static bool mips_return_in_memory (tree, tree);
335 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
336 static void mips_macc_chains_record (rtx);
337 static void mips_macc_chains_reorder (rtx *, int);
338 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
339 static bool vr4130_true_reg_dependence_p (rtx);
340 static bool vr4130_swap_insns_p (rtx, rtx);
341 static void vr4130_reorder (rtx *, int);
342 static void mips_promote_ready (rtx *, int, int);
343 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
344 static int mips_variable_issue (FILE *, int, rtx, int);
345 static int mips_adjust_cost (rtx, rtx, rtx, int);
346 static int mips_issue_rate (void);
347 static int mips_multipass_dfa_lookahead (void);
348 static void mips_init_libfuncs (void);
349 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
351 static tree mips_build_builtin_va_list (void);
352 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
353 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
355 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
357 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
359 static bool mips_valid_pointer_mode (enum machine_mode);
360 static bool mips_vector_mode_supported_p (enum machine_mode);
361 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
362 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
363 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
364 static void mips_init_builtins (void);
365 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree);
366 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
367 enum insn_code, enum mips_fp_condition,
369 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
370 enum insn_code, enum mips_fp_condition,
373 /* Structure to be filled in by compute_frame_size with register
374 save masks, and offsets for the current function. */
376 struct mips_frame_info GTY(())
378 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
379 HOST_WIDE_INT var_size; /* # bytes that variables take up */
380 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
381 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
382 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
383 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
384 unsigned int mask; /* mask of saved gp registers */
385 unsigned int fmask; /* mask of saved fp registers */
386 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
387 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
388 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
389 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
390 bool initialized; /* true if frame size already calculated */
391 int num_gp; /* number of gp registers saved */
392 int num_fp; /* number of fp registers saved */
395 struct machine_function GTY(()) {
396 /* Pseudo-reg holding the value of $28 in a mips16 function which
397 refers to GP relative global variables. */
398 rtx mips16_gp_pseudo_rtx;
400 /* Current frame information, calculated by compute_frame_size. */
401 struct mips_frame_info frame;
403 /* The register to use as the global pointer within this function. */
404 unsigned int global_pointer;
406 /* True if mips_adjust_insn_length should ignore an instruction's
408 bool ignore_hazard_length_p;
410 /* True if the whole function is suitable for .set noreorder and
412 bool all_noreorder_p;
414 /* True if the function is known to have an instruction that needs $gp. */
418 /* Information about a single argument. */
421 /* True if the argument is passed in a floating-point register, or
422 would have been if we hadn't run out of registers. */
425 /* The number of words passed in registers, rounded up. */
426 unsigned int reg_words;
428 /* For EABI, the offset of the first register from GP_ARG_FIRST or
429 FP_ARG_FIRST. For other ABIs, the offset of the first register from
430 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
431 comment for details).
433 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
435 unsigned int reg_offset;
437 /* The number of words that must be passed on the stack, rounded up. */
438 unsigned int stack_words;
440 /* The offset from the start of the stack overflow area of the argument's
441 first stack word. Only meaningful when STACK_WORDS is nonzero. */
442 unsigned int stack_offset;
446 /* Information about an address described by mips_address_type.
452 REG is the base register and OFFSET is the constant offset.
455 REG is the register that contains the high part of the address,
456 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
457 is the type of OFFSET's symbol.
460 SYMBOL_TYPE is the type of symbol being referenced. */
462 struct mips_address_info
464 enum mips_address_type type;
467 enum mips_symbol_type symbol_type;
471 /* One stage in a constant building sequence. These sequences have
475 A = A CODE[1] VALUE[1]
476 A = A CODE[2] VALUE[2]
479 where A is an accumulator, each CODE[i] is a binary rtl operation
480 and each VALUE[i] is a constant integer. */
481 struct mips_integer_op {
483 unsigned HOST_WIDE_INT value;
487 /* The largest number of operations needed to load an integer constant.
488 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
489 When the lowest bit is clear, we can try, but reject a sequence with
490 an extra SLL at the end. */
491 #define MIPS_MAX_INTEGER_OPS 7
/* Global variables for machine-dependent things.  */

/* Threshold for data being put into the small data/bss area, instead
   of the normal data area.  Starts out as -1 (presumably "not yet
   set" until options are processed -- TODO confirm at the use site).  */
int mips_section_threshold = -1;

/* Count the number of .file directives, so that .loc is up to date.  */
int num_source_filenames = 0;

/* Count the number of sdb-related labels that are generated (to find
   block start and end boundaries).  */
int sdb_label_count = 0;
507 /* Next label # for each statement for Silicon Graphics IRIS systems. */
510 /* Linked list of all externals that are to be emitted when optimizing
511 for the global pointer if they haven't been declared by the end of
512 the program with an appropriate .comm or initialization. */
514 struct extern_list GTY (())
516 struct extern_list *next; /* next external */
517 const char *name; /* name of the external */
518 int size; /* size in bytes */
/* Head of the linked list of pending externals (see struct extern_list
   above); null when the list is empty.  */
static GTY (()) struct extern_list *extern_head = 0;

/* Name of the file containing the current function.  */
const char *current_function_file = "";
526 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
/* Nonzero if the next branch instruction is a branch likely, not a
   branch normal.  */
int mips_branch_likely;
535 /* The operands passed to the last cmpMM expander. */
/* The target cpu for code generation.  */
enum processor_type mips_arch;
const struct mips_cpu_info *mips_arch_info;

/* The target cpu for optimization and scheduling.  */
enum processor_type mips_tune;
const struct mips_cpu_info *mips_tune_info;

/* Which instruction set architecture to use.  */

/* Which ABI to use.  */
int mips_abi = MIPS_ABI_DEFAULT;

/* Whether we are generating mips16 hard float code.  In mips16 mode
   we always set TARGET_SOFT_FLOAT; this variable is nonzero if
   -msoft-float was not specified by the user, which means that we
   should arrange to call mips32 hard floating point code.  */
int mips16_hard_float;

/* The arguments passed to -march and -mtune.  */
static const char *mips_arch_string;
static const char *mips_tune_string;

/* The architecture selected by -mipsN.  */
static const struct mips_cpu_info *mips_isa_info;

/* Name of the cache-flush function (defaults to CACHE_FLUSH_FUNC).  */
const char *mips_cache_flush_func = CACHE_FLUSH_FUNC;

/* If TRUE, we split addresses into their high and low parts in the RTL.  */
int mips_split_addresses;

/* Mode used for saving/restoring general purpose registers.  */
static enum machine_mode gpr_mode;
/* Array giving truth value on whether or not a given hard register
   can support a given mode.  Indexed first by mode, then by register
   number.  */
char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* List of all MIPS punctuation characters used by print_operand.  */
char mips_print_operand_punct[256];

/* Map GCC register number to debugger register number.  */
int mips_dbx_regno[FIRST_PSEUDO_REGISTER];

/* A copy of the original flag_delayed_branch: see override_options.  */
static int mips_flag_delayed_branch;

/* Nonzero before the first filename is output (name suggests it is
   cleared after the first output -- the clearing site is not visible
   here).  */
static GTY (()) int mips_output_filename_first_time = 1;

/* mips_split_p[X] is true if symbols of type X can be split by
   mips_split_symbol().  */
static bool mips_split_p[NUM_SYMBOL_TYPES];

/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
   if they are matched by a special .md file pattern.  */
static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];

/* Likewise for HIGHs.  */
static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
600 /* Map hard register number to register class */
601 const enum reg_class mips_regno_to_class[] =
603 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
604 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
605 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
606 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
607 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
608 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
609 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
610 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
611 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
612 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
613 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
614 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
615 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
616 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
617 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
618 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
619 HI_REG, LO_REG, NO_REGS, ST_REGS,
620 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
621 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
622 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
623 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
624 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
625 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
626 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
627 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
628 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
629 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
630 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
631 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
632 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
633 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
634 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
635 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
636 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
637 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
638 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
639 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
640 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
641 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
642 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
643 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
644 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
645 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
646 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS
/* Map register constraint character to register class.  Indexed by
   the constraint character's code (one slot per possible char).  */
enum reg_class mips_char_to_class[256];
652 /* A table describing all the processors gcc knows about. Names are
653 matched in the order listed. The first mention of an ISA level is
654 taken as the canonical name for that ISA.
656 To ease comparison, please keep this table in the same order as
657 gas's mips_cpu_info_table[]. */
658 const struct mips_cpu_info mips_cpu_info_table[] = {
659 /* Entries for generic ISAs */
660 { "mips1", PROCESSOR_R3000, 1 },
661 { "mips2", PROCESSOR_R6000, 2 },
662 { "mips3", PROCESSOR_R4000, 3 },
663 { "mips4", PROCESSOR_R8000, 4 },
664 { "mips32", PROCESSOR_4KC, 32 },
665 { "mips32r2", PROCESSOR_M4K, 33 },
666 { "mips64", PROCESSOR_5KC, 64 },
669 { "r3000", PROCESSOR_R3000, 1 },
670 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
671 { "r3900", PROCESSOR_R3900, 1 },
674 { "r6000", PROCESSOR_R6000, 2 },
677 { "r4000", PROCESSOR_R4000, 3 },
678 { "vr4100", PROCESSOR_R4100, 3 },
679 { "vr4111", PROCESSOR_R4111, 3 },
680 { "vr4120", PROCESSOR_R4120, 3 },
681 { "vr4130", PROCESSOR_R4130, 3 },
682 { "vr4300", PROCESSOR_R4300, 3 },
683 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
684 { "r4600", PROCESSOR_R4600, 3 },
685 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
686 { "r4650", PROCESSOR_R4650, 3 },
689 { "r8000", PROCESSOR_R8000, 4 },
690 { "vr5000", PROCESSOR_R5000, 4 },
691 { "vr5400", PROCESSOR_R5400, 4 },
692 { "vr5500", PROCESSOR_R5500, 4 },
693 { "rm7000", PROCESSOR_R7000, 4 },
694 { "rm9000", PROCESSOR_R9000, 4 },
697 { "4kc", PROCESSOR_4KC, 32 },
698 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
699 { "4kp", PROCESSOR_4KP, 32 },
701 /* MIPS32 Release 2 */
702 { "m4k", PROCESSOR_M4K, 33 },
703 { "24k", PROCESSOR_24K, 33 },
704 { "24kc", PROCESSOR_24K, 33 }, /* 24K no FPU */
705 { "24kf", PROCESSOR_24K, 33 }, /* 24K 1:2 FPU */
706 { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
709 { "5kc", PROCESSOR_5KC, 64 },
710 { "20kc", PROCESSOR_20KC, 64 },
711 { "sb1", PROCESSOR_SB1, 64 },
712 { "sr71000", PROCESSOR_SR71000, 64 },
718 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
719 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
720 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
/* Initialize the GCC target structure.  */

/* Assembler directives for emitting aligned 16-, 32- and 64-bit data.  */
#undef TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"

/* Prologue/epilogue output and section-selection hooks.  */
#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
#undef TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section

/* Instruction-scheduling hooks.  */
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER mips_sched_reorder
#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
  mips_multipass_dfa_lookahead
752 #undef TARGET_DEFAULT_TARGET_FLAGS
753 #define TARGET_DEFAULT_TARGET_FLAGS \
755 | TARGET_CPU_DEFAULT \
756 | TARGET_ENDIAN_DEFAULT \
757 | TARGET_FP_EXCEPTIONS_DEFAULT \
758 | MASK_CHECK_ZERO_DIV \
/* Option processing.  */
#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION mips_handle_option

/* Calls and costs.  */
#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall

#undef TARGET_VALID_POINTER_MODE
#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mips_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mips_address_cost

#undef TARGET_IN_SMALL_DATA_P
#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p

#undef TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg

/* Assembly-file output hooks.  */
#undef TARGET_ASM_FILE_START
#undef TARGET_ASM_FILE_END
#define TARGET_ASM_FILE_START mips_file_start
#define TARGET_ASM_FILE_END mips_file_end
#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS mips_init_libfuncs

/* Varargs handling.  */
#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr

/* Argument and return-value promotion.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB mips_return_in_msb

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true

/* Argument passing.  */
#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
#undef TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES mips_callee_copies
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p

/* Builtin functions.  */
#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS mips_init_builtins
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN mips_expand_builtin

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem

/* The single target-hook vector for this backend.  */
struct gcc_target targetm = TARGET_INITIALIZER;
840 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
842 static enum mips_symbol_type
843 mips_classify_symbol (rtx x)
845 if (GET_CODE (x) == LABEL_REF)
848 return SYMBOL_CONSTANT_POOL;
850 return SYMBOL_GOT_LOCAL;
851 return SYMBOL_GENERAL;
854 gcc_assert (GET_CODE (x) == SYMBOL_REF);
856 if (SYMBOL_REF_TLS_MODEL (x))
859 if (CONSTANT_POOL_ADDRESS_P (x))
862 return SYMBOL_CONSTANT_POOL;
865 return SYMBOL_GOT_LOCAL;
867 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
868 return SYMBOL_SMALL_DATA;
870 return SYMBOL_GENERAL;
873 if (SYMBOL_REF_SMALL_P (x))
874 return SYMBOL_SMALL_DATA;
878 if (SYMBOL_REF_DECL (x) == 0)
879 return SYMBOL_REF_LOCAL_P (x) ? SYMBOL_GOT_LOCAL : SYMBOL_GOT_GLOBAL;
881 /* There are three cases to consider:
883 - o32 PIC (either with or without explicit relocs)
884 - n32/n64 PIC without explicit relocs
885 - n32/n64 PIC with explicit relocs
887 In the first case, both local and global accesses will use an
888 R_MIPS_GOT16 relocation. We must correctly predict which of
889 the two semantics (local or global) the assembler and linker
890 will apply. The choice doesn't depend on the symbol's
891 visibility, so we deliberately ignore decl_visibility and
894 In the second case, the assembler will not use R_MIPS_GOT16
895 relocations, but it chooses between local and global accesses
896 in the same way as for o32 PIC.
898 In the third case we have more freedom since both forms of
899 access will work for any kind of symbol. However, there seems
900 little point in doing things differently. */
901 if (DECL_P (SYMBOL_REF_DECL (x)) && TREE_PUBLIC (SYMBOL_REF_DECL (x)))
902 return SYMBOL_GOT_GLOBAL;
904 return SYMBOL_GOT_LOCAL;
907 return SYMBOL_GENERAL;
911 /* Split X into a base and a constant offset, storing them in *BASE
912 and *OFFSET respectively. */
915 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
/* NOTE(review): the initialisation of *offset and the unwrapping of the
   CONST/PLUS rtx are elided from this extract; only the offset
   accumulation is visible below.  */
919 if (GET_CODE (x) == CONST)
922 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
924 *offset += INTVAL (XEXP (x, 1));
931 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
932 to the same object as SYMBOL. */
935 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
937 if (GET_CODE (symbol) != SYMBOL_REF)
/* Constant-pool entry: the object size is the pool entry's mode size.  */
940 if (CONSTANT_POOL_ADDRESS_P (symbol)
942 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
/* Otherwise bound the offset by the size of the symbol's decl, if any.
   NOTE(review): the offset >= 0 guards appear elided in this extract.  */
945 if (SYMBOL_REF_DECL (symbol) != 0
947 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
954 /* Return true if X is a symbolic constant that can be calculated in
955 the same way as a bare symbol. If it is, store the type of the
956 symbol in *SYMBOL_TYPE. */
959 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
961 HOST_WIDE_INT offset;
/* Strip any constant offset first, then classify the bare base.  */
963 mips_split_const (x, &x, &offset);
964 if (UNSPEC_ADDRESS_P (x))
965 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
966 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
968 *symbol_type = mips_classify_symbol (x);
969 if (*symbol_type == SYMBOL_TLS)
/* NOTE(review): several switch case labels and the offset == 0 fast
   path are elided in this extract (numbering jumps 969->978->980).  */
978 /* Check whether a nonzero offset is valid for the underlying
980 switch (*symbol_type)
986 /* If the target has 64-bit pointers and the object file only
987 supports 32-bit symbols, the values of those symbols will be
988 sign-extended. In this case we can't allow an arbitrary offset
989 in case the 32-bit value X + OFFSET has a different sign from X. */
990 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
991 return mips_offset_within_object_p (x, offset);
993 /* In other cases the relocations can handle any offset. */
996 case SYMBOL_CONSTANT_POOL:
997 /* Allow constant pool references to be converted to LABEL+CONSTANT.
998 In this case, we no longer have access to the underlying constant,
999 but the original symbol-based access was known to be valid. */
1000 if (GET_CODE (x) == LABEL_REF)
1005 case SYMBOL_SMALL_DATA:
1006 /* Make sure that the offset refers to something within the
1007 underlying object. This should guarantee that the final
1008 PC- or GP-relative offset is within the 16-bit limit. */
1009 return mips_offset_within_object_p (x, offset);
1011 case SYMBOL_GOT_LOCAL:
1012 case SYMBOL_GOTOFF_PAGE:
1013 /* The linker should provide enough local GOT entries for a
1014 16-bit offset. Larger offsets may lead to GOT overflow. */
1015 return SMALL_OPERAND (offset);
1017 case SYMBOL_GOT_GLOBAL:
1018 case SYMBOL_GOTOFF_GLOBAL:
1019 case SYMBOL_GOTOFF_CALL:
1020 case SYMBOL_GOTOFF_LOADGP:
1025 case SYMBOL_GOTTPREL:
1033 /* Return true if X is a symbolic constant whose value is not split
1034 into separate relocations. */
1037 mips_atomic_symbolic_constant_p (rtx x)
1039 enum mips_symbol_type type;
/* A symbol is "atomic" when its reloc type is not in mips_split_p[].  */
1040 return mips_symbolic_constant_p (x, &type) && !mips_split_p[type];
1044 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1047 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
/* Pseudos are only acceptable base registers once reload has assigned
   them a hard register (reg_renumber); the strict/non-strict branching
   around the renumber lookup is elided in this extract.  */
1049 if (regno >= FIRST_PSEUDO_REGISTER)
1053 regno = reg_renumber[regno];
1056 /* These fake registers will be eliminated to either the stack or
1057 hard frame pointer, both of which are usually valid base registers.
1058 Reload deals with the cases where the eliminated form isn't valid. */
1059 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1062 /* In mips16 mode, the stack pointer can only address word and doubleword
1063 values, nothing smaller. There are two problems here:
1065 (a) Instantiating virtual registers can introduce new uses of the
1066 stack pointer. If these virtual registers are valid addresses,
1067 the stack pointer should be too.
1069 (b) Most uses of the stack pointer are not made explicit until
1070 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1071 We don't know until that stage whether we'll be eliminating to the
1072 stack pointer (which needs the restriction) or the hard frame
1073 pointer (which doesn't).
1075 All in all, it seems more consistent to only enforce this restriction
1076 during and after reload. */
1077 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1078 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
/* mips16 addressing only reaches $0-$7/$16-$17; otherwise any GPR.  */
1080 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1084 /* Return true if X is a valid base register for the given mode.
1085 Allow only hard registers if STRICT. */
1088 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
/* Look through paradoxical/normal SUBREGs in the non-strict case;
   the REG_P test preceding the regno check is elided in this extract.  */
1090 if (!strict && GET_CODE (x) == SUBREG)
1094 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1098 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1099 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1102 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1103 enum machine_mode mode)
1105 switch (symbol_type)
1107 case SYMBOL_GENERAL:
1108 return !TARGET_MIPS16;
1110 case SYMBOL_SMALL_DATA:
/* NOTE(review): the return values for several cases below are elided in
   this extract; only the case labels and some comments survive.  */
1113 case SYMBOL_CONSTANT_POOL:
1114 /* PC-relative addressing is only available for lw and ld. */
1115 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1117 case SYMBOL_GOT_LOCAL:
1120 case SYMBOL_GOT_GLOBAL:
1121 /* The address will have to be loaded from the GOT first. */
1128 case SYMBOL_GOTTPREL:
1132 case SYMBOL_GOTOFF_PAGE:
1133 case SYMBOL_GOTOFF_GLOBAL:
1134 case SYMBOL_GOTOFF_CALL:
1135 case SYMBOL_GOTOFF_LOADGP:
1136 case SYMBOL_64_HIGH:
1145 /* Return true if X is a valid address for machine mode MODE. If it is,
1146 fill in INFO appropriately. STRICT is true if we should only accept
1147 hard base registers. */
1150 mips_classify_address (struct mips_address_info *info, rtx x,
1151 enum machine_mode mode, int strict)
1153 switch (GET_CODE (x))
/* NOTE(review): the case labels (REG, PLUS, LO_SUM, CONST_INT, ...) are
   elided in this extract; each handler below is otherwise intact.  */
1157 info->type = ADDRESS_REG;
1159 info->offset = const0_rtx;
1160 return mips_valid_base_register_p (info->reg, mode, strict);
/* Base + constant displacement.  */
1163 info->type = ADDRESS_REG;
1164 info->reg = XEXP (x, 0);
1165 info->offset = XEXP (x, 1);
1166 return (mips_valid_base_register_p (info->reg, mode, strict)
1167 && const_arith_operand (info->offset, VOIDmode));
/* LO_SUM: base plus the low part of a split symbolic constant.  */
1170 info->type = ADDRESS_LO_SUM;
1171 info->reg = XEXP (x, 0);
1172 info->offset = XEXP (x, 1);
1173 return (mips_valid_base_register_p (info->reg, mode, strict)
1174 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1175 && mips_symbolic_address_p (info->symbol_type, mode)
1176 && mips_lo_relocs[info->symbol_type] != 0);
1179 /* Small-integer addresses don't occur very often, but they
1180 are legitimate if $0 is a valid base register. */
1181 info->type = ADDRESS_CONST_INT;
1182 return !TARGET_MIPS16 && SMALL_INT (x);
/* Bare symbolic address: valid only when it needs no HI/LO split.  */
1187 info->type = ADDRESS_SYMBOLIC;
1188 return (mips_symbolic_constant_p (x, &info->symbol_type)
1189 && mips_symbolic_address_p (info->symbol_type, mode)
1190 && !mips_split_p[info->symbol_type]);
1197 /* Return true if X is a thread-local symbol. */
1200 mips_tls_operand_p (rtx x)
1202 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;

/* for_each_rtx callback: flag any TLS SYMBOL_REF inside *X.  */
1205 /* Return true if X can not be forced into a constant pool. */
1208 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1210 return mips_tls_operand_p (*x);

/* Implements TARGET_CANNOT_FORCE_CONST_MEM: TLS references must not be
   moved into the constant pool, since their values are thread-specific.  */
1213 /* Return true if X can not be forced into a constant pool. */
1216 mips_cannot_force_const_mem (rtx x)
1218 if (! TARGET_HAVE_TLS)
1221 return for_each_rtx (&x, &mips_tls_symbol_ref_1, 0);
1224 /* Return the number of instructions needed to load a symbol of the
1225 given type into a register. If valid in an address, the same number
1226 of instructions are needed for loads and stores. Treat extended
1227 mips16 instructions as two instructions. */
1230 mips_symbol_insns (enum mips_symbol_type type)
/* NOTE(review): the switch header and several return statements are
   elided in this extract; counts below are the ones that survive.  */
1234 case SYMBOL_GENERAL:
1235 /* In mips16 code, general symbols must be fetched from the
1240 /* When using 64-bit symbols, we need 5 preparatory instructions,
1243 lui $at,%highest(symbol)
1244 daddiu $at,$at,%higher(symbol)
1246 daddiu $at,$at,%hi(symbol)
1249 The final address is then $at + %lo(symbol). With 32-bit
1250 symbols we just need a preparatory lui. */
1251 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1253 case SYMBOL_SMALL_DATA:
1256 case SYMBOL_CONSTANT_POOL:
1257 /* This case is for mips16 only. Assume we'll need an
1258 extended instruction. */
1261 case SYMBOL_GOT_LOCAL:
1262 case SYMBOL_GOT_GLOBAL:
1263 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1264 the local/global classification is accurate. See override_options
1267 The worst cases are:
1269 (1) For local symbols when generating o32 or o64 code. The assembler
1275 ...and the final address will be $at + %lo(symbol).
1277 (2) For global symbols when -mxgot. The assembler will use:
1279 lui $at,%got_hi(symbol)
1282 ...and the final address will be $at + %got_lo(symbol). */
1285 case SYMBOL_GOTOFF_PAGE:
1286 case SYMBOL_GOTOFF_GLOBAL:
1287 case SYMBOL_GOTOFF_CALL:
1288 case SYMBOL_GOTOFF_LOADGP:
1289 case SYMBOL_64_HIGH:
1295 case SYMBOL_GOTTPREL:
1297 /* Check whether the offset is a 16- or 32-bit value. */
1298 return mips_split_p[type] ? 2 : 1;
1301 /* We don't treat a bare TLS symbol as a constant. */
1307 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1310 mips_stack_address_p (rtx x, enum machine_mode mode)
1312 struct mips_address_info addr;
/* Non-strict classification, then require a plain $sp base register.  */
1314 return (mips_classify_address (&addr, x, mode, false)
1315 && addr.type == ADDRESS_REG
1316 && addr.reg == stack_pointer_rtx);
1319 /* Return true if a value at OFFSET bytes from BASE can be accessed
1320 using an unextended mips16 instruction. MODE is the mode of the
1323 Usually the offset in an unextended instruction is a 5-bit field.
1324 The offset is unsigned and shifted left once for HIs, twice
1325 for SIs, and so on. An exception is SImode accesses off the
1326 stack pointer, which have an 8-bit immediate field. */
1329 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
/* Offset must be a non-negative CONST_INT aligned to the access size.  */
1332 && GET_CODE (offset) == CONST_INT
1333 && INTVAL (offset) >= 0
1334 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1336 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1337 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1338 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1344 /* Return the number of instructions needed to load or store a value
1345 of mode MODE at X. Return 0 if X isn't valid for MODE.
1347 For mips16 code, count extended instructions as two instructions. */
1350 mips_address_insns (rtx x, enum machine_mode mode)
1352 struct mips_address_info addr;
1355 if (mode == BLKmode)
1356 /* BLKmode is used for single unaligned loads and stores. */
1359 /* Each word of a multi-word value will be accessed individually. */
1360 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1362 if (mips_classify_address (&addr, x, mode, false))
/* NOTE(review): the switch over addr.type and the ADDRESS_REG case
   header are elided; extended mips16 references cost double.  */
1367 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1371 case ADDRESS_LO_SUM:
1372 return (TARGET_MIPS16 ? factor * 2 : factor);
1374 case ADDRESS_CONST_INT:
1377 case ADDRESS_SYMBOLIC:
1378 return factor * mips_symbol_insns (addr.symbol_type);
1384 /* Likewise for constant X. */
1387 mips_const_insns (rtx x)
1389 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1390 enum mips_symbol_type symbol_type;
1391 HOST_WIDE_INT offset;
1393 switch (GET_CODE (x))
/* NOTE(review): the HIGH/CONST_INT/CONST_DOUBLE/CONST case labels are
   elided in this extract (numbering jumps 1393->1397).  */
1397 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1398 || !mips_split_p[symbol_type])
1405 /* Unsigned 8-bit constants can be loaded using an unextended
1406 LI instruction. Unsigned 16-bit constants can be loaded
1407 using an extended LI. Negative constants must be loaded
1408 using LI and then negated. */
1409 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1410 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1411 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1412 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1415 return mips_build_integer (codes, INTVAL (x));
/* Floating-point zero is free (one move from $0) outside mips16.  */
1419 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1425 /* See if we can refer to X directly. */
1426 if (mips_symbolic_constant_p (x, &symbol_type))
1427 return mips_symbol_insns (symbol_type);
1429 /* Otherwise try splitting the constant into a base and offset.
1430 16-bit offsets can be added using an extra addiu. Larger offsets
1431 must be calculated separately and then added to the base. */
1432 mips_split_const (x, &x, &offset);
1435 int n = mips_const_insns (x);
1438 if (SMALL_OPERAND (offset))
1441 return n + 1 + mips_build_integer (codes, offset);
/* SYMBOL_REF / LABEL_REF fall-through: cost of the classified symbol.  */
1448 return mips_symbol_insns (mips_classify_symbol (x));
1456 /* Return the number of instructions needed for memory reference X.
1457 Count extended mips16 instructions as two instructions. */
1460 mips_fetch_insns (rtx x)
1462 gcc_assert (MEM_P (x));
1463 return mips_address_insns (XEXP (x, 0), GET_MODE (x));

1467 /* Return the number of instructions needed for an integer division. */
1470 mips_idiv_insns (void)
/* Base cost plus optional zero-divide check; the count accumulation
   statements are elided from this extract.  */
1475 if (TARGET_CHECK_ZERO_DIV)
1477 if (GENERATE_DIVIDE_TRAPS)
1483 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1488 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1489 returns a nonzero value if X is a legitimate address for a memory
1490 operand of the indicated MODE. STRICT is nonzero if this function
1491 is called during reload. */
1494 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1496 struct mips_address_info addr;
/* Delegate entirely to the classifier; ADDR itself is discarded.  */
1498 return mips_classify_address (&addr, x, mode, strict);
1502 /* Copy VALUE to a register and return that register. If new pseudos
1503 are allowed, copy it into a new register, otherwise use DEST. */
1506 mips_force_temporary (rtx dest, rtx value)
1508 if (!no_new_pseudos)
1509 return force_reg (Pmode, value);
/* After reload: reuse DEST (copy_rtx avoids sharing the dest rtx).  */
1512 emit_move_insn (copy_rtx (dest), value);

1518 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1519 and is used to load the high part into a register. */
1522 mips_split_symbol (rtx temp, rtx addr)
/* mips16 has no lui, so the high part comes from the gp pseudo instead;
   the TARGET_MIPS16 guard is elided in this extract.  */
1527 high = mips16_gp_pseudo_reg ();
1529 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1530 return gen_rtx_LO_SUM (Pmode, high, addr);
1534 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1535 type SYMBOL_TYPE. */
1538 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1541 HOST_WIDE_INT offset;
/* Wrap only the base in the UNSPEC; re-apply the offset outside it.  */
1543 mips_split_const (address, &base, &offset);
1544 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1545 UNSPEC_ADDRESS_FIRST + symbol_type);
1546 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1550 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1551 high part to BASE and return the result. Just return BASE otherwise.
1552 TEMP is available as a temporary register if needed.
1554 The returned expression can be used as the first operand to a LO_SUM. */
1557 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1558 enum mips_symbol_type symbol_type)
/* Only split symbol types need the HIGH part added; the else branch
   (return base) is elided in this extract.  */
1560 if (mips_split_p[symbol_type])
1562 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1563 addr = mips_force_temporary (temp, addr);
1564 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1570 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1571 mips_force_temporary; it is only needed when OFFSET is not a
1575 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1577 if (!SMALL_OPERAND (offset))
/* NOTE(review): the TARGET_MIPS16 test selecting between the two
   strategies below is elided in this extract.  */
1582 /* Load the full offset into a register so that we can use
1583 an unextended instruction for the address itself. */
1584 high = GEN_INT (offset);
1589 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1590 high = GEN_INT (CONST_HIGH_PART (offset));
1591 offset = CONST_LOW_PART (offset);
1593 high = mips_force_temporary (temp, high);
1594 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1596 return plus_constant (reg, offset);
1599 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
1600 referencing, and TYPE is the symbol type to use (either global
1601 dynamic or local dynamic). V0 is an RTX for the return value
1602 location. The entire insn sequence is returned. */
/* Cached SYMBOL_REF for __tls_get_addr; GTY so the GC keeps it alive.  */
1604 static GTY(()) rtx mips_tls_symbol;
1607 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
1609 rtx insn, loc, tga, a0;
1611 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
1613 if (!mips_tls_symbol)
1614 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
1616 loc = mips_unspec_address (sym, type);
/* Pass the GOT-relative TLS descriptor address in $a0, then call.
   NOTE(review): the start_sequence () call is elided in this extract.  */
1620 emit_insn (gen_rtx_SET (Pmode, a0,
1621 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
1622 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
1623 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
1624 CONST_OR_PURE_CALL_P (insn) = 1;
1625 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
1626 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
1627 insn = get_insns ();
1634 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
1635 return value will be a valid address and move_operand (either a REG
1639 mips_legitimize_tls_address (rtx loc)
1641 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
1642 enum tls_model model;
1644 v0 = gen_rtx_REG (Pmode, GP_RETURN);
1645 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
1647 model = SYMBOL_REF_TLS_MODEL (loc);
/* NOTE(review): the switch header on MODEL is elided in this extract.  */
1651 case TLS_MODEL_GLOBAL_DYNAMIC:
1652 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
1653 dest = gen_reg_rtx (Pmode);
1654 emit_libcall_block (insn, dest, v0, loc);
1657 case TLS_MODEL_LOCAL_DYNAMIC:
1658 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
1659 tmp1 = gen_reg_rtx (Pmode);
1661 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
1662 share the LDM result with other LD model accesses. */
1663 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
1665 emit_libcall_block (insn, tmp1, v0, eqv);
1667 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
1668 dest = gen_rtx_LO_SUM (Pmode, tmp2,
1669 mips_unspec_address (loc, SYMBOL_DTPREL));
1672 case TLS_MODEL_INITIAL_EXEC:
1673 tmp1 = gen_reg_rtx (Pmode);
1674 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
/* rdhwr-based thread-pointer read; DI vs SI patterns by pointer size.  */
1675 if (Pmode == DImode)
1677 emit_insn (gen_tls_get_tp_di (v1));
1678 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
1682 emit_insn (gen_tls_get_tp_si (v1));
1683 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
1685 dest = gen_reg_rtx (Pmode);
1686 emit_insn (gen_add3_insn (dest, tmp1, v1));
1689 case TLS_MODEL_LOCAL_EXEC:
1691 if (Pmode == DImode)
1692 emit_insn (gen_tls_get_tp_di (v1));
1694 emit_insn (gen_tls_get_tp_si (v1));
1696 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
1697 dest = gen_rtx_LO_SUM (Pmode, tmp1,
1698 mips_unspec_address (loc, SYMBOL_TPREL));
1708 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
1709 be legitimized in a way that the generic machinery might not expect,
1710 put the new address in *XLOC and return true. MODE is the mode of
1711 the memory being accessed. */
1714 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
1716 enum mips_symbol_type symbol_type;
/* TLS symbols get their full access sequence expanded here.  */
1718 if (mips_tls_operand_p (*xloc))
1720 *xloc = mips_legitimize_tls_address (*xloc);
1724 /* See if the address can split into a high part and a LO_SUM. */
1725 if (mips_symbolic_constant_p (*xloc, &symbol_type)
1726 && mips_symbolic_address_p (symbol_type, mode)
1727 && mips_split_p[symbol_type])
1729 *xloc = mips_split_symbol (0, *xloc);
1733 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
1735 /* Handle REG + CONSTANT using mips_add_offset. */
1738 reg = XEXP (*xloc, 0);
1739 if (!mips_valid_base_register_p (reg, mode, 0))
1740 reg = copy_to_mode_reg (Pmode, reg);
1741 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
1749 /* Subroutine of mips_build_integer (with the same interface).
1750 Assume that the final action in the sequence should be a left shift. */
1753 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
1755 unsigned int i, shift;
1757 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
1758 since signed numbers are easier to load than unsigned ones. */
1760 while ((value & 1) == 0)
1761 value /= 2, shift++;
/* Build the reduced value, then append the compensating left shift.  */
1763 i = mips_build_integer (codes, value);
1764 codes[i].code = ASHIFT;
1765 codes[i].value = shift;
1770 /* As for mips_build_shift, but assume that the final action will be
1771 an IOR or PLUS operation. */
1774 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
1776 unsigned HOST_WIDE_INT high;
1779 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
1780 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
1782 /* The constant is too complex to load with a simple lui/ori pair
1783 so our goal is to clear as many trailing zeros as possible.
1784 In this case, we know bit 16 is set and that the low 16 bits
1785 form a negative number. If we subtract that number from VALUE,
1786 we will clear at least the lowest 17 bits, maybe more. */
1787 i = mips_build_integer (codes, CONST_HIGH_PART (value));
1788 codes[i].code = PLUS;
1789 codes[i].value = CONST_LOW_PART (value);
/* Simple case: build the high bits, then OR in the low 16 bits.  */
1793 i = mips_build_integer (codes, high);
1794 codes[i].code = IOR;
1795 codes[i].value = value & 0xffff;
1801 /* Fill CODES with a sequence of rtl operations to load VALUE.
1802 Return the number of operations needed. */
1805 mips_build_integer (struct mips_integer_op *codes,
1806 unsigned HOST_WIDE_INT value)
1808 if (SMALL_OPERAND (value)
1809 || SMALL_OPERAND_UNSIGNED (value)
1810 || LUI_OPERAND (value))
1812 /* The value can be loaded with a single instruction. */
1813 codes[0].code = UNKNOWN;
1814 codes[0].value = value;
1817 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
1819 /* Either the constant is a simple LUI/ORI combination or its
1820 lowest bit is set. We don't want to shift in this case. */
1821 return mips_build_lower (codes, value);
1823 else if ((value & 0xffff) == 0)
1825 /* The constant will need at least three actions. The lowest
1826 16 bits are clear, so the final action will be a shift. */
1827 return mips_build_shift (codes, value);
1831 /* The final action could be a shift, add or inclusive OR.
1832 Rather than use a complex condition to select the best
1833 approach, try both mips_build_shift and mips_build_lower
1834 and pick the one that gives the shortest sequence.
1835 Note that this case is only used once per constant. */
1836 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
1837 unsigned int cost, alt_cost;
1839 cost = mips_build_shift (codes, value);
1840 alt_cost = mips_build_lower (alt_codes, value);
1841 if (alt_cost < cost)
1843 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
1851 /* Move VALUE into register DEST. */
1854 mips_move_integer (rtx dest, unsigned HOST_WIDE_INT value)
1856 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1857 enum machine_mode mode;
1858 unsigned int i, cost;
1861 mode = GET_MODE (dest);
1862 cost = mips_build_integer (codes, value);
1864 /* Apply each binary operation to X. Invariant: X is a legitimate
1865 source operand for a SET pattern. */
1866 x = GEN_INT (codes[0].value);
1867 for (i = 1; i < cost; i++)
/* Materialise the partial result in DEST, then chain the next op.  */
1870 emit_move_insn (dest, x), x = dest;
1872 x = force_reg (mode, x);
1873 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
1876 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
1880 /* Subroutine of mips_legitimize_move. Move constant SRC into register
1881 DEST given that SRC satisfies immediate_operand but doesn't satisfy
1885 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
1888 HOST_WIDE_INT offset;
1889 enum mips_symbol_type symbol_type;
1891 /* Split moves of big integers into smaller pieces. In mips16 code,
1892 it's better to force the constant into memory instead. */
1893 if (GET_CODE (src) == CONST_INT && !TARGET_MIPS16)
1895 mips_move_integer (dest, INTVAL (src));
1899 if (mips_tls_operand_p (src))
1901 emit_move_insn (dest, mips_legitimize_tls_address (src));
1905 /* See if the symbol can be split. For mips16, this is often worse than
1906 forcing it in the constant pool since it needs the single-register form
1907 of addiu or daddiu. */
1909 && mips_symbolic_constant_p (src, &symbol_type)
1910 && mips_split_p[symbol_type])
1912 emit_move_insn (dest, mips_split_symbol (dest, src));
1916 /* If we have (const (plus symbol offset)), load the symbol first
1917 and then add in the offset. This is usually better than forcing
1918 the constant into memory, at least in non-mips16 code. */
1919 mips_split_const (src, &base, &offset);
1922 && (!no_new_pseudos || SMALL_OPERAND (offset)))
1924 base = mips_force_temporary (dest, base);
1925 emit_move_insn (dest, mips_add_offset (0, base, offset));
/* Last resort: spill the constant to the pool and load it.  */
1929 src = force_const_mem (mode, src);
1931 /* When using explicit relocs, constant pool references are sometimes
1932 not legitimate addresses. */
1933 if (!memory_operand (src, VOIDmode))
1934 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
1935 emit_move_insn (dest, src);
1939 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
1940 sequence that is valid. */
1943 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
/* A memory-to-memory move must go through a register first.  */
1945 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
1947 emit_move_insn (dest, force_reg (mode, src));
1951 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
1952 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
1953 && REG_P (src) && MD_REG_P (REGNO (src))
1954 && REG_P (dest) && GP_REG_P (REGNO (dest)))
1956 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
1957 if (GET_MODE_SIZE (mode) <= 4)
1958 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
1959 gen_rtx_REG (SImode, REGNO (src)),
1960 gen_rtx_REG (SImode, other_regno)));
1962 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
1963 gen_rtx_REG (DImode, REGNO (src)),
1964 gen_rtx_REG (DImode, other_regno)));
1968 /* We need to deal with constants that would be legitimate
1969 immediate_operands but not legitimate move_operands. */
1970 if (CONSTANT_P (src) && !move_operand (src, mode))
1972 mips_legitimize_const_move (mode, dest, src);
/* Record SRC as the known value so optimisers can simplify later.  */
1973 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
1979 /* We need a lot of little routines to check constant values on the
1980 mips16. These are used to figure out how long the instruction will
1981 be. It would be much better to do this using constraints, but
1982 there aren't nearly enough letters available. */
/* Shared worker: OP must be a CONST_INT in [LOW, HIGH] with the MASK
   bits clear (MASK enforces alignment for the scaled variants).  */
1985 m16_check_op (rtx op, int low, int high, int mask)
1987 return (GET_CODE (op) == CONST_INT
1988 && INTVAL (op) >= low
1989 && INTVAL (op) <= high
1990 && (INTVAL (op) & mask) == 0);

/* The predicates below follow a naming scheme: [n]s/uimmN_S means
   [negated] signed/unsigned N-bit immediate, scaled by S bytes.  */
1994 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
1996 return m16_check_op (op, 0x1, 0x8, 0);
2000 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2002 return m16_check_op (op, - 0x8, 0x7, 0);
2006 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2008 return m16_check_op (op, - 0x7, 0x8, 0);
2012 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2014 return m16_check_op (op, - 0x10, 0xf, 0);
2018 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2020 return m16_check_op (op, - 0xf, 0x10, 0);
2024 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2026 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2030 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2032 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2036 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2038 return m16_check_op (op, - 0x80, 0x7f, 0);
2042 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2044 return m16_check_op (op, - 0x7f, 0x80, 0);
2048 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2050 return m16_check_op (op, 0x0, 0xff, 0);
2054 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2056 return m16_check_op (op, - 0xff, 0x0, 0);
2060 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2062 return m16_check_op (op, - 0x1, 0xfe, 0);
2066 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2068 return m16_check_op (op, 0x0, 0xff << 2, 3);
2072 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2074 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2078 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2080 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2084 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2086 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
/* Implements TARGET_RTX_COSTS: estimate the cost of rtx X (whose parent
   code is OUTER_CODE) and store it in *TOTAL.
   NOTE(review): this extract is heavily elided -- the switch header, most
   case labels (CONST_INT, MEM, MULT, DIV, SIGN_EXTEND, ...), break
   statements and several guard conditions are missing (the embedded line
   numbering is discontinuous throughout).  Comments below only annotate
   what is visible; consult the full file before editing.  */
2090 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2092 enum machine_mode mode = GET_MODE (x);
2099 /* Always return 0, since we don't have different sized
2100 instructions, hence different costs according to Richard
2106 /* A number between 1 and 8 inclusive is efficient for a shift.
2107 Otherwise, we will need an extended instruction. */
2108 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2109 || (outer_code) == LSHIFTRT)
2111 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2114 *total = COSTS_N_INSNS (1);
2118 /* We can use cmpi for an xor with an unsigned 16 bit value. */
2119 if ((outer_code) == XOR
2120 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2126 /* We may be able to use slt or sltu for a comparison with a
2127 signed 16 bit value. (The boundary conditions aren't quite
2128 right, but this is just a heuristic anyhow.) */
2129 if (((outer_code) == LT || (outer_code) == LE
2130 || (outer_code) == GE || (outer_code) == GT
2131 || (outer_code) == LTU || (outer_code) == LEU
2132 || (outer_code) == GEU || (outer_code) == GTU)
2133 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2139 /* Equality comparisons with 0 are cheap. */
2140 if (((outer_code) == EQ || (outer_code) == NE)
2147 /* Constants in the range 0...255 can be loaded with an unextended
2148 instruction. They are therefore as cheap as a register move.
2150 Given the choice between "li R1,0...255" and "move R1,R2"
2151 (where R2 is a known constant), it is usually better to use "li",
2152 since we do not want to unnecessarily extend the lifetime of R2. */
2153 if (outer_code == SET
2155 && INTVAL (x) < 256)
2161 /* Otherwise fall through to the handling below. */
2167 if (LEGITIMATE_CONSTANT_P (x))
2169 *total = COSTS_N_INSNS (1);
2174 /* The value will need to be fetched from the constant pool. */
2175 *total = CONSTANT_POOL_COST;
2181 /* If the address is legitimate, return the number of
2182 instructions it needs, otherwise use the default handling. */
2183 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2186 *total = COSTS_N_INSNS (1 + n);
2193 *total = COSTS_N_INSNS (6);
2197 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2203 if (mode == DImode && !TARGET_64BIT)
2205 *total = COSTS_N_INSNS (2);
2213 if (mode == DImode && !TARGET_64BIT)
2215 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2222 if (mode == SFmode || mode == DFmode)
2223 *total = COSTS_N_INSNS (1);
2225 *total = COSTS_N_INSNS (4);
2229 *total = COSTS_N_INSNS (1);
/* Addition/comparison: FP is cheap on most tunings, doubleword integer
   arithmetic on 32-bit targets needs multiple instructions.  */
2234 if (mode == SFmode || mode == DFmode)
2236 if (TUNE_MIPS3000 || TUNE_MIPS3900)
2237 *total = COSTS_N_INSNS (2);
2238 else if (TUNE_MIPS6000)
2239 *total = COSTS_N_INSNS (3);
2241 *total = COSTS_N_INSNS (4);
2243 *total = COSTS_N_INSNS (6);
2246 if (mode == DImode && !TARGET_64BIT)
2248 *total = COSTS_N_INSNS (4);
2254 if (mode == DImode && !TARGET_64BIT)
/* FP multiply costs per tuning.  */
2268 *total = COSTS_N_INSNS (4);
2269 else if (TUNE_MIPS6000
2272 *total = COSTS_N_INSNS (5);
2274 *total = COSTS_N_INSNS (7);
2281 *total = COSTS_N_INSNS (4);
2282 else if (TUNE_MIPS3000
2285 *total = COSTS_N_INSNS (5);
2286 else if (TUNE_MIPS6000
2289 *total = COSTS_N_INSNS (6);
2291 *total = COSTS_N_INSNS (8);
/* Integer multiply costs per tuning.  */
2296 *total = COSTS_N_INSNS (12);
2297 else if (TUNE_MIPS3900)
2298 *total = COSTS_N_INSNS (2);
2299 else if (TUNE_MIPS4130)
2300 *total = COSTS_N_INSNS (mode == DImode ? 6 : 4);
2301 else if (TUNE_MIPS5400 || TUNE_SB1)
2302 *total = COSTS_N_INSNS (mode == DImode ? 4 : 3);
2303 else if (TUNE_MIPS5500 || TUNE_MIPS7000)
2304 *total = COSTS_N_INSNS (mode == DImode ? 9 : 5);
2305 else if (TUNE_MIPS9000)
2306 *total = COSTS_N_INSNS (mode == DImode ? 8 : 3);
2307 else if (TUNE_MIPS6000)
2308 *total = COSTS_N_INSNS (17);
2309 else if (TUNE_MIPS5000)
2310 *total = COSTS_N_INSNS (5);
2312 *total = COSTS_N_INSNS (10);
/* FP division costs per tuning.  */
2321 *total = COSTS_N_INSNS (12);
2322 else if (TUNE_MIPS6000)
2323 *total = COSTS_N_INSNS (15);
2325 *total = COSTS_N_INSNS (24);
2326 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2327 *total = COSTS_N_INSNS (30);
2329 *total = COSTS_N_INSNS (23);
2337 *total = COSTS_N_INSNS (19);
2338 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2339 *total = COSTS_N_INSNS (59);
2340 else if (TUNE_MIPS6000)
2341 *total = COSTS_N_INSNS (16);
2343 *total = COSTS_N_INSNS (32);
2345 *total = COSTS_N_INSNS (36);
/* Integer division costs per tuning.  */
2354 *total = COSTS_N_INSNS (35);
2355 else if (TUNE_MIPS6000)
2356 *total = COSTS_N_INSNS (38);
2357 else if (TUNE_MIPS5000)
2358 *total = COSTS_N_INSNS (36);
2360 *total = COSTS_N_INSNS ((mode == SImode) ? 36 : 68);
2361 else if (TUNE_MIPS5400 || TUNE_MIPS5500)
2362 *total = COSTS_N_INSNS ((mode == SImode) ? 42 : 74);
2364 *total = COSTS_N_INSNS (69);
2368 /* A sign extend from SImode to DImode in 64 bit mode is often
2369 zero instructions, because the result can often be used
2370 directly by another instruction; we'll call it one. */
2371 if (TARGET_64BIT && mode == DImode
2372 && GET_MODE (XEXP (x, 0)) == SImode)
2373 *total = COSTS_N_INSNS (1);
2375 *total = COSTS_N_INSNS (2);
/* Zero extension of SI to DI needs an explicit mask/dsll+dsrl pair.  */
2379 if (TARGET_64BIT && mode == DImode
2380 && GET_MODE (XEXP (x, 0)) == SImode)
2381 *total = COSTS_N_INSNS (2);
2383 *total = COSTS_N_INSNS (1);
2391 /* Provide the costs of an addressing mode that contains ADDR.
2392 If ADDR is not a valid address, its cost is irrelevant. */
/* NOTE(review): the cost is simply the number of instructions the address
   requires; SImode is used as a representative access mode.  Lines are
   missing from this extraction (return type / braces not visible). */
2395 mips_address_cost (rtx addr)
2397 return mips_address_insns (addr, SImode);
2400 /* Return one word of double-word value OP, taking into account the fixed
2401 endianness of certain registers. HIGH_P is true to select the high part,
2402 false to select the low part. */
/* NOTE(review): HI/LO and FPRs have a fixed word order regardless of target
   endianness, hence the special REG cases below.  Several lines (the VOIDmode
   assert, the REG test, byte initialization) are missing from this view. */
2405 mips_subword (rtx op, int high_p)
2408 enum machine_mode mode;
2410 mode = GET_MODE (op);
2411 if (mode == VOIDmode)
/* Memory-layout offset: on big-endian the low part is at the higher byte.  */
2414 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2415 byte = UNITS_PER_WORD;
2421 if (FP_REG_P (REGNO (op)))
2422 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2423 if (REGNO (op) == HI_REGNUM)
2424 return gen_rtx_REG (word_mode, high_p ? HI_REGNUM : LO_REGNUM);
/* MEM operands may need rewriting for small-data accesses.  */
2428 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2430 return simplify_gen_subreg (word_mode, op, mode, byte);
2434 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
/* NOTE(review): the default return (split) and the ISA checks guarding the
   ldc1/sdc1 cases are among the lines missing from this extraction. */
2437 mips_split_64bit_move_p (rtx dest, rtx src)
2442 /* FP->FP moves can be done in a single instruction. */
2443 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2446 /* Check for floating-point loads and stores. They can be done using
2447 ldc1 and sdc1 on MIPS II and above. */
2450 if (FP_REG_RTX_P (dest) && MEM_P (src))
2452 if (FP_REG_RTX_P (src) && MEM_P (dest))
2459 /* Split a 64-bit move from SRC to DEST assuming that
2460 mips_split_64bit_move_p holds.
2462 Moves into and out of FPRs cause some difficulty here. Such moves
2463 will always be DFmode, since paired FPRs are not allowed to store
2464 DImode values. The most natural representation would be two separate
2465 32-bit moves, such as:
2467 (set (reg:SI $f0) (mem:SI ...))
2468 (set (reg:SI $f1) (mem:SI ...))
2470 However, the second insn is invalid because odd-numbered FPRs are
2471 not allowed to store independent values. Use the patterns load_df_low,
2472 load_df_high and store_df_high instead. */
2475 mips_split_64bit_move (rtx dest, rtx src)
2477 if (FP_REG_RTX_P (dest))
2479 /* Loading an FPR from memory or from GPRs. */
2480 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2481 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2484 else if (FP_REG_RTX_P (src))
2486 /* Storing an FPR into memory or GPRs. */
2487 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2488 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2492 /* The operation can be split into two normal moves. Decide in
2493 which order to do them. */
/* NOTE(review): if the low destination word overlaps SRC, moving the
   high word first avoids clobbering a source word before it is read. */
2496 low_dest = mips_subword (dest, 0);
2497 if (REG_P (low_dest)
2498 && reg_overlap_mentioned_p (low_dest, src))
2500 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2501 emit_move_insn (low_dest, mips_subword (src, 0));
2505 emit_move_insn (low_dest, mips_subword (src, 0));
2506 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2511 /* Return the appropriate instructions to move SRC into DEST. Assume
2512 that SRC is operand 1 and DEST is operand 0. */
/* NOTE(review): returns an assembler template string; several cases
   (MD-register moves, li immediates, the final gcc_unreachable and some
   returns) are missing from this extraction. */
2515 mips_output_move (rtx dest, rtx src)
2517 enum rtx_code dest_code, src_code;
2520 dest_code = GET_CODE (dest);
2521 src_code = GET_CODE (src);
/* dbl_p: true for 64-bit (doubleword) moves; selects d-prefixed opcodes.  */
2522 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2524 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Source is a GPR or (non-mips16) the zero constant.  */
2527 if ((src_code == REG && GP_REG_P (REGNO (src)))
2528 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2530 if (dest_code == REG)
2532 if (GP_REG_P (REGNO (dest)))
2533 return "move\t%0,%z1";
2535 if (MD_REG_P (REGNO (dest)))
2538 if (FP_REG_P (REGNO (dest)))
2539 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2541 if (ALL_COP_REG_P (REGNO (dest)))
/* The template is patched in place in a static buffer: the '_' slot
   receives the coprocessor number; "retval + 1" drops the 'd'.  */
2543 static char retval[] = "dmtc_\t%z1,%0";
2545 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2546 return (dbl_p ? retval : retval + 1);
2549 if (dest_code == MEM)
2550 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Destination is a GPR.  */
2552 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2554 if (src_code == REG)
2556 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2557 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2559 if (FP_REG_P (REGNO (src)))
2560 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2562 if (ALL_COP_REG_P (REGNO (src)))
2564 static char retval[] = "dmfc_\t%0,%1";
2566 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2567 return (dbl_p ? retval : retval + 1);
2571 if (src_code == MEM)
2572 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2574 if (src_code == CONST_INT)
2576 /* Don't use the X format, because that will give out of
2577 range numbers for 64 bit hosts and 32 bit targets. */
2579 return "li\t%0,%1\t\t\t# %X1";
2581 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2584 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2588 if (src_code == HIGH)
2589 return "lui\t%0,%h1";
2591 if (CONST_GP_P (src))
2592 return "move\t%0,%1";
2594 if (symbolic_operand (src, VOIDmode))
2595 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Source is an FPR.  */
2597 if (src_code == REG && FP_REG_P (REGNO (src)))
2599 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2601 if (GET_MODE (dest) == V2SFmode)
2602 return "mov.ps\t%0,%1";
2604 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2607 if (dest_code == MEM)
2608 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2610 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2612 if (src_code == MEM)
2613 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads and stores.  */
2615 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2617 static char retval[] = "l_c_\t%0,%1";
2619 retval[1] = (dbl_p ? 'd' : 'w');
2620 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2623 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2625 static char retval[] = "s_c_\t%1,%0";
2627 retval[1] = (dbl_p ? 'd' : 'w');
2628 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2634 /* Restore $gp from its save slot. Valid only when using o32 or
/* NOTE(review): reloads the global pointer from its stack save slot after a
   call; the blockage prevents the reload from being scheduled away when
   explicit relocations are not in use. */
2638 mips_restore_gp (void)
2642 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
/* The slot lives just above the outgoing argument area, addressed off the
   frame pointer when one is needed, otherwise off $sp.  */
2644 address = mips_add_offset (pic_offset_table_rtx,
2645 frame_pointer_needed
2646 ? hard_frame_pointer_rtx
2647 : stack_pointer_rtx,
2648 current_function_outgoing_args_size);
2649 slot = gen_rtx_MEM (Pmode, address);
2651 emit_move_insn (pic_offset_table_rtx, slot);
2652 if (!TARGET_EXPLICIT_RELOCS)
2653 emit_insn (gen_blockage ());
2656 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
/* Small RTL-emission helper shared by the comparison expanders below.  */
2659 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2661 emit_insn (gen_rtx_SET (VOIDmode, target,
2662 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2665 /* Return true if CMP1 is a suitable second operand for relational
2666 operator CODE. See also the *sCC patterns in mips.md. */
/* NOTE(review): the switch statement and its case labels are missing from
   this extraction; each return below belongs to a distinct rtx_code case. */
2669 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2675 return reg_or_0_operand (cmp1, VOIDmode);
2679 return !TARGET_MIPS16 && cmp1 == const1_rtx;
2683 return arith_operand (cmp1, VOIDmode);
2686 return sle_operand (cmp1, VOIDmode);
2689 return sleu_operand (cmp1, VOIDmode);
2696 /* Compare CMP0 and CMP1 using relational operator CODE and store the
2697 result in TARGET. CMP0 and TARGET are register_operands that have
2698 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
2699 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
2702 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
2703 rtx target, rtx cmp0, rtx cmp1)
2705 /* First see if there is a MIPS instruction that can do this operation
2706 with CMP1 in its current form. If not, try doing the same for the
2707 inverse operation. If that also fails, force CMP1 into a register
2709 if (mips_relational_operand_ok_p (code, cmp1))
2710 mips_emit_binary (code, target, cmp0, cmp1);
2713 enum rtx_code inv_code = reverse_condition (code);
2714 if (!mips_relational_operand_ok_p (inv_code, cmp1))
/* Neither form works with CMP1 as-is: force it to a register and retry.  */
2716 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
2717 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
2719 else if (invert_ptr == 0)
/* Caller cannot accept an inverted result: compute the inverse into a
   temporary and XOR with 1 to flip it back.  */
2721 rtx inv_target = gen_reg_rtx (GET_MODE (target));
2722 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
2723 mips_emit_binary (XOR, target, inv_target, const1_rtx);
2727 *invert_ptr = !*invert_ptr;
2728 mips_emit_binary (inv_code, target, cmp0, cmp1);
2733 /* Return a register that is zero iff CMP0 and CMP1 are equal.
2734 The register will have the same mode as CMP0. */
/* XOR works when CMP1 fits an unsigned-immediate; otherwise subtract.
   NOTE(review): the early-return for the const0_rtx case is not visible
   in this extraction. */
2737 mips_zero_if_equal (rtx cmp0, rtx cmp1)
2739 if (cmp1 == const0_rtx)
2742 if (uns_arith_operand (cmp1, VOIDmode))
2743 return expand_binop (GET_MODE (cmp0), xor_optab,
2744 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2746 return expand_binop (GET_MODE (cmp0), sub_optab,
2747 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
2750 /* Convert a comparison into something that can be used in a branch or
2751 conditional move. cmp_operands[0] and cmp_operands[1] are the values
2752 being compared and *CODE is the code used to compare them.
2754 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
2755 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
2756 otherwise any standard branch condition can be used. The standard branch
2759 - EQ/NE between two registers.
2760 - any comparison between a register and zero. */
2763 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
2765 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
/* Comparison against zero can be used directly.  */
2767 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
2769 *op0 = cmp_operands[0];
2770 *op1 = cmp_operands[1];
2772 else if (*code == EQ || *code == NE)
/* Reduce EQ/NE of two values to EQ/NE of a difference against zero.  */
2776 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2781 *op0 = cmp_operands[0];
2782 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
2787 /* The comparison needs a separate scc instruction. Store the
2788 result of the scc in *OP0 and compare it against zero. */
2789 bool invert = false;
2790 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
2792 mips_emit_int_relational (*code, &invert, *op0,
2793 cmp_operands[0], cmp_operands[1]);
2794 *code = (invert ? EQ : NE);
2799 enum rtx_code cmp_code;
2801 /* Floating-point tests use a separate c.cond.fmt comparison to
2802 set a condition code register. The branch or conditional move
2803 will then compare that register against zero.
2805 Set CMP_CODE to the code of the comparison instruction and
2806 *CODE to the code that the branch or move should use. */
2812 cmp_code = reverse_condition_maybe_unordered (*code);
/* With 8CC a fresh CCmode pseudo is used; otherwise the single FPSW.
   NOTE(review): the conditional selecting between these two arms is
   missing from this extraction. */
2822 ? gen_reg_rtx (CCmode)
2823 : gen_rtx_REG (CCmode, FPSW_REGNUM));
2825 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
2829 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
2830 Store the result in TARGET and return true if successful.
2832 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
/* NOTE(review): only integer-mode comparisons are handled; the
   floating-point early return is implied by the first test. */
2835 mips_emit_scc (enum rtx_code code, rtx target)
2837 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
2840 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
2841 if (code == EQ || code == NE)
2843 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
2844 mips_emit_binary (code, target, zie, const0_rtx);
2847 mips_emit_int_relational (code, 0, target,
2848 cmp_operands[0], cmp_operands[1]);
2852 /* Emit the common code for doing conditional branches.
2853 operands[0] is the label to jump to.
2854 The comparison operands are saved away by cmp{si,di,sf,df}. */
/* mips16 only supports EQ/NE-against-zero branches, hence the flag.  */
2857 gen_conditional_branch (rtx *operands, enum rtx_code code)
2859 rtx op0, op1, target;
2861 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
2862 target = gen_rtx_IF_THEN_ELSE (VOIDmode,
2863 gen_rtx_fmt_ee (code, GET_MODE (op0),
2865 gen_rtx_LABEL_REF (VOIDmode, operands[0]),
2867 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, target));
2870 /* Emit the common code for conditional moves. OPERANDS is the array
2871 of operands passed to the conditional move define_expand. */
/* operands[1] carries the comparison; operands[2]/[3] are the two values.  */
2874 gen_conditional_move (rtx *operands)
2879 code = GET_CODE (operands[1]);
2880 mips_emit_compare (&code, &op0, &op1, true);
2881 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
2882 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
2883 gen_rtx_fmt_ee (code,
2886 operands[2], operands[3])));
2889 /* Emit a conditional trap. OPERANDS is the array of operands passed to
2890 the conditional_trap expander. */
2893 mips_gen_conditional_trap (rtx *operands)
2896 enum rtx_code cmp_code = GET_CODE (operands[0]);
2897 enum machine_mode mode = GET_MODE (cmp_operands[0]);
2899 /* MIPS conditional trap machine instructions don't have GT or LE
2900 flavors, so we must invert the comparison and convert to LT and
2901 GE, respectively. */
/* NOTE(review): the enclosing switch header and default case are missing
   from this extraction; the swap also exchanges the operand order below. */
2904 case GT: cmp_code = LT; break;
2905 case LE: cmp_code = GE; break;
2906 case GTU: cmp_code = LTU; break;
2907 case LEU: cmp_code = GEU; break;
2910 if (cmp_code == GET_CODE (operands[0]))
2912 op0 = cmp_operands[0];
2913 op1 = cmp_operands[1];
2917 op0 = cmp_operands[1];
2918 op1 = cmp_operands[0];
2920 op0 = force_reg (mode, op0);
2921 if (!arith_operand (op1, mode))
2922 op1 = force_reg (mode, op1);
2924 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
2925 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
2929 /* Load function address ADDR into register DEST. SIBCALL_P is true
2930 if the address is needed for a sibling call. */
2933 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
2935 /* If we're generating PIC, and this call is to a global function,
2936 try to allow its address to be resolved lazily. This isn't
2937 possible for NewABI sibcalls since the value of $gp on entry
2938 to the stub would be our caller's gp, not ours. */
2939 if (TARGET_EXPLICIT_RELOCS
2940 && !(sibcall_p && TARGET_NEWABI)
2941 && global_got_operand (addr, VOIDmode))
2943 rtx high, lo_sum_symbol;
/* Split the GOT access into a high part and a lo_sum so the lazy-binding
   stub address can be loaded with a load_call pattern.  */
2945 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
2946 addr, SYMBOL_GOTOFF_CALL);
2947 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
2948 if (Pmode == SImode)
2949 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
2951 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
/* Fallback: an ordinary move is sufficient.  */
2954 emit_move_insn (dest, addr);
2958 /* Expand a call or call_value instruction. RESULT is where the
2959 result will go (null for calls), ADDR is the address of the
2960 function, ARGS_SIZE is the size of the arguments and AUX is
2961 the value passed to us by mips_function_arg. SIBCALL_P is true
2962 if we are expanding a sibling call, false if we're expanding
2966 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
2968 rtx orig_addr, pattern, insn;
/* Force an address that is not directly callable into a register.
   NOTE(review): the assignment of orig_addr before this test is missing
   from this extraction. */
2971 if (!call_insn_operand (addr, VOIDmode))
2973 addr = gen_reg_rtx (Pmode);
2974 mips_load_call_address (addr, orig_addr, sibcall_p);
/* mips16 floating-point calls may be redirected through a helper stub;
   if the stub handles the call completely, there is nothing more to do.  */
2978 && mips16_hard_float
2979 && build_mips16_call_stub (result, addr, args_size,
2980 aux == 0 ? 0 : (int) GET_MODE (aux)))
2984 pattern = (sibcall_p
2985 ? gen_sibcall_internal (addr, args_size)
2986 : gen_call_internal (addr, args_size));
2987 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
/* Two-register return values use the *_multiple call patterns.  */
2991 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
2992 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
2995 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
2996 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
2999 pattern = (sibcall_p
3000 ? gen_sibcall_value_internal (result, addr, args_size)
3001 : gen_call_value_internal (result, addr, args_size));
3003 insn = emit_call_insn (pattern);
3005 /* Lazy-binding stubs require $gp to be valid on entry. */
3006 if (global_got_operand (orig_addr, VOIDmode))
3007 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3011 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
/* Implements TARGET_FUNCTION_OK_FOR_SIBCALL; both arguments are unused.  */
3014 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3015 tree exp ATTRIBUTE_UNUSED)
3017 return TARGET_SIBCALLS;
3020 /* Emit code to move general operand SRC into condition-code
3021 register DEST. SCRATCH is a scratch TFmode float register.
3028 where FP1 and FP2 are single-precision float registers
3029 taken from SCRATCH. */
/* NOTE(review): the emitted sequence compares SRC against 0.0 with slt.sf
   so that a nonzero SRC sets the condition code. */
3032 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3036 /* Change the source to SFmode. */
3038 src = adjust_address (src, SFmode, 0);
3039 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3040 src = gen_rtx_REG (SFmode, true_regnum (src));
3042 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3043 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3045 emit_move_insn (copy_rtx (fp1), src);
3046 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3047 emit_insn (gen_slt_sf (dest, fp2, fp1));
3050 /* Emit code to change the current function's return address to
3051 ADDRESS. SCRATCH is available as a scratch register, if needed.
3052 ADDRESS and SCRATCH are both word-mode GPRs. */
/* Stores ADDRESS into $31's stack save slot; the assert checks that
   $31 (bit 31 of the frame mask) is actually saved. */
3055 mips_set_return_address (rtx address, rtx scratch)
3059 compute_frame_size (get_frame_size ());
3060 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3061 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3062 cfun->machine->frame.gp_sp_offset);
3064 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3067 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3068 Assume that the areas do not overlap. */
3071 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3073 HOST_WIDE_INT offset, delta;
3074 unsigned HOST_WIDE_INT bits;
3076 enum machine_mode mode;
3079 /* Work out how many bits to move at a time. If both operands have
3080 half-word alignment, it is usually better to move in half words.
3081 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3082 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3083 Otherwise move word-sized chunks. */
3084 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3085 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3086 bits = BITS_PER_WORD / 2;
3088 bits = BITS_PER_WORD;
3090 mode = mode_for_size (bits, MODE_INT, 0);
3091 delta = bits / BITS_PER_UNIT;
3093 /* Allocate a buffer for the temporary registers. */
3094 regs = alloca (sizeof (rtx) * length / delta);
3096 /* Load as many BITS-sized chunks as possible. Use a normal load if
3097 the source has enough alignment, otherwise use left/right pairs. */
3098 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3100 regs[i] = gen_reg_rtx (mode);
3101 if (MEM_ALIGN (src) >= bits)
3102 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3105 rtx part = adjust_address (src, BLKmode, offset);
3106 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3111 /* Copy the chunks to the destination. */
/* All loads are emitted before all stores, which is what allows the
   non-overlap assumption to be exploited.  */
3112 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3113 if (MEM_ALIGN (dest) >= bits)
3114 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3117 rtx part = adjust_address (dest, BLKmode, offset);
3118 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3122 /* Mop up any left-over bytes. */
3123 if (offset < length)
3125 src = adjust_address (src, BLKmode, offset);
3126 dest = adjust_address (dest, BLKmode, offset);
3127 move_by_pieces (dest, src, length - offset,
3128 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3132 #define MAX_MOVE_REGS 4
3133 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3136 /* Helper function for doing a loop-based block operation on memory
3137 reference MEM. Each iteration of the loop will operate on LENGTH
3140 Create a new base register for use within the loop and point it to
3141 the start of MEM. Create a new memory reference that uses this
3142 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3145 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3146 rtx *loop_reg, rtx *loop_mem)
3148 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3150 /* Although the new mem does not refer to a known location,
3151 it does keep up to LENGTH bytes of alignment. */
3152 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3153 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3157 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3158 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3159 memory regions do not overlap. */
3162 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3164 rtx label, src_reg, dest_reg, final_src;
3165 HOST_WIDE_INT leftover;
/* The loop handles whole MAX_MOVE_BYTES chunks; any remainder is moved
   with straight-line code afterwards.  */
3167 leftover = length % MAX_MOVE_BYTES;
3170 /* Create registers and memory references for use within the loop. */
3171 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3172 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3174 /* Calculate the value that SRC_REG should have after the last iteration
3176 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3179 /* Emit the start of the loop. */
3180 label = gen_label_rtx ();
3183 /* Emit the loop body. */
3184 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3186 /* Move on to the next block. */
3187 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3188 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3190 /* Emit the loop condition. */
3191 if (Pmode == DImode)
3192 emit_insn (gen_cmpdi (src_reg, final_src));
3194 emit_insn (gen_cmpsi (src_reg, final_src));
3195 emit_jump_insn (gen_bne (label));
3197 /* Mop up any left-over bytes. */
3199 mips_block_move_straight (dest, src, leftover);
3202 /* Expand a movmemsi instruction. */
/* Small constant lengths are expanded inline; larger constant lengths use
   a loop.  NOTE(review): the return statements and the non-CONST_INT
   fallback are missing from this extraction. */
3205 mips_expand_block_move (rtx dest, rtx src, rtx length)
3207 if (GET_CODE (length) == CONST_INT)
3209 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3211 mips_block_move_straight (dest, src, INTVAL (length));
3216 mips_block_move_loop (dest, src, INTVAL (length));
3223 /* Argument support functions. */
3225 /* Initialize CUMULATIVE_ARGS for a function. */
3228 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3229 rtx libname ATTRIBUTE_UNUSED)
3231 static CUMULATIVE_ARGS zero_cum;
3232 tree param, next_param;
3235 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3237 /* Determine if this function has variable arguments. This is
3238 indicated by the last argument being 'void_type_mode' if there
3239 are no variable arguments. The standard MIPS calling sequence
3240 passes all arguments in the general purpose registers in this case. */
3242 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3243 param != 0; param = next_param)
3245 next_param = TREE_CHAIN (param);
/* A last parameter that is not void_type_node marks a varargs function;
   setting gp_reg_found forces GPR passing (see mips_arg_info).  */
3246 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3247 cum->gp_reg_found = 1;
3252 /* Fill INFO with information about a single argument. CUM is the
3253 cumulative state for earlier arguments. MODE is the mode of this
3254 argument and TYPE is its type (if known). NAMED is true if this
3255 is a named (fixed) argument rather than a variable one. */
3258 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3259 tree type, int named, struct mips_arg_info *info)
3261 bool doubleword_aligned_p;
3262 unsigned int num_bytes, num_words, max_regs;
3264 /* Work out the size of the argument. */
3265 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3266 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3268 /* Decide whether it should go in a floating-point register, assuming
3269 one is free. Later code checks for availability.
3271 The checks against UNITS_PER_FPVALUE handle the soft-float and
3272 single-float cases. */
/* NOTE(review): the switch over mips_abi that selects among the three
   fpr_p computations below is missing from this extraction. */
3276 /* The EABI conventions have traditionally been defined in terms
3277 of TYPE_MODE, regardless of the actual type. */
3278 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3279 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3280 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3285 /* Only leading floating-point scalars are passed in
3286 floating-point registers. We also handle vector floats the same
3287 way, which is OK because they are not covered by the standard ABI. */
3288 info->fpr_p = (!cum->gp_reg_found
3289 && cum->arg_number < 2
3290 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3291 || VECTOR_FLOAT_TYPE_P (type))
3292 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3293 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3294 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3299 /* Scalar and complex floating-point types are passed in
3300 floating-point registers. */
3301 info->fpr_p = (named
3302 && (type == 0 || FLOAT_TYPE_P (type))
3303 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3304 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3305 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3306 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3308 /* ??? According to the ABI documentation, the real and imaginary
3309 parts of complex floats should be passed in individual registers.
3310 The real and imaginary parts of stack arguments are supposed
3311 to be contiguous and there should be an extra word of padding
3314 This has two problems. First, it makes it impossible to use a
3315 single "void *" va_list type, since register and stack arguments
3316 are passed differently. (At the time of writing, MIPSpro cannot
3317 handle complex float varargs correctly.) Second, it's unclear
3318 what should happen when there is only one register free.
3320 For now, we assume that named complex floats should go into FPRs
3321 if there are two FPRs free, otherwise they should be passed in the
3322 same way as a struct containing two floats. */
3324 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3325 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3327 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3328 info->fpr_p = false;
3338 /* See whether the argument has doubleword alignment. */
3339 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3341 /* Set REG_OFFSET to the register count we're interested in.
3342 The EABI allocates the floating-point registers separately,
3343 but the other ABIs allocate them like integer registers. */
3344 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3348 /* Advance to an even register if the argument is doubleword-aligned. */
3349 if (doubleword_aligned_p)
3350 info->reg_offset += info->reg_offset & 1;
3352 /* Work out the offset of a stack argument. */
3353 info->stack_offset = cum->stack_words;
3354 if (doubleword_aligned_p)
3355 info->stack_offset += info->stack_offset & 1;
3357 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3359 /* Partition the argument between registers and stack. */
3360 info->reg_words = MIN (num_words, max_regs);
3361 info->stack_words = num_words - info->reg_words;
3365 /* Implement FUNCTION_ARG_ADVANCE. */
3368 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3369 tree type, int named)
3371 struct mips_arg_info info;
3373 mips_arg_info (cum, mode, type, named, &info);
/* NOTE(review): the condition guarding this assignment (a non-FPR
   argument was seen) is missing from this extraction. */
3376 cum->gp_reg_found = true;
3378 /* See the comment above the cumulative args structure in mips.h
3379 for an explanation of what this code does. It assumes the O32
3380 ABI, which passes at most 2 arguments in float registers. */
3381 if (cum->arg_number < 2 && info.fpr_p)
3382 cum->fp_code += (mode == SFmode ? 1 : 2) << ((cum->arg_number - 1) * 2);
/* EABI tracks GPRs and FPRs separately; other ABIs share one count.  */
3384 if (mips_abi != ABI_EABI || !info.fpr_p)
3385 cum->num_gprs = info.reg_offset + info.reg_words;
3386 else if (info.reg_words > 0)
3387 cum->num_fprs += FP_INC;
3389 if (info.stack_words > 0)
3390 cum->stack_words = info.stack_offset + info.stack_words;
3395 /* Implement FUNCTION_ARG. */
3398 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3399 tree type, int named)
3401 struct mips_arg_info info;
3403 /* We will be called with a mode of VOIDmode after the last argument
3404 has been seen. Whatever we return will be passed to the call
3405 insn. If we need a mips16 fp_code, return a REG with the code
3406 stored as the mode. */
3407 if (mode == VOIDmode)
3409 if (TARGET_MIPS16 && cum->fp_code != 0)
3410 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3416 mips_arg_info (cum, mode, type, named, &info);
3418 /* Return straight away if the whole argument is passed on the stack. */
3419 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
/* NOTE(review): several conditions of this record-type test (NewABI
   check, !TARGET_SOFT_FLOAT) are missing from this extraction. */
3423 && TREE_CODE (type) == RECORD_TYPE
3425 && TYPE_SIZE_UNIT (type)
3426 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3429 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3430 structure contains a double in its entirety, then that 64 bit
3431 chunk is passed in a floating point register. */
3434 /* First check to see if there is any such field. */
3435 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3436 if (TREE_CODE (field) == FIELD_DECL
3437 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3438 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3439 && host_integerp (bit_position (field), 0)
3440 && int_bit_position (field) % BITS_PER_WORD == 0)
3445 /* Now handle the special case by returning a PARALLEL
3446 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3447 chunks are passed in registers. */
3449 HOST_WIDE_INT bitpos;
3452 /* assign_parms checks the mode of ENTRY_PARM, so we must
3453 use the actual mode here. */
3454 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3457 field = TYPE_FIELDS (type);
3458 for (i = 0; i < info.reg_words; i++)
/* Find the first field at or beyond the current bit position.  */
3462 for (; field; field = TREE_CHAIN (field))
3463 if (TREE_CODE (field) == FIELD_DECL
3464 && int_bit_position (field) >= bitpos)
/* An aligned double field in this chunk goes in an FPR,
   otherwise the chunk goes in a GPR.  */
3468 && int_bit_position (field) == bitpos
3469 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3470 && !TARGET_SOFT_FLOAT
3471 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3472 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3474 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3477 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3478 GEN_INT (bitpos / BITS_PER_UNIT));
3480 bitpos += BITS_PER_WORD;
3486 /* Handle the n32/n64 conventions for passing complex floating-point
3487 arguments in FPR pairs. The real part goes in the lower register
3488 and the imaginary part goes in the upper register. */
3491 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3494 enum machine_mode inner;
3497 inner = GET_MODE_INNER (mode);
3498 reg = FP_ARG_FIRST + info.reg_offset;
3499 real = gen_rtx_EXPR_LIST (VOIDmode,
3500 gen_rtx_REG (inner, reg),
3502 imag = gen_rtx_EXPR_LIST (VOIDmode,
3503 gen_rtx_REG (inner, reg + info.reg_words / 2),
3504 GEN_INT (GET_MODE_SIZE (inner)));
3505 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3509 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3510 else if (info.reg_offset == 1)
3511 /* This code handles the special o32 case in which the second word
3512 of the argument structure is passed in floating-point registers. */
3513 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3515 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3519 /* Implement TARGET_ARG_PARTIAL_BYTES. */
/* Return the number of bytes of an argument that are passed in registers
   when the argument is split between registers and the stack; return 0
   when it lives entirely in one or the other.
   NOTE(review): this listing elides lines (the return type, braces and
   blank separators are not visible here); numeric prefixes are the
   original file's line numbers.  */
3522 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3523 enum machine_mode mode, tree type, bool named)
3525 struct mips_arg_info info;
/* Classify the argument: INFO.REG_WORDS words go in registers and
   INFO.STACK_WORDS words go on the stack.  */
3527 mips_arg_info (cum, mode, type, named, &info);
/* Only an argument actually split across registers and stack
   (stack_words > 0) has a nonzero partial-register byte count.  */
3528 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3532 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3533 PARM_BOUNDARY bits of alignment, but will be given anything up
3534 to STACK_BOUNDARY bits if the type requires it. */
3537 function_arg_boundary (enum machine_mode mode, tree type)
3539 unsigned int alignment;
/* Start from the type's declared alignment, or from the mode's natural
   alignment when no type is available.  */
3541 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
/* Clamp to the [PARM_BOUNDARY, STACK_BOUNDARY] range.
   NOTE(review): the trailing "return alignment;" and closing brace are
   elided from this listing.  */
3542 if (alignment < PARM_BOUNDARY)
3543 alignment = PARM_BOUNDARY;
3544 if (alignment > STACK_BOUNDARY)
3545 alignment = STACK_BOUNDARY;
3549 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3550 upward rather than downward. In other words, return true if the
3551 first byte of the stack slot has useful data, false if the last
3555 mips_pad_arg_upward (enum machine_mode mode, tree type)
3557 /* On little-endian targets, the first byte of every stack argument
3558 is passed in the first byte of the stack slot. */
3559 if (!BYTES_BIG_ENDIAN)
/* NOTE(review): the "return" bodies of the guard conditions in this
   function are elided from this listing; each test below decides the
   padding direction and returns immediately.  */
3562 /* Otherwise, integral types are padded downward: the last byte of a
3563 stack argument is passed in the last byte of the stack slot. */
/* NOTE(review): the "if (type != 0" header of this condition is elided;
   only the two arms of its ?: operand are visible.  */
3565 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3566 : GET_MODE_CLASS (mode) == MODE_INT)
3569 /* Big-endian o64 pads floating-point arguments downward. */
3570 if (mips_abi == ABI_O64)
3571 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3574 /* Other types are padded upward for o32, o64, n32 and n64. */
3575 if (mips_abi != ABI_EABI)
3578 /* Arguments smaller than a stack slot are padded downward. */
3579 if (mode != BLKmode)
3580 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
/* BLKmode arguments: compare the type's byte size against a stack slot.  */
3582 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
3586 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3587 if the least significant byte of the register has useful data. Return
3588 the opposite if the most significant byte does. */
3591 mips_pad_reg_upward (enum machine_mode mode, tree type)
3593 /* No shifting is required for floating-point arguments. */
/* Use the tree type when one is available, otherwise fall back to
   classifying by machine mode.  */
3594 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3595 return !BYTES_BIG_ENDIAN;
3597 /* Otherwise, apply the same padding to register arguments as we do
3598 to stack arguments. */
3599 return mips_pad_arg_upward (mode, type);
/* Save any argument registers that a variadic function's caller passed
   but that were not consumed by named arguments, so that va_arg can
   later read them from memory.  Reports the saved size through
   *PRETEND_SIZE; when NO_RTL is set, presumably no insns are emitted —
   TODO confirm against the elided branches of this listing.  */
3603 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3604 tree type, int *pretend_size, int no_rtl)
3606 CUMULATIVE_ARGS local_cum;
3607 int gp_saved, fp_saved;
3609 /* The caller has advanced CUM up to, but not beyond, the last named
3610 argument. Advance a local copy of CUM past the last "real" named
3611 argument, to find out how many registers are left over. */
3614 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3616 /* Find out how many registers we need to save. */
3617 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3618 fp_saved = (EABI_FLOAT_VARARGS_P
3619 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* Save the leftover GPRs as one block starting just past the named
   GPR arguments.  */
3628 ptr = virtual_incoming_args_rtx;
3633 ptr = plus_constant (ptr, local_cum.num_gprs * UNITS_PER_WORD);
3637 ptr = plus_constant (ptr, -gp_saved * UNITS_PER_WORD);
3640 mem = gen_rtx_MEM (BLKmode, ptr);
3641 set_mem_alias_set (mem, get_varargs_alias_set ());
3643 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
3648 /* We can't use move_block_from_reg, because it will use
3650 enum machine_mode mode;
3653 /* Set OFF to the offset from virtual_incoming_args_rtx of
3654 the first float register. The FP save area lies below
3655 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
3656 off = -gp_saved * UNITS_PER_WORD;
3657 off &= ~(UNITS_PER_FPVALUE - 1);
3658 off -= fp_saved * UNITS_PER_FPREG;
3660 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
/* Store each leftover FPR individually, FP_INC registers apart.  */
3662 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
3666 ptr = plus_constant (virtual_incoming_args_rtx, off);
3667 mem = gen_rtx_MEM (mode, ptr);
3668 set_mem_alias_set (mem, get_varargs_alias_set ());
3669 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
3670 off += UNITS_PER_HWFPVALUE;
3676 /* No need for pretend arguments: the register parameter area was
3677 allocated by the caller. */
3681 *pretend_size = (gp_saved * UNITS_PER_WORD) + (fp_saved * UNITS_PER_FPREG);
3684 /* Create the va_list data type.
3685 We keep 3 pointers, and two offsets.
3686 Two pointers are to the overflow area, which starts at the CFA.
3687 One of these is constant, for addressing into the GPR save area below it.
3688 The other is advanced up the stack through the overflow region.
3689 The third pointer is to the GPR save area. Since the FPR save area
3690 is just below it, we can address FPR slots off this pointer.
3691 We also keep two one-byte offsets, which are to be subtracted from the
3692 constant pointers to yield addresses in the GPR and FPR save areas.
3693 These are downcounted as float or non-float arguments are used,
3694 and when they get to zero, the argument must be obtained from the
3696 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
3697 pointer is enough. It's started at the GPR save area, and is
3699 Note that the GPR save area is not constant size, due to optimization
3700 in the prologue. Hence, we can't use a design with two pointers
3701 and two offsets, although we could have designed this with two pointers
3702 and three offsets. */
3705 mips_build_builtin_va_list (void)
3707 if (EABI_FLOAT_VARARGS_P)
/* EABI with FP varargs needs the full record described above:
   three pointers, two one-byte offsets, and explicit padding.  */
3709 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
3712 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3714 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
3716 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
3718 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
3720 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
3721 unsigned_char_type_node);
3722 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
3723 unsigned_char_type_node);
3724 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
3725 warn on every user file. */
/* Array index is ptr-size - 2 - 1: the two offset bytes already used,
   minus one because build_index_type takes the maximum index.  */
3726 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
3727 array = build_array_type (unsigned_char_type_node,
3728 build_index_type (index));
3729 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
/* Attach every field to the record and chain them in declaration
   order: ovfl, gtop, ftop, goff, foff, reserved.  */
3731 DECL_FIELD_CONTEXT (f_ovfl) = record;
3732 DECL_FIELD_CONTEXT (f_gtop) = record;
3733 DECL_FIELD_CONTEXT (f_ftop) = record;
3734 DECL_FIELD_CONTEXT (f_goff) = record;
3735 DECL_FIELD_CONTEXT (f_foff) = record;
3736 DECL_FIELD_CONTEXT (f_res) = record;
3738 TYPE_FIELDS (record) = f_ovfl;
3739 TREE_CHAIN (f_ovfl) = f_gtop;
3740 TREE_CHAIN (f_gtop) = f_ftop;
3741 TREE_CHAIN (f_ftop) = f_goff;
3742 TREE_CHAIN (f_goff) = f_foff;
3743 TREE_CHAIN (f_foff) = f_res;
3745 layout_type (record);
3748 else if (TARGET_IRIX && TARGET_IRIX6)
3749 /* On IRIX 6, this type is 'char *'. */
3750 return build_pointer_type (char_type_node);
3752 /* Otherwise, we use 'void *'. */
3753 return ptr_type_node;
3756 /* Implement va_start. */
/* Initialize VALIST for a variadic function.  For the EABI FP-varargs
   record built by mips_build_builtin_va_list, fill in all five fields;
   otherwise fall through to the standard single-pointer expansion.
   NOTE(review): this listing elides lines (braces, declarations, some
   statements); numeric prefixes are the original file's line numbers.  */
3759 mips_va_start (tree valist, rtx nextarg)
/* FIX: "&current_function_args_info" had been mangled by an HTML
   entity ("&curren" -> U+00A4) into "¤t_function_args_info";
   restored the address-of expression.  */
3761 const CUMULATIVE_ARGS *cum = &current_function_args_info;
3763 /* ARG_POINTER_REGNUM is initialized to STACK_POINTER_BOUNDARY, but
3764 since the stack is aligned for a pair of argument-passing slots,
3765 and the beginning of a variable argument list may be an odd slot,
3766 we have to decrease its alignment. */
3767 if (cfun && cfun->emit->regno_pointer_align)
3768 while (((current_function_pretend_args_size * BITS_PER_UNIT)
3769 & (REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) - 1)) != 0)
3770 REGNO_POINTER_ALIGN (ARG_POINTER_REGNUM) /= 2;
3772 if (mips_abi == ABI_EABI)
3774 int gpr_save_area_size;
3777 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
3779 if (EABI_FLOAT_VARARGS_P)
3781 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3782 tree ovfl, gtop, ftop, goff, foff;
3785 int fpr_save_area_size;
/* Walk the field chain laid down by mips_build_builtin_va_list;
   the fields must stay in this order.  */
3787 f_ovfl = TYPE_FIELDS (va_list_type_node);
3788 f_gtop = TREE_CHAIN (f_ovfl);
3789 f_ftop = TREE_CHAIN (f_gtop);
3790 f_goff = TREE_CHAIN (f_ftop);
3791 f_foff = TREE_CHAIN (f_goff);
3793 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
3795 gtop = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3797 ftop = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3799 goff = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3801 foff = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3804 /* Emit code to initialize OVFL, which points to the next varargs
3805 stack argument. CUM->STACK_WORDS gives the number of stack
3806 words used by named arguments. */
3807 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
3808 if (cum->stack_words > 0)
3809 t = build (PLUS_EXPR, TREE_TYPE (ovfl), t,
3810 build_int_cst (NULL_TREE,
3811 cum->stack_words * UNITS_PER_WORD));
3812 t = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
3813 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3815 /* Emit code to initialize GTOP, the top of the GPR save area. */
3816 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
3817 t = build (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
3818 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3820 /* Emit code to initialize FTOP, the top of the FPR save area.
3821 This address is gpr_save_area_bytes below GTOP, rounded
3822 down to the next fp-aligned boundary. */
3823 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
3824 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
3825 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
3827 t = build (PLUS_EXPR, TREE_TYPE (ftop), t,
3828 build_int_cst (NULL_TREE, -fpr_offset));
3829 t = build (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
3830 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3832 /* Emit code to initialize GOFF, the offset from GTOP of the
3833 next GPR argument. */
3834 t = build (MODIFY_EXPR, TREE_TYPE (goff), goff,
3835 build_int_cst (NULL_TREE, gpr_save_area_size));
3836 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3838 /* Likewise emit code to initialize FOFF, the offset from FTOP
3839 of the next FPR argument. */
3841 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
3842 t = build (MODIFY_EXPR, TREE_TYPE (foff), foff,
3843 build_int_cst (NULL_TREE, fpr_save_area_size));
3844 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
3848 /* Everything is in the GPR save area, or in the overflow
3849 area which is contiguous with it. */
3850 nextarg = plus_constant (nextarg, -gpr_save_area_size);
3851 std_expand_builtin_va_start (valist, nextarg);
/* Non-EABI ABIs use the standard single-pointer va_start.  */
3855 std_expand_builtin_va_start (valist, nextarg);
3858 /* Implement va_arg. */
/* Gimplify a va_arg expression for TYPE read from VALIST, appending
   setup statements to *PRE_P.  Non-EABI ABIs (and EABI without FP
   varargs) use the standard merged-stack expansion; otherwise the
   numbered scheme in the comment at line 3905 below selects between
   the register save areas and the overflow area.
   NOTE(review): this listing elides lines (braces, declarations, some
   statements); numeric prefixes are the original file's line numbers.  */
3861 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
3863 HOST_WIDE_INT size, rsize;
/* Arguments passed by reference are fetched as a pointer and
   dereferenced at the end (see line 4035).  */
3867 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
3870 type = build_pointer_type (type);
3872 size = int_size_in_bytes (type);
/* RSIZE: SIZE rounded up to a whole number of words.  */
3873 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
3875 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
3876 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
3879 /* Not a simple merged stack. */
3881 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
3882 tree ovfl, top, off, align;
3883 HOST_WIDE_INT osize;
/* Field chain must match mips_build_builtin_va_list's layout.  */
3886 f_ovfl = TYPE_FIELDS (va_list_type_node);
3887 f_gtop = TREE_CHAIN (f_ovfl);
3888 f_ftop = TREE_CHAIN (f_gtop);
3889 f_goff = TREE_CHAIN (f_ftop);
3890 f_foff = TREE_CHAIN (f_goff);
3892 /* We maintain separate pointers and offsets for floating-point
3893 and integer arguments, but we need similar code in both cases.
3896 TOP be the top of the register save area;
3897 OFF be the offset from TOP of the next register;
3898 ADDR_RTX be the address of the argument;
3899 RSIZE be the number of bytes used to store the argument
3900 when it's in the register save area;
3901 OSIZE be the number of bytes used to store it when it's
3902 in the stack overflow area; and
3903 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
3905 The code we want is:
3907 1: off &= -rsize; // round down
3910 4: addr_rtx = top - off;
3915 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
3916 10: addr_rtx = ovfl + PADDING;
3920 [1] and [9] can sometimes be optimized away. */
3922 ovfl = build (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* Floating-point arguments that fit a hardware FP value come from
   the FPR save area; everything else from the GPR save area.  */
3925 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
3926 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
3928 top = build (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
3930 off = build (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
3933 /* When floating-point registers are saved to the stack,
3934 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
3935 of the float's precision. */
3936 rsize = UNITS_PER_HWFPVALUE;
3938 /* Overflow arguments are padded to UNITS_PER_WORD bytes
3939 (= PARM_BOUNDARY bits). This can be different from RSIZE
3942 (1) On 32-bit targets when TYPE is a structure such as:
3944 struct s { float f; };
3946 Such structures are passed in paired FPRs, so RSIZE
3947 will be 8 bytes. However, the structure only takes
3948 up 4 bytes of memory, so OSIZE will only be 4.
3950 (2) In combinations such as -mgp64 -msingle-float
3951 -fshort-double. Doubles passed in registers
3952 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
3953 but those passed on the stack take up
3954 UNITS_PER_WORD bytes. */
3955 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
3959 top = build (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
3961 off = build (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
3963 if (rsize > UNITS_PER_WORD)
3965 /* [1] Emit code for: off &= -rsize. */
3966 t = build (BIT_AND_EXPR, TREE_TYPE (off), off,
3967 build_int_cst (NULL_TREE, -rsize))
3968 t = build (MODIFY_EXPR, TREE_TYPE (off), off, t);
3969 gimplify_and_add (t, pre_p);
3974 /* [2] Emit code to branch if off == 0. */
3975 t = build (NE_EXPR, boolean_type_node, off,
3976 build_int_cst (TREE_TYPE (off), 0));
3977 addr = build (COND_EXPR, ptr_type_node, t, NULL, NULL);
3979 /* [5] Emit code for: off -= rsize. We do this as a form of
3980 post-increment not available to C. Also widen for the
3981 coming pointer arithmetic. */
3982 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
3983 t = build (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
3984 t = fold_convert (sizetype, t);
3985 t = fold_convert (TREE_TYPE (top), t);
3987 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
3988 the argument has RSIZE - SIZE bytes of leading padding. */
3989 t = build (MINUS_EXPR, TREE_TYPE (top), top, t);
3990 if (BYTES_BIG_ENDIAN && rsize > size)
3992 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
3994 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
3996 COND_EXPR_THEN (addr) = t;
3998 if (osize > UNITS_PER_WORD)
4000 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4001 u = fold_convert (TREE_TYPE (ovfl),
4002 build_int_cst (NULL_TREE, osize - 1));
4003 t = build (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4004 u = fold_convert (TREE_TYPE (ovfl),
4005 build_int_cst (NULL_TREE, -osize));
4006 t = build (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4007 align = build (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4012 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4013 post-increment ovfl by osize. On big-endian machines,
4014 the argument has OSIZE - SIZE bytes of leading padding. */
4015 u = fold_convert (TREE_TYPE (ovfl),
4016 build_int_cst (NULL_TREE, osize));
4017 t = build (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4018 if (BYTES_BIG_ENDIAN && osize > size)
4020 u = fold_convert (TREE_TYPE (t),
4021 build_int_cst (NULL_TREE, osize - size));
4022 t = build (PLUS_EXPR, TREE_TYPE (t), t, u);
4025 /* String [9] and [10,11] together. */
4027 t = build (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4028 COND_EXPR_ELSE (addr) = t;
4030 addr = fold_convert (build_pointer_type (type), addr);
4031 addr = build_fold_indirect_ref (addr);
/* For by-reference arguments, dereference the fetched pointer.  */
4035 addr = build_fold_indirect_ref (addr);
4040 /* Return true if it is possible to use left/right accesses for a
4041 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4042 returning true, update *OP, *LEFT and *RIGHT as follows:
4044 *OP is a BLKmode reference to the whole field.
4046 *LEFT is a QImode reference to the first byte if big endian or
4047 the last byte if little endian. This address can be used in the
4048 left-side instructions (lwl, swl, ldl, sdl).
4050 *RIGHT is a QImode reference to the opposite end of the field and
4051 can be used in the partnering right-side instruction. */
4054 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4055 rtx *left, rtx *right)
/* NOTE(review): this listing elides lines (braces, "return false;"
   statements after the early checks, declarations of FIRST/LAST).  */
4059 /* Check that the operand really is a MEM. Not all the extv and
4060 extzv predicates are checked. */
4064 /* Check that the size is valid. */
/* Only full-word (32-bit) or, on 64-bit targets, doubleword (64-bit)
   accesses map onto the lwl/lwr (ldl/ldr) pairs.  */
4065 if (width != 32 && (!TARGET_64BIT || width != 64))
4068 /* We can only access byte-aligned values. Since we are always passed
4069 a reference to the first byte of the field, it is not necessary to
4070 do anything with BITPOS after this check. */
4071 if (bitpos % BITS_PER_UNIT != 0)
4074 /* Reject aligned bitfields: we want to use a normal load or store
4075 instead of a left/right pair. */
4076 if (MEM_ALIGN (*op) >= width)
4079 /* Adjust *OP to refer to the whole field. This also has the effect
4080 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4081 *op = adjust_address (*op, BLKmode, 0);
4082 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4084 /* Get references to both ends of the field. We deliberately don't
4085 use the original QImode *OP for FIRST since the new BLKmode one
4086 might have a simpler address. */
4087 first = adjust_address (*op, QImode, 0);
4088 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4090 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4091 be the upper word and RIGHT the lower word. */
4092 if (TARGET_BIG_ENDIAN)
4093 *left = first, *right = last;
4095 *left = last, *right = first;
4101 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4102 Return true on success. We only handle cases where zero_extract is
4103 equivalent to sign_extract. */
4106 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4108 rtx left, right, temp;
4110 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4111 paradoxical word_mode subreg. This is the only case in which
4112 we allow the destination to be larger than the source. */
4113 if (GET_CODE (dest) == SUBREG
4114 && GET_MODE (dest) == DImode
4115 && SUBREG_BYTE (dest) == 0
4116 && GET_MODE (SUBREG_REG (dest)) == SImode)
4117 dest = SUBREG_REG (dest);
4119 /* After the above adjustment, the destination must be the same
4120 width as the source. */
/* NOTE(review): the "return false;" bodies of the failed checks are
   elided from this listing.  */
4121 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4124 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* Load through a temporary so the ldl/ldr (lwl/lwr) pair reads both
   halves before DEST is fully written.  */
4127 temp = gen_reg_rtx (GET_MODE (dest));
4128 if (GET_MODE (dest) == DImode)
4130 emit_insn (gen_mov_ldl (temp, src, left));
4131 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4135 emit_insn (gen_mov_lwl (temp, src, left));
4136 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4142 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4146 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
/* Emit an sdl/sdr (or swl/swr) pair for an unaligned store; fails —
   via an elided "return false;" — when mips_get_unaligned_mem rejects
   the destination.  */
4150 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
/* Narrow SRC to exactly WIDTH bits before storing.  */
4153 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
4155 if (GET_MODE (src) == DImode)
4157 emit_insn (gen_mov_sdl (dest, src, left));
4158 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4162 emit_insn (gen_mov_swl (dest, src, left));
4163 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4168 /* Set up globals to generate code for the ISA or processor
4169 described by INFO. */
4172 mips_set_architecture (const struct mips_cpu_info *info)
/* Record the selected architecture in the three globals that the rest
   of the backend consults.  NOTE(review): a guard around these
   assignments (presumably "if (info != 0)") appears to be elided from
   this listing — confirm against the full source.  */
4176 mips_arch_info = info;
4177 mips_arch = info->cpu;
4178 mips_isa = info->isa;
4183 /* Likewise for tuning. */
4186 mips_set_tune (const struct mips_cpu_info *info)
/* Record the processor to tune for; unlike mips_set_architecture this
   does not affect the ISA selection.  */
4190 mips_tune_info = info;
4191 mips_tune = info->cpu;
4195 /* Implement TARGET_HANDLE_OPTION. */
/* Process one command-line option identified by CODE with argument ARG.
   NOTE(review): the "switch (code)" header, most "case OPT_..." labels,
   the ABI assignments for "32"/"o64"/"n32"/"64", and the "return"
   statements are elided from this listing; only representative arms
   are visible.  */
4198 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
/* -mabi=: map the ABI name to the corresponding ABI_* constant.  */
4203 if (strcmp (arg, "32") == 0)
4205 else if (strcmp (arg, "o64") == 0)
4207 else if (strcmp (arg, "n32") == 0)
4209 else if (strcmp (arg, "64") == 0)
4211 else if (strcmp (arg, "eabi") == 0)
4212 mips_abi = ABI_EABI;
/* -march=: remember the string and validate it parses to a CPU.  */
4218 mips_arch_string = arg;
4219 return mips_parse_cpu (arg) != 0;
/* -mtune=: likewise for the tuning processor.  */
4222 mips_tune_string = arg;
4223 return mips_parse_cpu (arg) != 0;
/* -mips<n>: synthesize the ISA name ("mips" + ARG) and parse it.  */
4226 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4227 return mips_isa_info != 0;
4229 case OPT_mflush_func_:
4230 mips_cache_flush_func = arg;
4233 case OPT_mno_flush_func:
4234 mips_cache_flush_func = NULL;
4242 /* Set up the threshold for data to go into the small data area, instead
4243 of the normal data area, and detect any conflicts in the switches. */
4246 override_options (void)
4248 int i, start, regno;
4249 enum machine_mode mode;
4251 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4253 /* The following code determines the architecture and register size.
4254 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4255 The GAS and GCC code should be kept in sync as much as possible. */
4257 if (mips_arch_string != 0)
4258 mips_set_architecture (mips_parse_cpu (mips_arch_string));
4260 if (mips_isa_info != 0)
4262 if (mips_arch_info == 0)
4263 mips_set_architecture (mips_isa_info);
4264 else if (mips_arch_info->isa != mips_isa_info->isa)
4265 error ("-%s conflicts with the other architecture options, "
4266 "which specify a %s processor",
4267 mips_isa_info->name,
4268 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
4271 if (mips_arch_info == 0)
4273 #ifdef MIPS_CPU_STRING_DEFAULT
4274 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4276 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4280 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4281 error ("-march=%s is not compatible with the selected ABI",
4282 mips_arch_info->name);
4284 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4285 if (mips_tune_string != 0)
4286 mips_set_tune (mips_parse_cpu (mips_tune_string));
4288 if (mips_tune_info == 0)
4289 mips_set_tune (mips_arch_info);
4291 if ((target_flags_explicit & MASK_64BIT) != 0)
4293 /* The user specified the size of the integer registers. Make sure
4294 it agrees with the ABI and ISA. */
4295 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4296 error ("-mgp64 used with a 32-bit processor");
4297 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4298 error ("-mgp32 used with a 64-bit ABI");
4299 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4300 error ("-mgp64 used with a 32-bit ABI");
4304 /* Infer the integer register size from the ABI and processor.
4305 Restrict ourselves to 32-bit registers if that's all the
4306 processor has, or if the ABI cannot handle 64-bit registers. */
4307 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4308 target_flags &= ~MASK_64BIT;
4310 target_flags |= MASK_64BIT;
4313 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4315 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4316 only one right answer here. */
4317 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4318 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4319 else if (!TARGET_64BIT && TARGET_FLOAT64)
4320 error ("unsupported combination: %s", "-mgp32 -mfp64");
4321 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4322 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4326 /* -msingle-float selects 32-bit float registers. Otherwise the
4327 float registers should be the same size as the integer ones. */
4328 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4329 target_flags |= MASK_FLOAT64;
4331 target_flags &= ~MASK_FLOAT64;
4334 /* End of code shared with GAS. */
4336 if ((target_flags_explicit & MASK_LONG64) == 0)
4338 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4339 target_flags |= MASK_LONG64;
4341 target_flags &= ~MASK_LONG64;
4344 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4345 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4347 /* For some configurations, it is useful to have -march control
4348 the default setting of MASK_SOFT_FLOAT. */
4349 switch ((int) mips_arch)
4351 case PROCESSOR_R4100:
4352 case PROCESSOR_R4111:
4353 case PROCESSOR_R4120:
4354 case PROCESSOR_R4130:
4355 target_flags |= MASK_SOFT_FLOAT;
4359 target_flags &= ~MASK_SOFT_FLOAT;
4365 flag_pcc_struct_return = 0;
4367 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4369 /* If neither -mbranch-likely nor -mno-branch-likely was given
4370 on the command line, set MASK_BRANCHLIKELY based on the target
4373 By default, we enable use of Branch Likely instructions on
4374 all architectures which support them with the following
4375 exceptions: when creating MIPS32 or MIPS64 code, and when
4376 tuning for architectures where their use tends to hurt
4379 The MIPS32 and MIPS64 architecture specifications say "Software
4380 is strongly encouraged to avoid use of Branch Likely
4381 instructions, as they will be removed from a future revision
4382 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4383 issue those instructions unless instructed to do so by
4385 if (ISA_HAS_BRANCHLIKELY
4386 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4387 && !(TUNE_MIPS5500 || TUNE_SB1))
4388 target_flags |= MASK_BRANCHLIKELY;
4390 target_flags &= ~MASK_BRANCHLIKELY;
4392 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4393 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
4395 /* The effect of -mabicalls isn't defined for the EABI. */
4396 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4398 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4399 target_flags &= ~MASK_ABICALLS;
4402 /* -fpic (-KPIC) is the default when TARGET_ABICALLS is defined. We need
4403 to set flag_pic so that the LEGITIMATE_PIC_OPERAND_P macro will work. */
4404 /* ??? -non_shared turns off pic code generation, but this is not
4406 if (TARGET_ABICALLS)
4409 if (mips_section_threshold > 0)
4410 warning (0, "-G is incompatible with PIC code which is the default");
4413 /* mips_split_addresses is a half-way house between explicit
4414 relocations and the traditional assembler macros. It can
4415 split absolute 32-bit symbolic constants into a high/lo_sum
4416 pair but uses macros for other sorts of access.
4418 Like explicit relocation support for REL targets, it relies
4419 on GNU extensions in the assembler and the linker.
4421 Although this code should work for -O0, it has traditionally
4422 been treated as an optimization. */
4423 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4424 && optimize && !flag_pic
4425 && !ABI_HAS_64BIT_SYMBOLS)
4426 mips_split_addresses = 1;
4428 mips_split_addresses = 0;
4430 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4431 faster code, but at the expense of more nops. Enable it at -O3 and
4433 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4434 target_flags |= MASK_VR4130_ALIGN;
4436 /* When compiling for the mips16, we cannot use floating point. We
4437 record the original hard float value in mips16_hard_float. */
4440 if (TARGET_SOFT_FLOAT)
4441 mips16_hard_float = 0;
4443 mips16_hard_float = 1;
4444 target_flags |= MASK_SOFT_FLOAT;
4446 /* Don't run the scheduler before reload, since it tends to
4447 increase register pressure. */
4448 flag_schedule_insns = 0;
4450 /* Don't do hot/cold partitioning. The constant layout code expects
4451 the whole function to be in a single section. */
4452 flag_reorder_blocks_and_partition = 0;
4454 /* Silently disable -mexplicit-relocs since it doesn't apply
4455 to mips16 code. Even so, it would overly pedantic to warn
4456 about "-mips16 -mexplicit-relocs", especially given that
4457 we use a %gprel() operator. */
4458 target_flags &= ~MASK_EXPLICIT_RELOCS;
4461 /* When using explicit relocs, we call dbr_schedule from within
4463 if (TARGET_EXPLICIT_RELOCS)
4465 mips_flag_delayed_branch = flag_delayed_branch;
4466 flag_delayed_branch = 0;
4469 #ifdef MIPS_TFMODE_FORMAT
4470 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4473 /* Make sure that the user didn't turn off paired single support when
4474 MIPS-3D support is requested. */
4475 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4476 && !TARGET_PAIRED_SINGLE_FLOAT)
4477 error ("-mips3d requires -mpaired-single");
4479 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
4481 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4483 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4484 and TARGET_HARD_FLOAT are both true. */
4485 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4486 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4488 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4490 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4491 error ("-mips3d/-mpaired-single must be used with -mips64");
4493 mips_print_operand_punct['?'] = 1;
4494 mips_print_operand_punct['#'] = 1;
4495 mips_print_operand_punct['/'] = 1;
4496 mips_print_operand_punct['&'] = 1;
4497 mips_print_operand_punct['!'] = 1;
4498 mips_print_operand_punct['*'] = 1;
4499 mips_print_operand_punct['@'] = 1;
4500 mips_print_operand_punct['.'] = 1;
4501 mips_print_operand_punct['('] = 1;
4502 mips_print_operand_punct[')'] = 1;
4503 mips_print_operand_punct['['] = 1;
4504 mips_print_operand_punct[']'] = 1;
4505 mips_print_operand_punct['<'] = 1;
4506 mips_print_operand_punct['>'] = 1;
4507 mips_print_operand_punct['{'] = 1;
4508 mips_print_operand_punct['}'] = 1;
4509 mips_print_operand_punct['^'] = 1;
4510 mips_print_operand_punct['$'] = 1;
4511 mips_print_operand_punct['+'] = 1;
4512 mips_print_operand_punct['~'] = 1;
4514 mips_char_to_class['d'] = TARGET_MIPS16 ? M16_REGS : GR_REGS;
4515 mips_char_to_class['t'] = T_REG;
4516 mips_char_to_class['f'] = (TARGET_HARD_FLOAT ? FP_REGS : NO_REGS);
4517 mips_char_to_class['h'] = HI_REG;
4518 mips_char_to_class['l'] = LO_REG;
4519 mips_char_to_class['x'] = MD_REGS;
4520 mips_char_to_class['b'] = ALL_REGS;
4521 mips_char_to_class['c'] = (TARGET_ABICALLS ? PIC_FN_ADDR_REG :
4522 TARGET_MIPS16 ? M16_NA_REGS :
4524 mips_char_to_class['e'] = LEA_REGS;
4525 mips_char_to_class['j'] = PIC_FN_ADDR_REG;
4526 mips_char_to_class['v'] = V1_REG;
4527 mips_char_to_class['y'] = GR_REGS;
4528 mips_char_to_class['z'] = ST_REGS;
4529 mips_char_to_class['B'] = COP0_REGS;
4530 mips_char_to_class['C'] = COP2_REGS;
4531 mips_char_to_class['D'] = COP3_REGS;
4533 /* Set up array to map GCC register number to debug register number.
4534 Ignore the special purpose register numbers. */
4536 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4537 mips_dbx_regno[i] = -1;
4539 start = GP_DBX_FIRST - GP_REG_FIRST;
4540 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4541 mips_dbx_regno[i] = i + start;
4543 start = FP_DBX_FIRST - FP_REG_FIRST;
4544 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4545 mips_dbx_regno[i] = i + start;
4547 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4548 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4550 /* Set up array giving whether a given register can hold a given mode. */
4552 for (mode = VOIDmode;
4553 mode != MAX_MACHINE_MODE;
4554 mode = (enum machine_mode) ((int)mode + 1))
4556 register int size = GET_MODE_SIZE (mode);
4557 register enum mode_class class = GET_MODE_CLASS (mode);
4559 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
4563 if (mode == CCV2mode)
4566 && (regno - ST_REG_FIRST) % 2 == 0);
4568 else if (mode == CCV4mode)
4571 && (regno - ST_REG_FIRST) % 4 == 0);
4573 else if (mode == CCmode)
4576 temp = (regno == FPSW_REGNUM);
4578 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4579 || FP_REG_P (regno));
4582 else if (GP_REG_P (regno))
4583 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4585 else if (FP_REG_P (regno))
4586 temp = ((regno % FP_INC) == 0)
4587 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
4588 || class == MODE_VECTOR_FLOAT)
4589 && size <= UNITS_PER_FPVALUE)
4590 /* Allow integer modes that fit into a single
4591 register. We need to put integers into FPRs
4592 when using instructions like cvt and trunc. */
4593 || (class == MODE_INT && size <= UNITS_PER_FPREG)
4594 /* Allow TFmode for CCmode reloads. */
4595 || (ISA_HAS_8CC && mode == TFmode));
4597 else if (MD_REG_P (regno))
4598 temp = (INTEGRAL_MODE_P (mode)
4599 && (size <= UNITS_PER_WORD
4600 || (regno == MD_REG_FIRST
4601 && size == 2 * UNITS_PER_WORD)));
4603 else if (ALL_COP_REG_P (regno))
4604 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4608 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4612 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4613 initialized yet, so we can't use that here. */
4614 gpr_mode = TARGET_64BIT ? DImode : SImode;
4616 /* Provide default values for align_* for 64-bit targets. */
4617 if (TARGET_64BIT && !TARGET_MIPS16)
4619 if (align_loops == 0)
4621 if (align_jumps == 0)
4623 if (align_functions == 0)
4624 align_functions = 8;
4627 /* Function to allocate machine-dependent function status. */
4628 init_machine_status = &mips_init_machine_status;
4630 if (ABI_HAS_64BIT_SYMBOLS)
4632 if (TARGET_EXPLICIT_RELOCS)
4634 mips_split_p[SYMBOL_64_HIGH] = true;
4635 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4636 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4638 mips_split_p[SYMBOL_64_MID] = true;
4639 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4640 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4642 mips_split_p[SYMBOL_64_LOW] = true;
4643 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
4644 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
4646 mips_split_p[SYMBOL_GENERAL] = true;
4647 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4652 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
4654 mips_split_p[SYMBOL_GENERAL] = true;
4655 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
4656 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
4662 /* The high part is provided by a pseudo copy of $gp. */
4663 mips_split_p[SYMBOL_SMALL_DATA] = true;
4664 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
4667 if (TARGET_EXPLICIT_RELOCS)
4669 /* Small data constants are kept whole until after reload,
4670 then lowered by mips_rewrite_small_data. */
4671 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
4673 mips_split_p[SYMBOL_GOT_LOCAL] = true;
4676 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
4677 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
4681 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
4682 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
4687 /* The HIGH and LO_SUM are matched by special .md patterns. */
4688 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
4690 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
4691 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
4692 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
4694 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
4695 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
4696 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
4701 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
4703 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
4704 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
4710 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
4711 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
4712 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
4715 /* Thread-local relocation operators. */
4716 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
4717 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
4718 mips_split_p[SYMBOL_DTPREL] = 1;
4719 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
4720 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
4721 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
4722 mips_split_p[SYMBOL_TPREL] = 1;
4723 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
4724 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
4726 /* We don't have a thread pointer access instruction on MIPS16, or
4727 appropriate TLS relocations. */
4729 targetm.have_tls = false;
4731 /* Default to working around R4000 errata only if the processor
4732 was selected explicitly. */
4733 if ((target_flags_explicit & MASK_FIX_R4000) == 0
4734 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
4735 target_flags |= MASK_FIX_R4000;
4737 /* Default to working around R4400 errata only if the processor
4738 was selected explicitly. */
4739 if ((target_flags_explicit & MASK_FIX_R4400) == 0
4740 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
4741 target_flags |= MASK_FIX_R4400;
4744 /* Implement CONDITIONAL_REGISTER_USAGE.  Mark registers fixed and/or
   call-used according to the float configuration, ISA and ABI in effect.
   NOTE(review): this listing is elided; enclosing braces and some guard
   conditions (e.g. the TARGET_MIPS16 test) are not visible here.  */
4747 mips_conditional_register_usage (void)
/* Soft-float: neither the FP registers nor the FP condition-code
   registers are usable.  */
4749 if (!TARGET_HARD_FLOAT)
4753 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
4754 fixed_regs[regno] = call_used_regs[regno] = 1;
4755 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4756 fixed_regs[regno] = call_used_regs[regno] = 1;
4758 else if (! ISA_HAS_8CC)
4762 /* We only have a single condition code register. We
4763 implement this by hiding all the condition code registers,
4764 and generating RTL that refers directly to ST_REG_FIRST. */
4765 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
4766 fixed_regs[regno] = call_used_regs[regno] = 1;
4768 /* In mips16 mode, we permit the $t temporary registers to be used
4769 for reload. We prohibit the unused $s registers, since they
4770 are caller saved, and saving them via a mips16 register would
4771 probably waste more time than just reloading the value. */
4774 fixed_regs[18] = call_used_regs[18] = 1;
4775 fixed_regs[19] = call_used_regs[19] = 1;
4776 fixed_regs[20] = call_used_regs[20] = 1;
4777 fixed_regs[21] = call_used_regs[21] = 1;
4778 fixed_regs[22] = call_used_regs[22] = 1;
4779 fixed_regs[23] = call_used_regs[23] = 1;
4780 fixed_regs[26] = call_used_regs[26] = 1;
4781 fixed_regs[27] = call_used_regs[27] = 1;
4782 fixed_regs[30] = call_used_regs[30] = 1;
4784 /* fp20-23 are now caller saved. */
4785 if (mips_abi == ABI_64)
4788 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
4789 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4791 /* Odd registers from fp21 to fp31 are now caller saved. */
4792 if (mips_abi == ABI_N32)
4795 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
4796 call_really_used_regs[regno] = call_used_regs[regno] = 1;
4800 /* Allocate a chunk of memory for per-function machine-dependent data.
   The memory is zero-initialized and garbage-collector managed
   (ggc_alloc_cleared); this function is installed as the
   init_machine_status hook.  */
4801 static struct machine_function *
4802 mips_init_machine_status (void)
4804 return ((struct machine_function *)
4805 ggc_alloc_cleared (sizeof (struct machine_function)));
4808 /* On the mips16, we want to allocate $24 (T_REG) before other
4809 registers for instructions for which it is possible. This helps
4810 avoid shuffling registers around in order to set up for an xor,
4811 encouraging the compiler to use a cmp instead. */
4814 mips_order_regs_for_local_alloc (void)
/* Default order: allocate hard registers in register-number order.  */
4818 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4819 reg_alloc_order[i] = i;
/* Swap $0 and $24 so that $24 is tried first.  NOTE(review): the
   guarding condition (presumably TARGET_MIPS16, per the comment above)
   is elided from this listing — confirm against the full source.  */
4823 /* It really doesn't matter where we put register 0, since it is
4824 a fixed register anyhow. */
4825 reg_alloc_order[0] = 24;
4826 reg_alloc_order[24] = 0;
4831 /* The MIPS debug format wants all automatic variables and arguments
4832 to be in terms of the virtual frame pointer (stack pointer before
4833 any adjustment in the function), while the MIPS 3.0 linker wants
4834 the frame pointer to be the stack pointer after the initial
4835 adjustment. So, we do the adjustment here. The arg pointer (which
4836 is eliminated) points to the virtual frame pointer, while the frame
4837 pointer (which may be eliminated) points to the stack pointer after
4838 the initial adjustments. */
4841 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
/* Split ADDR into a base register and a constant term.  */
4843 rtx offset2 = const0_rtx;
4844 rtx reg = eliminate_constant_term (addr, &offset2);
4847 offset = INTVAL (offset2);
4849 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
4850 || reg == hard_frame_pointer_rtx)
/* Use the cached frame size if the frame layout has been computed,
   otherwise compute it now.  */
4852 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
4853 ? compute_frame_size (get_frame_size ())
4854 : cfun->machine->frame.total_size;
4856 /* MIPS16 frame is smaller */
4857 if (frame_pointer_needed && TARGET_MIPS16)
4858 frame_size -= cfun->machine->frame.args_size;
/* Rebase the offset from the post-adjustment stack pointer.  */
4860 offset = offset - frame_size;
4863 /* sdbout_parms does not want this to crash for unrecognized cases. */
4865 else if (reg != arg_pointer_rtx)
4866 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
4873 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
4875 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
4876 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
4877 'h' OP is HIGH, prints %hi(X),
4878 'd' output integer constant in decimal,
4879 'z' if the operand is 0, use $0 instead of normal operand.
4880 'D' print second part of double-word register or memory operand.
4881 'L' print low-order register of double-word register operand.
4882 'M' print high-order register of double-word register operand.
4883 'C' print part of opcode for a branch condition.
4884 'F' print part of opcode for a floating-point branch condition.
4885 'N' print part of opcode for a branch condition, inverted.
4886 'W' print part of opcode for a floating-point branch condition, inverted.
4887 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
4888 'z' for (eq:?I ...), 'n' for (ne:?I ...).
4889 't' like 'T', but with the EQ/NE cases reversed
4890 'Y' for a CONST_INT X, print mips_fp_conditions[X]
4891 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
4892 'R' print the reloc associated with LO_SUM
4894 The punctuation characters are:
4896 '(' Turn on .set noreorder
4897 ')' Turn on .set reorder
4898 '[' Turn on .set noat
4900 '<' Turn on .set nomacro
4901 '>' Turn on .set macro
4902 '{' Turn on .set volatile (not GAS)
4903 '}' Turn on .set novolatile (not GAS)
4904 '&' Turn on .set noreorder if filling delay slots
4905 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
4906 '!' Turn on .set nomacro if filling delay slots
4907 '#' Print nop if in a .set noreorder section.
4908 '/' Like '#', but does nothing within a delayed branch sequence
4909 '?' Print 'l' if we are to use a branch likely instead of normal branch.
4910 '@' Print the name of the assembler temporary register (at or $1).
4911 '.' Print the name of the register with a hard-wired zero (zero or $0).
4912 '^' Print the name of the pic call-through register (t9 or $25).
4913 '$' Print the name of the stack pointer register (sp or $29).
4914 '+' Print the name of the gp register (usually gp or $28).
4915 '~' Output a branch alignment to LABEL_ALIGN(NULL).
 
   NOTE(review): the dispatching switch/case labels for the punctuation
   characters are elided from this listing; the fputs/fprintf calls below
   correspond to the cases documented above, in order.  */
4918 print_operand (FILE *file, rtx op, int letter)
4920 register enum rtx_code code;
/* Punctuation characters are handled first; OP is ignored for them.  */
4922 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
4927 if (mips_branch_likely)
4932 fputs (reg_names [GP_REG_FIRST + 1], file);
4936 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
4940 fputs (reg_names [GP_REG_FIRST + 0], file);
4944 fputs (reg_names[STACK_POINTER_REGNUM], file);
4948 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
/* The set_noreorder/set_nomacro/set_noat/set_volatile counters nest:
   the directive is emitted only on the 0 -> 1 transition and undone
   only on the 1 -> 0 transition.  */
4952 if (final_sequence != 0 && set_noreorder++ == 0)
4953 fputs (".set\tnoreorder\n\t", file);
4957 if (final_sequence != 0)
4959 if (set_noreorder++ == 0)
4960 fputs (".set\tnoreorder\n\t", file);
4962 if (set_nomacro++ == 0)
4963 fputs (".set\tnomacro\n\t", file);
4968 if (final_sequence != 0 && set_nomacro++ == 0)
4969 fputs ("\n\t.set\tnomacro", file);
4973 if (set_noreorder != 0)
4974 fputs ("\n\tnop", file);
4978 /* Print an extra newline so that the delayed insn is separated
4979 from the following ones. This looks neater and is consistent
4980 with non-nop delayed sequences. */
4981 if (set_noreorder != 0 && final_sequence == 0)
4982 fputs ("\n\tnop\n", file);
4986 if (set_noreorder++ == 0)
4987 fputs (".set\tnoreorder\n\t", file);
4991 if (set_noreorder == 0)
4992 error ("internal error: %%) found without a %%( in assembler pattern");
4994 else if (--set_noreorder == 0)
4995 fputs ("\n\t.set\treorder", file);
5000 if (set_noat++ == 0)
5001 fputs (".set\tnoat\n\t", file);
5006 error ("internal error: %%] found without a %%[ in assembler pattern");
5007 else if (--set_noat == 0)
5008 fputs ("\n\t.set\tat", file);
5013 if (set_nomacro++ == 0)
5014 fputs (".set\tnomacro\n\t", file);
5018 if (set_nomacro == 0)
5019 error ("internal error: %%> found without a %%< in assembler pattern");
5020 else if (--set_nomacro == 0)
5021 fputs ("\n\t.set\tmacro", file);
5026 if (set_volatile++ == 0)
5027 fputs ("#.set\tvolatile\n\t", file);
5031 if (set_volatile == 0)
5032 error ("internal error: %%} found without a %%{ in assembler pattern");
5033 else if (--set_volatile == 0)
5034 fputs ("\n\t#.set\tnovolatile", file);
5040 if (align_labels_log > 0)
5041 ASM_OUTPUT_ALIGN (file, align_labels_log);
5046 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
/* From here on a real operand is required.  */
5055 error ("PRINT_OPERAND null pointer");
5059 code = GET_CODE (op);
/* 'C': print the comparison mnemonic for a branch condition.  */
5064 case EQ: fputs ("eq", file); break;
5065 case NE: fputs ("ne", file); break;
5066 case GT: fputs ("gt", file); break;
5067 case GE: fputs ("ge", file); break;
5068 case LT: fputs ("lt", file); break;
5069 case LE: fputs ("le", file); break;
5070 case GTU: fputs ("gtu", file); break;
5071 case GEU: fputs ("geu", file); break;
5072 case LTU: fputs ("ltu", file); break;
5073 case LEU: fputs ("leu", file); break;
5075 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
/* 'N': same as 'C' but with the condition inverted.  */
5078 else if (letter == 'N')
5081 case EQ: fputs ("ne", file); break;
5082 case NE: fputs ("eq", file); break;
5083 case GT: fputs ("le", file); break;
5084 case GE: fputs ("lt", file); break;
5085 case LT: fputs ("ge", file); break;
5086 case LE: fputs ("gt", file); break;
5087 case GTU: fputs ("leu", file); break;
5088 case GEU: fputs ("ltu", file); break;
5089 case LTU: fputs ("geu", file); break;
5090 case LEU: fputs ("gtu", file); break;
5092 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5095 else if (letter == 'F')
5098 case EQ: fputs ("c1f", file); break;
5099 case NE: fputs ("c1t", file); break;
5101 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5104 else if (letter == 'W')
5107 case EQ: fputs ("c1t", file); break;
5108 case NE: fputs ("c1f", file); break;
5110 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5113 else if (letter == 'h')
5115 if (GET_CODE (op) == HIGH)
5118 print_operand_reloc (file, op, mips_hi_relocs);
5121 else if (letter == 'R')
5122 print_operand_reloc (file, op, mips_lo_relocs);
5124 else if (letter == 'Y')
5126 if (GET_CODE (op) == CONST_INT
5127 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5128 < ARRAY_SIZE (mips_fp_conditions)))
5129 fputs (mips_fp_conditions[INTVAL (op)], file);
5131 output_operand_lossage ("invalid %%Y value");
5134 else if (letter == 'Z')
5138 print_operand (file, op, 0);
5143 else if (code == REG || code == SUBREG)
5145 register int regnum;
5148 regnum = REGNO (op);
5150 regnum = true_regnum (op);
/* 'M'/'L' select the high/low half of a double-word register pair,
   depending on endianness.  */
5152 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5153 || (letter == 'L' && WORDS_BIG_ENDIAN)
5157 fprintf (file, "%s", reg_names[regnum]);
5160 else if (code == MEM)
/* 'D' on a memory operand addresses its second word.  */
5163 output_address (plus_constant (XEXP (op, 0), 4));
5165 output_address (XEXP (op, 0));
5168 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5169 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5171 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5172 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5174 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5175 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5177 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5178 fputs (reg_names[GP_REG_FIRST], file);
5180 else if (letter == 'd' || letter == 'x' || letter == 'X')
5181 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5183 else if (letter == 'T' || letter == 't')
/* Index into "zfnt": bit 1 selects the NE ('T') / EQ ('t') sense,
   bit 0 selects the CCmode spelling ('f'/'t') over 'z'/'n'.  */
5185 int truth = (code == NE) == (letter == 'T');
5186 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5189 else if (CONST_GP_P (op))
5190 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
/* Fallback: print the operand as a constant expression.  */
5193 output_addr_const (file, op);
5197 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5198 RELOCS is the array of relocations to use, indexed by
   mips_symbol_type; the entries are opening strings such as "%lo(".  */
5201 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5203 enum mips_symbol_type symbol_type;
5206 HOST_WIDE_INT offset;
/* Reject operands whose symbol type has no relocation operator.  */
5208 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5209 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5211 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5212 mips_split_const (op, &base, &offset);
5213 if (UNSPEC_ADDRESS_P (base))
5214 op = plus_constant (UNSPEC_ADDRESS (base), offset);
5216 fputs (relocs[symbol_type], file);
5217 output_addr_const (file, op);
/* Walk the relocation prefix again; the (elided) loop body presumably
   emits one closing ')' per '(' in the prefix — confirm in full source.  */
5218 for (p = relocs[symbol_type]; *p != 0; p++)
5223 /* Output address operand X to FILE.  Classify the address and print
   it in the assembler syntax appropriate for its form.  */
5226 print_operand_address (FILE *file, rtx x)
5228 struct mips_address_info addr;
5230 if (mips_classify_address (&addr, x, word_mode, true))
/* Register + constant-offset form: "offset(reg)".  */
5234 print_operand (file, addr.offset, 0);
5235 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5238 case ADDRESS_LO_SUM:
/* LO_SUM form: print the %lo-style relocation, then "(reg)".  */
5239 print_operand (file, addr.offset, 'R');
5240 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5243 case ADDRESS_CONST_INT:
/* Absolute constant: use register 0 ($zero) as the base.  */
5244 output_addr_const (file, x);
5245 fprintf (file, "(%s)", reg_names[0]);
5248 case ADDRESS_SYMBOLIC:
5249 output_addr_const (file, x);
5255 /* When using assembler macros, keep track of all of small-data externs
5256 so that mips_file_end can emit the appropriate declarations for them.
5258 In most cases it would be safe (though pointless) to emit .externs
5259 for other symbols too. One exception is when an object is within
5260 the -G limit but declared by the user to be in a section other
5261 than .sbss or .sdata. */
5264 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5266 register struct extern_list *p;
/* Record small-data externs on the GC-allocated extern_head list.  */
5268 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5270 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5271 p->next = extern_head;
5273 p->size = int_size_in_bytes (TREE_TYPE (decl));
/* IRIX O32: also record external functions so that mips_file_end can
   emit ".globl name .text" for them (size is recorded elsewhere —
   the assignments here are elided from this listing).  */
5277 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5279 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5280 p->next = extern_head;
/* Record a libcall symbol FUN on the extern_list so mips_file_end can
   emit the IRIX O32 ".globl name .text" declaration for it.  */
5291 irix_output_external_libcall (rtx fun)
5293 register struct extern_list *p;
5295 if (mips_abi == ABI_32)
5297 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5298 p->next = extern_head;
5299 p->name = XSTR (fun, 0);
5306 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5307 put out a MIPS ECOFF file and a stab.  Emits a numbered ".file"
   directive and tracks the current filename in current_function_file.  */
5310 mips_output_filename (FILE *stream, const char *name)
5313 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5315 if (write_symbols == DWARF2_DEBUG)
/* First filename seen in this compilation: always emit it.  */
5317 else if (mips_output_filename_first_time)
5319 mips_output_filename_first_time = 0;
5320 num_source_filenames += 1;
5321 current_function_file = name;
5322 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5323 output_quoted_string (stream, name);
5324 putc ('\n', stream);
5327 /* If we are emitting stabs, let dbxout.c handle this (except for
5328 the mips_output_filename_first_time case). */
5329 else if (write_symbols == DBX_DEBUG)
/* Otherwise emit ".file" only when the filename actually changes.  */
5332 else if (name != current_function_file
5333 && strcmp (name, current_function_file) != 0)
5335 num_source_filenames += 1;
5336 current_function_file = name;
5337 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5338 output_quoted_string (stream, name);
5339 putc ('\n', stream);
5343 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5344 that should be written before the opening quote, such as "\t.ascii\t"
5345 for real string data or "\t# " for a comment.  LEN is the byte length
   of STRING_PARAM (which may contain embedded NULs).  */
5348 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
/* Work with unsigned bytes so the escape logic below is well defined
   regardless of the signedness of plain char.  */
5353 register const unsigned char *string =
5354 (const unsigned char *)string_param;
5356 fprintf (stream, "%s\"", prefix);
5357 for (i = 0; i < len; i++)
5359 register int c = string[i];
/* Backslash-escape quote and backslash; non-printables (the elided
   branch) are emitted as octal escapes.  */
5363 if (c == '\\' || c == '\"')
5365 putc ('\\', stream);
5373 fprintf (stream, "\\%03o", c);
/* Wrap long lines: close the quote and reopen on a fresh line.  */
5377 if (cur_pos > 72 && i+1 < len)
5380 fprintf (stream, "\"\n%s\"", prefix);
5383 fprintf (stream, "\"\n");
5386 /* Implement TARGET_ASM_FILE_START.  Emit the ABI-description sections,
   the .abicalls/.set mips16 pseudo-ops and an optional verbose-asm
   banner at the top of the assembly file.  */
5389 mips_file_start (void)
5391 default_file_start ();
5395 /* Generate a special section to describe the ABI switches used to
5396 produce the resultant binary. This used to be done by the assembler
5397 setting bits in the ELF header's flags field, but we have run out of
5398 bits. GDB needs this information in order to be able to correctly
5399 debug these binaries. See the function mips_gdbarch_init() in
5400 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5401 causes unnecessary IRIX 6 ld warnings. */
5402 const char * abi_string = NULL;
5406 case ABI_32: abi_string = "abi32"; break;
5407 case ABI_N32: abi_string = "abiN32"; break;
5408 case ABI_64: abi_string = "abi64"; break;
5409 case ABI_O64: abi_string = "abiO64"; break;
5410 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5414 /* Note - we use fprintf directly rather than called named_section()
5415 because in this way we can avoid creating an allocated section. We
5416 do not want this section to take up any space in the running
5418 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5420 /* There is no ELF header flag to distinguish long32 forms of the
5421 EABI from long64 forms. Emit a special section to help tools
5423 if (mips_abi == ABI_EABI)
5424 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5425 TARGET_LONG64 ? 64 : 32);
5427 /* Restore the default section. */
5428 fprintf (asm_out_file, "\t.previous\n");
5431 /* Generate the pseudo ops that System V.4 wants. */
5432 if (TARGET_ABICALLS)
5433 /* ??? but do not want this (or want pic0) if -non-shared? */
5434 fprintf (asm_out_file, "\t.abicalls\n");
5437 fprintf (asm_out_file, "\t.set\tmips16\n");
5439 if (flag_verbose_asm)
5440 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5442 mips_section_threshold, mips_arch_info->name, mips_isa);
5445 #ifdef BSS_SECTION_ASM_OP
5446 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5447 in the use of sbss: small-data objects go to .sbss, everything else
   to the ordinary bss section (the else branch is elided here).  */
5450 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5451 unsigned HOST_WIDE_INT size, int align)
5453 extern tree last_assemble_variable_decl;
5455 if (mips_in_small_data_p (decl))
5456 named_section (0, ".sbss", 0);
/* ALIGN is in bits; convert to a log2 byte alignment.  */
5459 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5460 last_assemble_variable_decl = decl;
5461 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* Reserve at least one byte so zero-sized objects get distinct
   addresses.  */
5462 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5466 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5467 .externs for any small-data variables that turned out to be external.
   Also emits IRIX O32 ".globl name .text" for undefined functions.  */
5470 mips_file_end (void)
5473 struct extern_list *p;
5477 fputs ("\n", asm_out_file);
/* Walk the list built by mips_output_external and
   irix_output_external_libcall.  */
5479 for (p = extern_head; p != 0; p = p->next)
5481 name_tree = get_identifier (p->name);
5483 /* Positively ensure only one .extern for any given symbol. */
5484 if (!TREE_ASM_WRITTEN (name_tree)
5485 && TREE_SYMBOL_REFERENCED (name_tree))
5487 TREE_ASM_WRITTEN (name_tree) = 1;
5488 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5489 `.global name .text' directive for every used but
5490 undefined function. If we don't, the linker may perform
5491 an optimization (skipping over the insns that set $gp)
5492 when it is unsafe. */
5493 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5495 fputs ("\t.globl ", asm_out_file);
5496 assemble_name (asm_out_file, p->name);
5497 fputs (" .text\n", asm_out_file);
/* Otherwise emit an ordinary sized .extern.  */
5501 fputs ("\t.extern\t", asm_out_file);
5502 assemble_name (asm_out_file, p->name);
5503 fprintf (asm_out_file, ", %d\n", p->size);
5510 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5511 elfos.h version, but we also need to handle -muninit-const-in-rodata,
   which routes uninitialized const objects into .rdata instead of .comm.  */
5514 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5515 unsigned HOST_WIDE_INT size,
5518 /* If the target wants uninitialized const declarations in
5519 .rdata then don't put them in .comm. */
5520 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5521 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5522 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
5524 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5525 targetm.asm_out.globalize_label (stream, name);
5527 readonly_data_section ();
5528 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
/* Emit "name:\n\t.space\tSIZE" into the read-only section.  */
5529 mips_declare_object (stream, name, "",
5530 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Normal case: defer to the common-object helper.  */
5534 mips_declare_common_object (stream, name, "\n\t.comm\t",
5538 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5539 NAME is the name of the object and ALIGN is the required alignment
5540 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
5541 alignment argument.  (NOTE: per the callers, ALIGN is passed in bits
   and divided by BITS_PER_UNIT below — confirm against full source.)  */
5544 mips_declare_common_object (FILE *stream, const char *name,
5545 const char *init_string,
5546 unsigned HOST_WIDE_INT size,
5547 unsigned int align, bool takes_alignment_p)
5549 if (!takes_alignment_p)
/* The directive cannot express alignment: round SIZE up to a multiple
   of the alignment instead.  */
5551 size += (align / BITS_PER_UNIT) - 1;
5552 size -= size % (align / BITS_PER_UNIT);
5553 mips_declare_object (stream, name, init_string,
5554 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
/* Directive accepts alignment directly: pass it as a third operand.  */
5557 mips_declare_object (stream, name, init_string,
5558 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5559 size, align / BITS_PER_UNIT);
5562 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5563 macros, mark the symbol as written so that mips_file_end won't emit an
5564 .extern for it. STREAM is the output file, NAME is the name of the
5565 symbol, INIT_STRING is the string that should be written before the
5566 symbol and FINAL_STRING is the string that should be written after it.
5567 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5570 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5571 const char *final_string, ...)
5575 fputs (init_string, stream);
5576 assemble_name (stream, name);
/* Forward the trailing varargs through the FINAL_STRING format.  */
5577 va_start (ap, final_string);
5578 vfprintf (stream, final_string, ap);
/* Symbol is now defined locally; suppress the .extern in
   mips_file_end.  */
5581 if (!TARGET_EXPLICIT_RELOCS)
5583 tree name_tree = get_identifier (name);
5584 TREE_ASM_WRITTEN (name_tree) = 1;
5588 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5589 extern int size_directive_output;
5591 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5592 definitions except that it uses mips_declare_object() to emit the label.
   (DECL is marked ATTRIBUTE_UNUSED because it is only referenced inside
   conditionally-compiled code.)  */
5595 mips_declare_object_name (FILE *stream, const char *name,
5596 tree decl ATTRIBUTE_UNUSED)
5598 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5599 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
/* Emit a .size directive now if the size is known at this point.  */
5602 size_directive_output = 0;
5603 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5607 size_directive_output = 1;
5608 size = int_size_in_bytes (TREE_TYPE (decl));
5609 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
/* Finally emit the "name:" label itself.  */
5612 mips_declare_object (stream, name, "", ":\n", 0);
5615 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff:
   emit a late .size directive for tentative definitions whose size
   only became known after the initial declaration.  */
5618 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5622 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
5623 if (!flag_inhibit_size_directive
5624 && DECL_SIZE (decl) != 0
5625 && !at_end && top_level
5626 && DECL_INITIAL (decl) == error_mark_node
5627 && !size_directive_output)
5631 size_directive_output = 1;
5632 size = int_size_in_bytes (TREE_TYPE (decl));
5633 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
5638 /* Return true if X is a small data address that can be rewritten
   as a gp-relative (explicit-reloc) reference.  */
5642 mips_rewrite_small_data_p (rtx x)
5644 enum mips_symbol_type symbol_type;
5646 return (TARGET_EXPLICIT_RELOCS
5647 && mips_symbolic_constant_p (x, &symbol_type)
5648 && symbol_type == SYMBOL_SMALL_DATA);
5652 /* A for_each_rtx callback for mips_small_data_pattern_p.  Returns
   nonzero for rewritable small-data addresses; LO_SUMs are handled
   by the (elided) branch below, since their operands are already
   rewritten.  */
5655 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5657 if (GET_CODE (*loc) == LO_SUM)
5660 return mips_rewrite_small_data_p (*loc);
5663 /* Return true if OP refers to small data symbols directly, not through
   a LO_SUM.  Walks every sub-rtx of OP via for_each_rtx.  */
5667 mips_small_data_pattern_p (rtx op)
5669 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
5672 /* A for_each_rtx callback, used by mips_rewrite_small_data.  Replaces
   each rewritable small-data address with a gp-relative LO_SUM.  */
5675 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
5677 if (mips_rewrite_small_data_p (*loc))
5678 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
/* Don't descend into an existing LO_SUM (handled by elided code).  */
5680 if (GET_CODE (*loc) == LO_SUM)
5686 /* If possible, rewrite OP so that it refers to small data using
5687 explicit relocations.  Works on a copy so the original pattern
   is left untouched.  */
5690 mips_rewrite_small_data (rtx op)
5692 op = copy_insn (op);
5693 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
5697 /* Return true if the current function has an insn that implicitly
   uses the global pointer.  The result is cached in
   cfun->machine->has_gp_insn_p once a use has been found.  */
5701 mips_function_has_gp_insn (void)
5703 /* Don't bother rechecking if we found one last time. */
5704 if (!cfun->machine->has_gp_insn_p)
/* Scan the whole insn stream for a GOT reference or a direct
   small-data pattern.  */
5708 push_topmost_sequence ();
5709 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
5711 && GET_CODE (PATTERN (insn)) != USE
5712 && GET_CODE (PATTERN (insn)) != CLOBBER
5713 && (get_attr_got (insn) != GOT_UNSET
5714 || small_data_pattern (PATTERN (insn), VOIDmode)))
5716 pop_topmost_sequence ();
5718 cfun->machine->has_gp_insn_p = (insn != 0);
5720 return cfun->machine->has_gp_insn_p;
5724 /* Return the register that should be used as the global pointer
5725 within this function. Return 0 if the function doesn't need
5726 a global pointer.  In the NewABI leaf-function case this may be a
   call-clobbered register other than $gp.  */
5729 mips_global_pointer (void)
5733 /* $gp is always available in non-abicalls code. */
5734 if (!TARGET_ABICALLS)
5735 return GLOBAL_POINTER_REGNUM;
5737 /* We must always provide $gp when it is used implicitly. */
5738 if (!TARGET_EXPLICIT_RELOCS)
5739 return GLOBAL_POINTER_REGNUM;
5741 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
5743 if (current_function_profile)
5744 return GLOBAL_POINTER_REGNUM;
5746 /* If the function has a nonlocal goto, $gp must hold the correct
5747 global pointer for the target function. */
5748 if (current_function_has_nonlocal_goto)
5749 return GLOBAL_POINTER_REGNUM;
5751 /* If the gp is never referenced, there's no need to initialize it.
5752 Note that reload can sometimes introduce constant pool references
5753 into a function that otherwise didn't need them. For example,
5754 suppose we have an instruction like:
5756 (set (reg:DF R1) (float:DF (reg:SI R2)))
5758 If R2 turns out to be constant such as 1, the instruction may have a
5759 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
5760 using this constant if R2 doesn't get allocated to a register.
5762 In cases like these, reload will have added the constant to the pool
5763 but no instruction will yet refer to it. */
5764 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
5765 && !current_function_uses_const_pool
5766 && !mips_function_has_gp_insn ())
5769 /* We need a global pointer, but perhaps we can use a call-clobbered
5770 register instead of $gp.  Pick the first free call-used GPR that is
   not fixed and not the PIC call register.  */
5771 if (TARGET_NEWABI && current_function_is_leaf)
5772 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5773 if (!regs_ever_live[regno]
5774 && call_used_regs[regno]
5775 && !fixed_regs[regno]
5776 && regno != PIC_FUNCTION_ADDR_REGNUM)
5779 return GLOBAL_POINTER_REGNUM;
5783 /* Return true if the current function must save register REGNO in its
   prologue.  Covers call-saved registers, the frame pointer, the return
   address, and the mips16 floating-point-return special cases.  */
5786 mips_save_reg_p (unsigned int regno)
5788 /* We only need to save $gp for NewABI PIC. */
5789 if (regno == GLOBAL_POINTER_REGNUM)
5790 return (TARGET_ABICALLS && TARGET_NEWABI
5791 && cfun->machine->global_pointer == regno);
5793 /* Check call-saved registers. */
5794 if (regs_ever_live[regno] && !call_used_regs[regno])
5797 /* We need to save the old frame pointer before setting up a new one. */
5798 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
5801 /* We need to save the incoming return address if it is ever clobbered
5802 within the function. */
5803 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
/* The remaining checks apply to mips16 hard-float helper calls; the
   guarding TARGET_MIPS16 test is elided from this listing.  */
5810 return_type = DECL_RESULT (current_function_decl);
5812 /* $18 is a special case in mips16 code. It may be used to call
5813 a function which returns a floating point value, but it is
5814 marked in call_used_regs. */
5815 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
5818 /* $31 is also a special case. It will be used to copy a return
5819 value into the floating point registers if the return value is
5821 if (regno == GP_REG_FIRST + 31
5822 && mips16_hard_float
5823 && !aggregate_value_p (return_type, current_function_decl)
5824 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
5825 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
5833 /* Return the bytes needed to compute the frame pointer from the current
5834 stack pointer. SIZE is the size (in bytes) of the local variables.
5836 Mips stack frames look like:
5838 Before call After call
5839 +-----------------------+ +-----------------------+
5842 | caller's temps. | | caller's temps. |
5844 +-----------------------+ +-----------------------+
5846 | arguments on stack. | | arguments on stack. |
5848 +-----------------------+ +-----------------------+
5849 | 4 words to save | | 4 words to save |
5850 | arguments passed | | arguments passed |
5851 | in registers, even | | in registers, even |
5852 SP->| if not passed. | VFP->| if not passed. |
5853 +-----------------------+ +-----------------------+
5855 | fp register save |
5857 +-----------------------+
5859 | gp register save |
5861 +-----------------------+
5865 +-----------------------+
5867 | alloca allocations |
5869 +-----------------------+
5871 | GP save for V.4 abi |
5873 +-----------------------+
5875 | arguments on stack |
5877 +-----------------------+
5879 | arguments passed |
5880 | in registers, even |
5881 low SP->| if not passed. |
5882 memory +-----------------------+
/* NOTE(review): the return type, braces, the declarations/initializers
   of `regno`, `gp_reg_size`, `fp_reg_size`, `mask`, `fmask`, and several
   `if`/`else` keywords are elided in this extract.  `mask`/`fmask` and
   the size accumulators are presumably zero-initialized in the elided
   lines -- confirm against the full file.  Side effect: fills in
   cfun->machine->frame and returns total_size.  */
5887 compute_frame_size (HOST_WIDE_INT size)
5890 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
5891 HOST_WIDE_INT var_size; /* # bytes that variables take up */
5892 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
5893 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
5894 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
5895 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
5896 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
5897 unsigned int mask; /* mask of saved gp registers */
5898 unsigned int fmask; /* mask of saved fp registers */
/* Decide which register will act as $gp before sizing the frame, since
   mips_save_reg_p consults cfun->machine->global_pointer.  */
5900 cfun->machine->global_pointer = mips_global_pointer ();
5906 var_size = MIPS_STACK_ALIGN (size);
5907 args_size = current_function_outgoing_args_size;
5908 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
5910 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
5911 functions. If the function has local variables, we're committed
5912 to allocating it anyway. Otherwise reclaim it here. */
5913 if (var_size == 0 && current_function_is_leaf)
5914 cprestore_size = args_size = 0;
5916 /* The MIPS 3.0 linker does not like functions that dynamically
5917 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
5918 looks like we are trying to create a second frame pointer to the
5919 function, so allocate some stack space to make it happy. */
5921 if (args_size == 0 && current_function_calls_alloca)
5922 args_size = 4 * UNITS_PER_WORD;
5924 total_size = var_size + args_size + cprestore_size;
5926 /* Calculate space needed for gp registers. */
5927 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
5928 if (mips_save_reg_p (regno))
5930 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5931 mask |= 1 << (regno - GP_REG_FIRST);
5934 /* We need to restore these for the handler. */
/* The loop header over EH data registers (index `i`) is elided here;
   the loop terminates when EH_RETURN_DATA_REGNO yields INVALID_REGNUM.  */
5935 if (current_function_calls_eh_return)
5940 regno = EH_RETURN_DATA_REGNO (i);
5941 if (regno == INVALID_REGNUM)
5943 gp_reg_size += GET_MODE_SIZE (gpr_mode);
5944 mask |= 1 << (regno - GP_REG_FIRST);
5948 /* This loop must iterate over the same space as its companion in
5949 save_restore_insns. */
/* FP registers are handled in groups of FP_INC (register-pair step);
   the loop's decrement expression is elided in this extract.  */
5950 for (regno = (FP_REG_LAST - FP_INC + 1);
5951 regno >= FP_REG_FIRST;
5954 if (mips_save_reg_p (regno))
5956 fp_reg_size += FP_INC * UNITS_PER_FPREG;
5957 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
5961 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
5962 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
5964 /* Add in space reserved on the stack by the callee for storing arguments
5965 passed in registers. */
5967 total_size += MIPS_STACK_ALIGN (current_function_pretend_args_size);
5969 /* Save other computed information. */
5970 cfun->machine->frame.total_size = total_size;
5971 cfun->machine->frame.var_size = var_size;
5972 cfun->machine->frame.args_size = args_size;
5973 cfun->machine->frame.cprestore_size = cprestore_size;
5974 cfun->machine->frame.gp_reg_size = gp_reg_size;
5975 cfun->machine->frame.fp_reg_size = fp_reg_size;
5976 cfun->machine->frame.mask = mask;
5977 cfun->machine->frame.fmask = fmask;
5978 cfun->machine->frame.initialized = reload_completed;
5979 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
5980 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
/* The `if (mask)` guard around this offset computation is elided;
   gp_sp_offset points at the highest-addressed saved GPR slot.  */
5984 HOST_WIDE_INT offset;
5986 offset = (args_size + cprestore_size + var_size
5987 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
5988 cfun->machine->frame.gp_sp_offset = offset;
5989 cfun->machine->frame.gp_save_offset = offset - total_size;
5993 cfun->machine->frame.gp_sp_offset = 0;
5994 cfun->machine->frame.gp_save_offset = 0;
/* Analogous computation for FP saves (guard elided): fp_sp_offset is
   the slot of the highest-addressed saved FP register group.  */
5999 HOST_WIDE_INT offset;
6001 offset = (args_size + cprestore_size + var_size
6002 + gp_reg_rounded + fp_reg_size
6003 - FP_INC * UNITS_PER_FPREG);
6004 cfun->machine->frame.fp_sp_offset = offset;
6005 cfun->machine->frame.fp_save_offset = offset - total_size;
6009 cfun->machine->frame.fp_sp_offset = 0;
6010 cfun->machine->frame.fp_save_offset = 0;
6013 /* Ok, we're done. */
6017 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6018 pointer or argument pointer. TO is either the stack pointer or
6019 hard frame pointer. */
/* NOTE(review): the return type, braces, the switch header on FROM,
   `offset = 0` for FRAME_POINTER_REGNUM, the default case and the
   final return are elided in this extract.  */
6022 mips_initial_elimination_offset (int from, int to)
6024 HOST_WIDE_INT offset;
/* Make sure cfun->machine->frame is up to date before reading it.  */
6026 compute_frame_size (get_frame_size ());
6028 /* Set OFFSET to the offset from the stack pointer. */
6031 case FRAME_POINTER_REGNUM:
6035 case ARG_POINTER_REGNUM:
6036 offset = cfun->machine->frame.total_size;
6038 offset -= current_function_pretend_args_size;
/* In mips16 code the hard frame pointer sits above the outgoing
   argument area (see mips_expand_prologue), so subtract it back out.  */
6045 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6046 offset -= cfun->machine->frame.args_size;
6051 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6052 back to a previous frame. */
/* NOTE(review): the return type, the COUNT != 0 early-return and the
   braces are elided in this extract.  GP_REG_FIRST + 31 is $ra.  */
6054 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6059 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6062 /* Use FN to save or restore register REGNO. MODE is the register's
6063 mode and OFFSET is the offset of its save slot from the current
/* (Comment tail "...stack pointer" elided in this extract.)  */
6067 mips_save_restore_reg (enum machine_mode mode, int regno,
6068 HOST_WIDE_INT offset, mips_save_restore_fn fn)
/* Build a MEM for the stack slot at sp + OFFSET and let FN do the
   actual save (store) or restore (load).  Local `mem` declaration is
   elided in this extract.  */
6072 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
6074 fn (gen_rtx_REG (mode, regno), mem);
6078 /* Call FN for each register that is saved by the current function.
6079 SP_OFFSET is the offset of the current stack pointer from the start
/* (Comment tail "...of the frame" elided.)  NOTE(review): return type,
   braces and the `regno` declaration are elided in this extract.  */
6083 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6085 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6087 enum machine_mode fpr_mode;
6088 HOST_WIDE_INT offset;
6091 /* Save registers starting from high to low. The debuggers prefer at least
6092 the return register be stored at func+4, and also it allows us not to
6093 need a nop in the epilog if at least one register is reloaded in
6094 addition to return address. */
6095 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6096 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6097 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6099 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6100 offset -= GET_MODE_SIZE (gpr_mode);
6103 /* This loop must iterate over the same space as its companion in
6104 compute_frame_size. */
6105 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
/* FP registers are saved pairwise (DFmode) unless -msingle-float;
   the loop's decrement expression is elided in this extract.  */
6106 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6107 for (regno = (FP_REG_LAST - FP_INC + 1);
6108 regno >= FP_REG_FIRST;
6110 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6112 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6113 offset -= GET_MODE_SIZE (fpr_mode);
6118 /* If we're generating n32 or n64 abicalls, and the current function
6119 does not use $28 as its global pointer, emit a cplocal directive.
6120 Use pic_offset_table_rtx as the argument to the directive. */
/* NOTE(review): return type and braces elided in this extract.  */
6123 mips_output_cplocal (void)
/* Only needed when the assembler expands macros itself (no explicit
   relocs) and an alternate $gp register was chosen.  %+ prints the
   current pic_offset_table_rtx register name.  */
6125 if (!TARGET_EXPLICIT_RELOCS
6126 && cfun->machine->global_pointer > 0
6127 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6128 output_asm_insn (".cplocal %+", 0);
6131 /* If we're generating n32 or n64 abicalls, emit instructions
6132 to set up the global pointer. */
/* NOTE(review): return type and braces elided in this extract.  */
6135 mips_emit_loadgp (void)
6137 if (TARGET_ABICALLS && TARGET_NEWABI && cfun->machine->global_pointer > 0)
6139 rtx addr, offset, incoming_address;
/* Compute $gp as incoming function address ($25) plus the
   %gp_rel-style offset of the function symbol.  */
6141 addr = XEXP (DECL_RTL (current_function_decl), 0);
6142 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6143 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6144 emit_insn (gen_loadgp (offset, incoming_address));
/* Without explicit relocs the assembler may reorder macro expansions;
   a blockage keeps the scheduler from moving insns across the load.  */
6145 if (!TARGET_EXPLICIT_RELOCS)
6146 emit_insn (gen_loadgp_blockage ());
6150 /* Set up the stack and frame (if desired) for the function. */
/* Implements TARGET_ASM_FUNCTION_PROLOGUE: emits the textual parts of
   the prologue (.ent, .frame, .mask/.fmask, .cpload) -- the actual
   stack adjustment insns are generated by mips_expand_prologue.
   NOTE(review): return type, braces, the `fnname` declaration and a
   few #endif/brace lines are elided in this extract.  */
6153 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6156 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6158 #ifdef SDB_DEBUGGING_INFO
6159 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6160 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6163 /* In mips16 mode, we may need to generate a 32 bit to handle
6164 floating point arguments. The linker will arrange for any 32 bit
6165 functions to call this stub, which will then jump to the 16 bit
6167 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6168 && current_function_args_info.fp_code != 0)
6169 build_mips16_function_stub (file);
6171 if (!FUNCTION_NAME_ALREADY_DECLARED)
6173 /* Get the function name the same way that toplev.c does before calling
6174 assemble_start_function. This is needed so that the name used here
6175 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6176 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6178 if (!flag_inhibit_size_directive)
6180 fputs ("\t.ent\t", file);
6181 assemble_name (file, fnname);
/* Emission of the function label; the fputs of the newline after .ent
   is elided in this extract.  */
6185 assemble_name (file, fnname);
6186 fputs (":\n", file);
6189 /* Stop mips_file_end from treating this function as external. */
6190 if (TARGET_IRIX && mips_abi == ABI_32)
6191 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6193 if (!flag_inhibit_size_directive)
6195 /* .frame FRAMEREG, FRAMESIZE, RETREG */
/* The fprintf call head is elided in this extract; the format string
   and arguments below describe the frame for the assembler/debugger.  */
6197 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6198 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6199 ", args= " HOST_WIDE_INT_PRINT_DEC
6200 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6201 (reg_names[(frame_pointer_needed)
6202 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6203 ((frame_pointer_needed && TARGET_MIPS16)
6204 ? tsize - cfun->machine->frame.args_size
/* The ": tsize" alternative of this conditional is elided here.  */
6206 reg_names[GP_REG_FIRST + 31],
6207 cfun->machine->frame.var_size,
6208 cfun->machine->frame.num_gp,
6209 cfun->machine->frame.num_fp,
6210 cfun->machine->frame.args_size,
6211 cfun->machine->frame.cprestore_size);
6213 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6214 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6215 cfun->machine->frame.mask,
6216 cfun->machine->frame.gp_save_offset);
6217 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6218 cfun->machine->frame.fmask,
6219 cfun->machine->frame.fp_save_offset);
/* Comment head ("Require: ..." or similar) elided in this extract.  */
6222 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6223 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6226 if (TARGET_ABICALLS && !TARGET_NEWABI && cfun->machine->global_pointer > 0)
6228 /* Handle the initialization of $gp for SVR4 PIC. */
/* %( / %) bracket a .set noreorder region; %< emits .set nomacro.  */
6229 if (!cfun->machine->all_noreorder_p)
6230 output_asm_insn ("%(.cpload\t%^%)", 0);
6232 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6234 else if (cfun->machine->all_noreorder_p)
6235 output_asm_insn ("%(%<", 0);
6237 /* Tell the assembler which register we're using as the global
6238 pointer. This is needed for thunks, since they can use either
6239 explicit relocs or assembler macros. */
6240 mips_output_cplocal ();
6243 /* Make the last instruction frame related and note that it performs
6244 the operation described by FRAME_PATTERN. */
/* NOTE(review): return type, braces, the `insn` declaration and the
   tail of the alloc_EXPR_LIST call (frame_pattern, REG_NOTES (insn))
   are elided in this extract.  */
6247 mips_set_frame_expr (rtx frame_pattern)
6251 insn = get_last_insn ();
6252 RTX_FRAME_RELATED_P (insn) = 1;
6253 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6259 /* Return a frame-related rtx that stores REG at MEM.
6260 REG must be a single register. */
/* NOTE(review): return type, braces, the `set` declaration and the
   final `return set;` are elided in this extract.  */
6263 mips_frame_set (rtx mem, rtx reg)
6267 /* If we're saving the return address register and the dwarf return
6268 address column differs from the hard register number, adjust the
6269 note reg to refer to the former. */
6270 if (REGNO (reg) == GP_REG_FIRST + 31
6271 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6272 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN)
6274 set = gen_rtx_SET (VOIDmode, mem, reg);
6275 RTX_FRAME_RELATED_P (set) = 1;
6281 /* Save register REG to MEM. Make the instruction frame-related. */
/* Used as the FN callback of mips_for_each_saved_reg in the prologue.
   NOTE(review): return type, braces, local declarations (x1, x2) and
   the mips16 guard before line 6302 are elided in this extract.  */
6284 mips_save_reg (rtx reg, rtx mem)
/* A 64-bit FP save without 64-bit FPRs must be done as two 32-bit
   subword moves, and the DWARF note must describe both halves.  */
6286 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6290 if (mips_split_64bit_move_p (mem, reg))
6291 mips_split_64bit_move (mem, reg);
6293 emit_move_insn (mem, reg);
6295 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6296 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6297 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6302 && REGNO (reg) != GP_REG_FIRST + 31
6303 && !M16_REG_P (REGNO (reg)))
6305 /* Save a non-mips16 register by moving it through a temporary.
6306 We don't need to do this for $31 since there's a special
6307 instruction for it. */
6308 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6309 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6312 emit_move_insn (mem, reg);
6314 mips_set_frame_expr (mips_frame_set (mem, reg));
6319 /* Expand the prologue into a bunch of separate insns. */
/* NOTE(review): return type, braces, the `size` declaration and
   several closing braces / else keywords are elided in this extract.  */
6322 mips_expand_prologue (void)
/* Rename pic_offset_table_rtx to whatever register mips_global_pointer
   picked; undone in mips_output_function_epilogue.  */
6326 if (cfun->machine->global_pointer > 0)
6327 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6329 size = compute_frame_size (get_frame_size ());
6331 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6332 bytes beforehand; this is enough to cover the register save area
6333 without going out of range. */
6334 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6336 HOST_WIDE_INT step1;
6338 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
/* stack_pointer_rtx argument of gen_add3_insn is elided here.  */
6339 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6341 GEN_INT (-step1)))) = 1;
/* `size -= step1;` is elided in this extract -- the save offsets below
   are relative to the remaining unallocated frame.  */
6343 mips_for_each_saved_reg (size, mips_save_reg);
6346 /* Allocate the rest of the frame. */
/* The `if (size > 0)` guard is elided in this extract.  */
6349 if (SMALL_OPERAND (-size))
6350 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6352 GEN_INT (-size)))) = 1;
/* Large frames: materialize SIZE in a temporary first.  */
6355 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
/* The TARGET_MIPS16 guard for the branch below is elided here.  */
6358 /* There are no instructions to add or subtract registers
6359 from the stack pointer, so use the frame pointer as a
6360 temporary. We should always be using a frame pointer
6361 in this case anyway. */
6362 gcc_assert (frame_pointer_needed);
6363 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6364 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6365 hard_frame_pointer_rtx,
6366 MIPS_PROLOGUE_TEMP (Pmode)));
6367 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6370 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6372 MIPS_PROLOGUE_TEMP (Pmode)));
6374 /* Describe the combined effect of the previous instructions. */
/* mips_set_frame_expr call head elided in this extract.  */
6376 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6377 plus_constant (stack_pointer_rtx, -size)));
6381 /* Set up the frame pointer, if we're using one. In mips16 code,
6382 we point the frame pointer ahead of the outgoing argument area.
6383 This should allow more variables & incoming arguments to be
6384 accessed with unextended instructions. */
6385 if (frame_pointer_needed)
6387 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6389 rtx offset = GEN_INT (cfun->machine->frame.args_size);
/* RTX_FRAME_RELATED_P wrapper head is elided here.  */
6391 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6396 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6397 stack_pointer_rtx)) = 1;
6400 /* If generating o32/o64 abicalls, save $gp on the stack. */
6401 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6402 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6404 mips_emit_loadgp ();
6406 /* If we are profiling, make sure no instructions are scheduled before
6407 the call to mcount. */
6409 if (current_function_profile)
6410 emit_insn (gen_blockage ());
6413 /* Do any necessary cleanup after a function to restore stack, frame,
/* (Comment tail "...and regs" elided.)  RA_MASK selects the $31 bit in
   frame.mask.  */
6416 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
/* NOTE(review): return type, braces and the `fnname` declaration are
   elided in this extract.  */
6419 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6420 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6422 /* Reinstate the normal $gp. */
/* Undoes the REGNO renaming done in mips_expand_prologue.  */
6423 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6424 mips_output_cplocal ();
6426 if (cfun->machine->all_noreorder_p)
6428 /* Avoid using %>%) since it adds excess whitespace. */
6429 output_asm_insn (".set\tmacro", 0);
6430 output_asm_insn (".set\treorder", 0);
6431 set_noreorder = set_nomacro = 0;
6434 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6438 /* Get the function name the same way that toplev.c does before calling
6439 assemble_start_function. This is needed so that the name used here
6440 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6441 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6442 fputs ("\t.end\t", file);
6443 assemble_name (file, fnname);
6448 /* Emit instructions to restore register REG from slot MEM. */
/* Epilogue counterpart of mips_save_reg; used as the FN callback of
   mips_for_each_saved_reg.  NOTE(review): return type, braces and an
   `else` keyword are elided in this extract.  */
6451 mips_restore_reg (rtx reg, rtx mem)
6453 /* There's no mips16 instruction to load $31 directly. Load into
6454 $7 instead and adjust the return insn appropriately. */
6455 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6456 reg = gen_rtx_REG (GET_MODE (reg), 7);
6458 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6460 /* Can't restore directly; move through a temporary. */
6461 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6462 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6465 emit_move_insn (reg, mem);
6469 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6470 if this epilogue precedes a sibling call, false if it is for a normal
6471 "epilogue" pattern. */
/* NOTE(review): return type, braces, `base`/`target`/`adjust`
   declarations, an early `return`, `step2 = 0`, `step1 -= step2` and
   several else/brace lines are elided in this extract.  */
6474 mips_expand_epilogue (int sibcall_p)
6476 HOST_WIDE_INT step1, step2;
6479 if (!sibcall_p && mips_can_use_return_insn ())
6481 emit_jump_insn (gen_return ());
6485 /* Split the frame into two. STEP1 is the amount of stack we should
6486 deallocate before restoring the registers. STEP2 is the amount we
6487 should deallocate afterwards.
6489 Start off by assuming that no registers need to be restored. */
6490 step1 = cfun->machine->frame.total_size;
6493 /* Work out which register holds the frame address. Account for the
6494 frame pointer offset used by mips16 code. */
6495 if (!frame_pointer_needed)
6496 base = stack_pointer_rtx;
6499 base = hard_frame_pointer_rtx;
/* The TARGET_MIPS16 guard for this adjustment is elided here; it
   mirrors the mips16 frame-pointer offset set up in the prologue.  */
6501 step1 -= cfun->machine->frame.args_size;
6504 /* If we need to restore registers, deallocate as much stack as
6505 possible in the second step without going out of range. */
6506 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6508 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
6512 /* Set TARGET to BASE + STEP1. */
6518 /* Get an rtx for STEP1 that we can add to BASE. */
6519 adjust = GEN_INT (step1);
6520 if (!SMALL_OPERAND (step1))
6522 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6523 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6526 /* Normal mode code can copy the result straight into $sp. */
/* The mips16 alternative (TARGET = a temp register) is elided here.  */
6528 target = stack_pointer_rtx;
6530 emit_insn (gen_add3_insn (target, base, adjust));
6533 /* Copy TARGET into the stack pointer. */
6534 if (target != stack_pointer_rtx)
6535 emit_move_insn (stack_pointer_rtx, target);
6537 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6538 implicitly used by all SYMBOL_REFs. We must emit a blockage
6539 insn before restoring it. */
6540 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6541 emit_insn (gen_blockage ());
6543 /* Restore the registers. */
6544 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
/* (Second argument mips_restore_reg elided in this extract.)  */
6547 /* Deallocate the final bit of the frame. */
/* The `if (step2 > 0)` guard and GEN_INT (step2) argument are elided
   in this extract.  */
6549 emit_insn (gen_add3_insn (stack_pointer_rtx,
6553 /* Add in the __builtin_eh_return stack adjustment. We need to
6554 use a temporary in mips16 code. */
6555 if (current_function_calls_eh_return)
/* The TARGET_MIPS16 guard for the temporary-based variant is elided.  */
6559 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6560 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6561 MIPS_EPILOGUE_TEMP (Pmode),
6562 EH_RETURN_STACKADJ_RTX));
6563 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6566 emit_insn (gen_add3_insn (stack_pointer_rtx,
6568 EH_RETURN_STACKADJ_RTX));
/* The `if (!sibcall_p)` guard for the return emission is elided.  */
6573 /* The mips16 loads the return address into $7, not $31. */
6574 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6575 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6576 GP_REG_FIRST + 7)));
6578 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6579 GP_REG_FIRST + 31)));
6583 /* Return nonzero if this function is known to have a null epilogue.
6584 This allows the optimizer to omit jumps to jumps if no stack
/* (Comment tail "...is created" elided.)  NOTE(review): return type,
   braces, `return 0` statements and the `return_type` declaration are
   elided in this extract.  */
6588 mips_can_use_return_insn (void)
/* Frame sizes are only final after reload.  */
6592 if (! reload_completed)
6595 if (regs_ever_live[31] || current_function_profile)
6598 return_type = DECL_RESULT (current_function_decl);
6600 /* In mips16 mode, a function which returns a floating point value
6601 needs to arrange to copy the return value into the floating point
/* (Comment tail and the TARGET_MIPS16 guard line are elided here.)  */
6604 && mips16_hard_float
6605 && ! aggregate_value_p (return_type, current_function_decl)
6606 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6607 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6610 if (cfun->machine->frame.initialized)
6611 return cfun->machine->frame.total_size == 0;
6613 return compute_frame_size (get_frame_size ()) == 0;
6616 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
6617 in order to avoid duplicating too much logic from elsewhere. */
/* Emits a C++ "this"-adjusting thunk that tail-calls FUNCTION.
   NOTE(review): return type, braces, the `function` parameter line and
   several closing braces are elided in this extract.  */
6620 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
6621 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
6624 rtx this, temp1, temp2, insn, fnaddr;
6626 /* Pretend to be a post-reload pass while generating rtl. */
/* no_new_pseudos assignment is elided in this extract.  */
6628 reload_completed = 1;
6629 reset_block_changes ();
6631 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
6632 for TARGET_NEWABI since the latter is a call-saved register. */
6633 if (TARGET_ABICALLS)
6634 cfun->machine->global_pointer
6635 = REGNO (pic_offset_table_rtx)
6636 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
6638 /* Set up the global pointer for n32 or n64 abicalls. */
6639 mips_emit_loadgp ();
6641 /* We need two temporary registers in some cases. */
6642 temp1 = gen_rtx_REG (Pmode, 2);
6643 temp2 = gen_rtx_REG (Pmode, 3);
6645 /* Find out which register contains the "this" pointer. */
/* For aggregate returns the first GP arg register carries the hidden
   return-slot pointer, so "this" is in the second.  */
6646 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
6647 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
6649 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
6651 /* Add DELTA to THIS. */
/* The `if (delta != 0)` guard is elided in this extract.  */
6654 rtx offset = GEN_INT (delta);
6655 if (!SMALL_OPERAND (delta))
6657 emit_move_insn (temp1, offset);
/* `offset = temp1;` after the move is elided here.  */
6660 emit_insn (gen_add3_insn (this, this, offset));
6663 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
6664 if (vcall_offset != 0)
/* Local `addr` declaration is elided in this extract.  */
6668 /* Set TEMP1 to *THIS. */
6669 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
6671 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
6672 addr = mips_add_offset (temp2, temp1, vcall_offset);
6674 /* Load the offset and add it to THIS. */
6675 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
6676 emit_insn (gen_add3_insn (this, this, temp1));
6679 /* Jump to the target function. Use a sibcall if direct jumps are
6680 allowed, otherwise load the address into a register first. */
6681 fnaddr = XEXP (DECL_RTL (function), 0);
6682 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
6684 /* This is messy. gas treats "la $25,foo" as part of a call
6685 sequence and may allow a global "foo" to be lazily bound.
6686 The general move patterns therefore reject this combination.
6688 In this context, lazy binding would actually be OK for o32 and o64,
6689 but it's still wrong for n32 and n64; see mips_load_call_address.
6690 We must therefore load the address via a temporary register if
6691 mips_dangerous_for_la25_p.
6693 If we jump to the temporary register rather than $25, the assembler
6694 can use the move insn to fill the jump's delay slot. */
6695 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
6696 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6697 mips_load_call_address (temp1, fnaddr, true);
6699 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
6700 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
6701 emit_jump_insn (gen_indirect_jump (temp1));
6705 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
6706 SIBLING_CALL_P (insn) = 1;
6709 /* Run just enough of rest_of_compilation. This sequence was
6710 "borrowed" from alpha.c. */
6711 insn = get_insns ();
6712 insn_locators_initialize ();
6713 split_all_insns_noflow ();
6715 mips16_lay_out_constants ();
6716 shorten_branches (insn);
6717 final_start_function (insn, file, 1);
6718 final (insn, file, 1);
6719 final_end_function ();
6721 /* Clean up the vars set above. Note that final_end_function resets
6722 the global pointer for us. */
6723 reload_completed = 0;
6727 /* Returns nonzero if X contains a SYMBOL_REF. */
/* Recursive walk over CONST wrappers, sign/zero extensions (the
   extension case's guard is elided in this extract) and binary
   arithmetic.  NOTE(review): return type, braces, `return true` and
   the final `return false` are elided here.  */
6730 symbolic_expression_p (rtx x)
6732 if (GET_CODE (x) == SYMBOL_REF)
6735 if (GET_CODE (x) == CONST)
6736 return symbolic_expression_p (XEXP (x, 0));
/* The UNARY_P guard for this recursion is elided in this extract.  */
6739 return symbolic_expression_p (XEXP (x, 0));
6741 if (ARITHMETIC_P (x))
6742 return (symbolic_expression_p (XEXP (x, 0))
6743 || symbolic_expression_p (XEXP (x, 1)));
6748 /* Choose the section to use for the constant rtx expression X that has
/* (Comment tail "...mode MODE" and the TARGET_MIPS16 guard are elided
   in this extract.)  Implements TARGET_ASM_SELECT_RTX_SECTION: the
   section is switched by side effect, nothing is returned.  */
6752 mips_select_rtx_section (enum machine_mode mode, rtx x,
6753 unsigned HOST_WIDE_INT align)
6757 /* In mips16 mode, the constant table always goes in the same section
6758 as the function, so that constants can be loaded using PC relative
/* (Comment tail "...addressing" elided.)  */
6760 function_section (current_function_decl);
6762 else if (TARGET_EMBEDDED_DATA)
6764 /* For embedded applications, always put constants in read-only data,
6765 in order to reduce RAM usage. */
6766 mergeable_constant_section (mode, align, 0);
/* Final `else` branch; the keyword line is elided in this extract.  */
6770 /* For hosted applications, always put constants in small data if
6771 possible, as this gives the best performance. */
6772 /* ??? Consider using mergeable small data sections. */
6774 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
6775 && mips_section_threshold > 0)
6776 named_section (0, ".sdata", 0);
/* PIC symbolic constants need relocation at load time, so they go in
   .data.rel.ro rather than a pure read-only section.  */
6777 else if (flag_pic && symbolic_expression_p (x))
6778 named_section (0, ".data.rel.ro", 3);
6780 mergeable_constant_section (mode, align, 0);
6784 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
6786 The complication here is that, with the combination TARGET_ABICALLS
6787 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
6788 therefore not be included in the read-only part of a DSO. Handle such
6789 cases by selecting a normal data section instead of a read-only one.
6790 The logic apes that in default_function_rodata_section. */
/* NOTE(review): return type, braces, the memcpy that rewrites the
   ".gnu.linkonce.t." prefix to ".gnu.linkonce.d.", and the fallback
   data_section call are elided in this extract.  */
6793 mips_function_rodata_section (tree decl)
6795 if (!TARGET_ABICALLS || TARGET_GPWORD)
6796 default_function_rodata_section (decl);
6797 else if (decl && DECL_SECTION_NAME (decl))
6799 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6800 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
6802 char *rname = ASTRDUP (name);
/* The edit of `rname` (t -> d in ".gnu.linkonce.t.") is elided here.  */
6804 named_section_real (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
6806 else if (flag_function_sections && flag_data_sections
6807 && strncmp (name, ".text.", 6) == 0)
6809 char *rname = ASTRDUP (name);
/* Rewrites ".text.foo" to ".data.foo" in place (both prefixes are the
   same length).  */
6810 memcpy (rname + 1, "data", 4);
6811 named_section_flags (rname, SECTION_WRITE);
6820 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
6821 access DECL using %gp_rel(...)($gp). */
/* NOTE(review): return type, braces, the `size`/`name` declarations
   and several `return false`/`return true` lines are elided in this
   extract.  */
6824 mips_in_small_data_p (tree decl)
6828 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
6831 /* We don't yet generate small-data references for -mabicalls. See related
6832 -G handling in override_options. */
6833 if (TARGET_ABICALLS)
6836 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
6840 /* Reject anything that isn't in a known small-data section. */
6841 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
6842 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
6845 /* If a symbol is defined externally, the assembler will use the
6846 usual -G rules when deciding how to implement macros. */
6847 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
6850 else if (TARGET_EMBEDDED_DATA)
6852 /* Don't put constants into the small data section: we want them
6853 to be in ROM rather than RAM. */
6854 if (TREE_CODE (decl) != VAR_DECL)
6857 if (TREE_READONLY (decl)
6858 && !TREE_SIDE_EFFECTS (decl)
6859 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
/* Final size check: only objects no bigger than -G <threshold> bytes
   are placed in small data.  */
6863 size = int_size_in_bytes (TREE_TYPE (decl));
6864 return (size > 0 && size <= mips_section_threshold);
6867 /* See whether VALTYPE is a record whose fields should be returned in
6868 floating-point registers. If so, return the number of fields and
6869 list them in FIELDS (which should have two elements). Return 0
6872 For n32 & n64, a structure with one or two fields is returned in
6873 floating-point registers as long as every field has a floating-point
/* (Comment tail "...type" elided.)  NOTE(review): return type, braces,
   the `i`/`field` declarations, the TARGET_NEWABI early return, the
   i>=2 overflow check and the final `return i;` are elided in this
   extract.  */
6877 mips_fpr_return_fields (tree valtype, tree *fields)
6885 if (TREE_CODE (valtype) != RECORD_TYPE)
/* Walk all members; only FIELD_DECLs count, and every one must be a
   scalar floating-point type for FPR return to apply.  */
6889 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
6891 if (TREE_CODE (field) != FIELD_DECL)
6894 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
6900 fields[i++] = field;
6906 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
6907 a value in the most significant part of $2/$3 if:
6909 - the target is big-endian;
6911 - the value has a structure or union type (we generalize this to
6912 cover aggregates from other languages too); and
6914 - the structure is not returned in floating-point registers. */
/* NOTE(review): return type, braces and the `fields[2]` declaration
   are elided in this extract.  */
6917 mips_return_in_msb (tree valtype)
6921 return (TARGET_NEWABI
6922 && TARGET_BIG_ENDIAN
6923 && AGGREGATE_TYPE_P (valtype)
6924 && mips_fpr_return_fields (valtype, fields) == 0);
6928 /* Return a composite value in a pair of floating-point registers.
6929 MODE1 and OFFSET1 are the mode and byte offset for the first value,
6930 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
/* (Comment tail "...whole value" elided.)  */
6933 For n32 & n64, $f0 always holds the first value and $f2 the second.
6934 Otherwise the values are packed together as closely as possible. */
/* NOTE(review): return type, braces, the `inc` declaration and the
   gen_rtvec/mode arguments between lines 6944 and 6947 are elided in
   this extract.  */
6937 mips_return_fpr_pair (enum machine_mode mode,
6938 enum machine_mode mode1, HOST_WIDE_INT offset1,
6939 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* NewABI uses a fixed stride of 2 ($f0/$f2); otherwise step by FP_INC
   so the two values land in consecutive allocatable FPRs.  */
6943 inc = (TARGET_NEWABI ? 2 : FP_INC);
6944 return gen_rtx_PARALLEL
6947 gen_rtx_EXPR_LIST (VOIDmode,
6948 gen_rtx_REG (mode1, FP_RETURN),
/* GEN_INT (offset1) argument is elided in this extract.  */
6950 gen_rtx_EXPR_LIST (VOIDmode,
6951 gen_rtx_REG (mode2, FP_RETURN + inc),
6952 GEN_INT (offset2))));
6957 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
6958 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
6959 VALTYPE is null and MODE is the mode of the return value. */
/* NOTE(review): elided extract -- the return type, local declarations
   (fields, unsignedp), braces, and several statements are missing; the
   visible lines show only the decision skeleton.  */
6962 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
6963 enum machine_mode mode)
6970 mode = TYPE_MODE (valtype);
6971 unsignedp = TYPE_UNSIGNED (valtype);
6973 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
6974 true, we must promote the mode just as PROMOTE_MODE does. */
6975 mode = promote_mode (valtype, mode, &unsignedp, 1);
6977 /* Handle structures whose fields are returned in $f0/$f2. */
6978 switch (mips_fpr_return_fields (valtype, fields))
/* One FP field: the whole value goes in $f0.  */
6981 return gen_rtx_REG (mode, FP_RETURN)
6984 return mips_return_fpr_pair (mode,
6985 TYPE_MODE (TREE_TYPE (fields[0])),
6986 int_byte_position (fields[0]),
6987 TYPE_MODE (TREE_TYPE (fields[1])),
6988 int_byte_position (fields[1]));
6991 /* If a value is passed in the most significant part of a register, see
6992 whether we have to round the mode up to a whole number of words. */
6993 if (mips_return_in_msb (valtype))
6995 HOST_WIDE_INT size = int_size_in_bytes (valtype);
6996 if (size % UNITS_PER_WORD != 0)
6998 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
6999 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7003 /* For EABI, the class of return register depends entirely on MODE.
7004 For example, "struct { some_type x; }" and "union { some_type x; }"
7005 are returned in the same way as a bare "some_type" would be.
7006 Other ABIs only use FPRs for scalar, complex or vector types. */
7007 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7008 return gen_rtx_REG (mode, GP_RETURN);
7011 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7012 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7013 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7014 return gen_rtx_REG (mode, FP_RETURN);
7016 /* Handle long doubles for n32 & n64. */
/* NOTE(review): the guarding condition for this branch is elided.  */
7018 return mips_return_fpr_pair (mode,
7020 DImode, GET_MODE_SIZE (mode) / 2);
/* Complex floats that fit two hardware FP values: split real/imag
   halves across an FPR pair.  */
7022 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7023 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7024 return mips_return_fpr_pair (mode,
7025 GET_MODE_INNER (mode), 0,
7026 GET_MODE_INNER (mode),
7027 GET_MODE_SIZE (mode) / 2);
/* Default: return in the general-purpose return register.  */
7029 return gen_rtx_REG (mode, GP_RETURN);
7032 /* Return nonzero when an argument must be passed by reference. */
/* NOTE(review): elided extract -- return type, braces and the
   declaration of `size' are missing from this view.  */
7035 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7036 enum machine_mode mode, tree type,
7037 bool named ATTRIBUTE_UNUSED)
7039 if (mips_abi == ABI_EABI)
7043 /* ??? How should SCmode be handled? */
7044 if (type == NULL_TREE || mode == DImode || mode == DFmode)
/* Variable-sized (-1) or larger-than-a-word values go by reference.  */
7047 size = int_size_in_bytes (type);
7048 return size == -1 || size > UNITS_PER_WORD;
7052 /* If we have a variable-sized parameter, we have no choice. */
7053 return targetm.calls.must_pass_in_stack (mode, type);
/* Implement TARGET_CALLEE_COPIES: on EABI, the callee is responsible
   for copying named by-reference arguments.  (NOTE(review): elided
   extract -- return type and braces are missing from this view.)  */
7058 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7059 enum machine_mode mode ATTRIBUTE_UNUSED,
7060 tree type ATTRIBUTE_UNUSED, bool named)
7062 return mips_abi == ABI_EABI && named;
7065 /* Return true if registers of class CLASS cannot change from mode FROM
/* NOTE(review): elided extract -- return type, braces, `return true'
   statements and part of the final condition are missing.  */
7069 mips_cannot_change_mode_class (enum machine_mode from,
7070 enum machine_mode to, enum reg_class class)
/* Only mode changes that cross the single-word/multi-word boundary
   are problematic.  */
7072 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7073 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7075 if (TARGET_BIG_ENDIAN)
7077 /* When a multi-word value is stored in paired floating-point
7078 registers, the first register always holds the low word.
7079 We therefore can't allow FPRs to change between single-word
7080 and multi-word modes. */
7081 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7086 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7087 in LO and HI, the high word always comes first. We therefore
7088 can't allow values stored in HI to change between single-word
7089 and multi-word modes. */
7090 if (reg_classes_intersect_p (HI_REG, class))
7094 /* Loading a 32-bit value into a 64-bit floating-point register
7095 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
7096 We can't allow 64-bit float registers to change from SImode to
/* NOTE(review): the first conjunct(s) of this condition are elided.  */
7100 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7101 && reg_classes_intersect_p (FP_REGS, class))
7106 /* Return true if X should not be moved directly into register $25.
7107 We need this because many versions of GAS will treat "la $25,foo" as
7108 part of a call sequence and so allow a global "foo" to be lazily bound. */
/* NOTE(review): elided extract -- return type, braces and the early
   return for the TARGET_EXPLICIT_RELOCS case are missing.  */
7111 mips_dangerous_for_la25_p (rtx x)
7113 HOST_WIDE_INT offset;
7115 if (TARGET_EXPLICIT_RELOCS)
/* Strip any constant offset, then test whether the base is a
   GOT-global symbol.  */
7118 mips_split_const (x, &x, &offset);
7119 return global_got_operand (x, VOIDmode);
7122 /* Implement PREFERRED_RELOAD_CLASS. */
/* NOTE(review): elided extract -- return type, braces and the returned
   class for each branch are missing from this view; only the tests
   remain visible.  */
7125 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Values dangerous for $25 prefer LEA_REGS (which excludes $25).  */
7127 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
7130 if (TARGET_HARD_FLOAT
7131 && FLOAT_MODE_P (GET_MODE (x))
7132 && reg_class_subset_p (FP_REGS, class))
7135 if (reg_class_subset_p (GR_REGS, class))
7138 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7144 /* This function returns the register class required for a secondary
7145 register when copying between one of the registers in CLASS, and X,
7146 using MODE. If IN_P is nonzero, the copy is going from X to the
7147 register, otherwise the register is the source. A return value of
7148 NO_REGS means that no secondary register is required. */
/* NOTE(review): elided extract -- return type, braces, the declarations
   of regno/gp_reg_p and many `return' statements are missing; the lines
   below show only the decision structure.  */
7151 mips_secondary_reload_class (enum reg_class class,
7152 enum machine_mode mode, rtx x, int in_p)
/* In mips16 mode only the M16 subset of GPRs is directly usable.  */
7154 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7158 if (REG_P (x)|| GET_CODE (x) == SUBREG)
7159 regno = true_regnum (x);
7161 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
7163 if (mips_dangerous_for_la25_p (x))
7166 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7170 /* Copying from HI or LO to anywhere other than a general register
7171 requires a general register. */
7172 if (class == HI_REG || class == LO_REG || class == MD_REGS)
7174 if (TARGET_MIPS16 && in_p)
7176 /* We can't really copy to HI or LO at all in mips16 mode. */
7179 return gp_reg_p ? NO_REGS : gr_regs;
7181 if (MD_REG_P (regno))
7183 if (TARGET_MIPS16 && ! in_p)
7185 /* We can't really copy to HI or LO at all in mips16 mode. */
7188 return class == gr_regs ? NO_REGS : gr_regs;
7191 /* We can only copy a value to a condition code register from a
7192 floating point register, and even then we require a scratch
7193 floating point register. We can only copy a value out of a
7194 condition code register into a general register. */
7195 if (class == ST_REGS)
7199 return gp_reg_p ? NO_REGS : gr_regs;
7201 if (ST_REG_P (regno))
7205 return class == gr_regs ? NO_REGS : gr_regs;
7208 if (class == FP_REGS)
7212 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7215 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7217 /* We can use the l.s and l.d macros to load floating-point
7218 constants. ??? For l.s, we could probably get better
7219 code by returning GR_REGS here. */
7222 else if (gp_reg_p || x == CONST0_RTX (mode))
7224 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7227 else if (FP_REG_P (regno))
7229 /* In this case we can use mov.s or mov.d. */
7234 /* Otherwise, we need to reload through an integer register. */
7239 /* In mips16 mode, going between memory and anything but M16_REGS
7240 requires an M16_REG. */
7243 if (class != M16_REGS && class != M16_NA_REGS)
7251 if (class == M16_REGS || class == M16_NA_REGS)
7260 /* Implement CLASS_MAX_NREGS.
7262 Usually all registers are word-sized. The only supported exception
7263 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7264 registers. A word-based calculation is correct even in that case,
7265 since -msingle-float disallows multi-FPR values.
7267 The FP status registers are an exception to this rule. They are always
7268 4 bytes wide as they only hold condition code modes, and CCmode is always
7269 considered to be 4 bytes wide. */
/* NOTE(review): elided extract -- the return type and braces are
   missing from this view.  */
7272 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7273 enum machine_mode mode)
/* Status registers: round up to 4-byte units.  */
7275 if (class == ST_REGS)
7276 return (GET_MODE_SIZE (mode) + 3) / 4;
/* Everything else: round up to whole machine words.  */
7278 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Pointers are SImode, or DImode as well on 64-bit targets.
   (NOTE(review): elided extract -- return type and braces missing.)  */
7282 mips_valid_pointer_mode (enum machine_mode mode)
7284 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7287 /* Target hook for vector_mode_supported_p. */
/* NOTE(review): elided extract -- return type, braces and the
   true/false return statements are missing from this view.  */
7290 mips_vector_mode_supported_p (enum machine_mode mode)
/* V2SF is the only vector mode accepted, and only with paired-single
   float support.  */
7292 if (mode == V2SFmode && TARGET_PAIRED_SINGLE_FLOAT)
7298 /* If we can access small data directly (using gp-relative relocation
7299 operators) return the small data pointer, otherwise return null.
7301 For each mips16 function which refers to GP relative symbols, we
7302 use a pseudo register, initialized at the start of the function, to
7303 hold the $gp value. */
/* NOTE(review): elided extract -- return type, braces and several
   declarations (unspec, insn, scan) are not visible here.  */
7306 mips16_gp_pseudo_reg (void)
/* Lazily create the pseudo on first use for this function.  */
7308 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7313 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7315 /* We want to initialize this to a value which gcc will believe
7318 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7319 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7320 gen_rtx_CONST (Pmode, unspec));
7321 insn = get_insns ();
7324 push_topmost_sequence ();
7325 /* We need to emit the initialization after the FUNCTION_BEG
7326 note, so that it will be integrated. */
7327 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7329 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
/* Fall back to the first insn if no FUNCTION_BEG note was found.  */
7331 if (scan == NULL_RTX)
7332 scan = get_insns ();
7333 insn = emit_insn_after (insn, scan);
7334 pop_topmost_sequence ();
7337 return cfun->machine->mips16_gp_pseudo_rtx;
7340 /* Write out code to move floating point arguments in or out of
7341 general registers. Output the instructions to FILE. FP_CODE is
7342 the code describing which arguments are present (see the comment at
7343 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7344 we are copying from the floating point registers. */
/* NOTE(review): elided extract -- braces, the declarations of f, s,
   gparg, fparg, the selection of the mnemonic `s' (mfc1/mtc1 --
   presumably depending on FROM_FP_P), and the register-advance
   statements are all missing from this view.  */
7347 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7353 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7354 gcc_assert (TARGET_OLDABI);
7360 gparg = GP_ARG_FIRST;
7361 fparg = FP_ARG_FIRST;
/* FP_CODE packs one 2-bit field per argument; walk them LSB first.  */
7362 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
/* Odd FP register: single-precision move.  */
7366 if ((fparg & 1) != 0)
7368 fprintf (file, "\t%s\t%s,%s\n", s,
7369 reg_names[gparg], reg_names[fparg]);
/* Field value 2: double-precision argument.  */
7371 else if ((f & 3) == 2)
7374 fprintf (file, "\td%s\t%s,%s\n", s,
7375 reg_names[gparg], reg_names[fparg]);
7378 if ((fparg & 1) != 0)
/* Doubles in odd/even pairs: word order depends on endianness.  */
7380 if (TARGET_BIG_ENDIAN)
7381 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7382 reg_names[gparg], reg_names[fparg + 1], s,
7383 reg_names[gparg + 1], reg_names[fparg]);
7385 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7386 reg_names[gparg], reg_names[fparg], s,
7387 reg_names[gparg + 1], reg_names[fparg + 1]);
7400 /* Build a mips16 function stub. This is used for functions which
7401 take arguments in the floating point registers. It is 32 bit code
7402 that moves the floating point args into the general registers, and
7403 then jumps to the 16 bit code. */
/* NOTE(review): elided extract -- return type, braces and several
   declarations (fnname, f, need_comma) are missing from this view.  */
7406 build_mips16_function_stub (FILE *file)
7409 char *secname, *stubname;
7410 tree stubid, stubdecl;
/* Derive the stub's section (".mips16.fn.NAME") and symbol
   ("__fn_stub_NAME") from the current function's assembler name.  */
7414 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7415 secname = (char *) alloca (strlen (fnname) + 20);
7416 sprintf (secname, ".mips16.fn.%s", fnname);
7417 stubname = (char *) alloca (strlen (fnname) + 20);
7418 sprintf (stubname, "__fn_stub_%s", fnname);
7419 stubid = get_identifier (stubname);
7420 stubdecl = build_decl (FUNCTION_DECL, stubid,
7421 build_function_type (void_type_node, NULL_TREE));
7422 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
/* Emit a human-readable comment listing the FP argument types.  */
7424 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7426 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7428 fprintf (file, "%s%s",
7429 need_comma ? ", " : "",
7430 (f & 3) == 1 ? "float" : "double");
7433 fprintf (file, ")\n");
/* The stub itself is 32-bit code.  */
7435 fprintf (file, "\t.set\tnomips16\n");
7436 function_section (stubdecl);
7437 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7439 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7440 within a .ent, and we cannot emit another .ent. */
7441 if (!FUNCTION_NAME_ALREADY_DECLARED)
7443 fputs ("\t.ent\t", file);
7444 assemble_name (file, stubname);
7448 assemble_name (file, stubname);
7449 fputs (":\n", file);
7451 /* We don't want the assembler to insert any nops here. */
7452 fprintf (file, "\t.set\tnoreorder\n");
/* Move the FP args into GPRs (from_fp_p == 1).  */
7454 mips16_fp_args (file, current_function_args_info.fp_code, 1);
/* Jump to the real (mips16) function via $1 ($at).  */
7456 fprintf (asm_out_file, "\t.set\tnoat\n");
7457 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7458 assemble_name (file, fnname);
7459 fprintf (file, "\n");
7460 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7461 fprintf (asm_out_file, "\t.set\tat\n");
7463 /* Unfortunately, we can't fill the jump delay slot. We can't fill
7464 with one of the mfc1 instructions, because the result is not
7465 available for one instruction, so if the very first instruction
7466 in the function refers to the register, it will see the wrong
7468 fprintf (file, "\tnop\n");
7470 fprintf (file, "\t.set\treorder\n");
7472 if (!FUNCTION_NAME_ALREADY_DECLARED)
7474 fputs ("\t.end\t", file);
7475 assemble_name (file, stubname);
/* Resume emitting mips16 code for the function proper.  */
7479 fprintf (file, "\t.set\tmips16\n");
7481 function_section (current_function_decl);
7484 /* We keep a list of functions for which we have already built stubs
7485 in build_mips16_call_stub. */
/* NOTE(review): elided extract -- the struct's opening line and its
   `name'/`fpret' members are missing; only `next' is visible.  */
7489 struct mips16_stub *next;
/* Head of the singly-linked list of stubs built so far.  */
7494 static struct mips16_stub *mips16_stubs;
7496 /* Build a call stub for a mips16 call. A stub is needed if we are
7497 passing any floating point values which should go into the floating
7498 point registers. If we are, and the call turns out to be to a 32
7499 bit function, the stub will be used to move the values into the
7500 floating point registers before calling the 32 bit function. The
7501 linker will magically adjust the function call to either the 16 bit
7502 function or the 32 bit stub, depending upon where the function call
7503 is actually defined.
7505 Similarly, we need a stub if the return value might come back in a
7506 floating point register.
7508 RETVAL is the location of the return value, or null if this is
7509 a call rather than a call_value. FN is the address of the
7510 function and ARG_SIZE is the size of the arguments. FP_CODE
7511 is the code built by function_arg. This function returns a nonzero
7512 value if it builds the call instruction itself. */
/* NOTE(review): elided extract -- the return type, braces, and many
   declarations/statements (fpret, fnname, insn, buf, id, stub_fn, f,
   need_comma, several `return' lines) are missing from this view.  */
7515 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7519 char *secname, *stubname;
7520 struct mips16_stub *l;
7521 tree stubid, stubdecl;
7525 /* We don't need to do anything if we aren't in mips16 mode, or if
7526 we were invoked with the -msoft-float option. */
7527 if (! TARGET_MIPS16 || ! mips16_hard_float)
7530 /* Figure out whether the value might come back in a floating point
7532 fpret = (retval != 0
7533 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7534 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7536 /* We don't need to do anything if there were no floating point
7537 arguments and the value will not be returned in a floating point
7539 if (fp_code == 0 && ! fpret)
7542 /* We don't need to do anything if this is a call to a special
7543 mips16 support function. */
7544 if (GET_CODE (fn) == SYMBOL_REF
7545 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7548 /* This code will only work for o32 and o64 abis. The other ABI's
7549 require more sophisticated support. */
7550 gcc_assert (TARGET_OLDABI);
7552 /* We can only handle SFmode and DFmode floating point return
7555 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
7557 /* If we're calling via a function pointer, then we must always call
7558 via a stub. There are magic stubs provided in libgcc.a for each
7559 of the required cases. Each of them expects the function address
7560 to arrive in register $2. */
7562 if (GET_CODE (fn) != SYMBOL_REF)
7568 /* ??? If this code is modified to support other ABI's, we need
7569 to handle PARALLEL return values here. */
/* Pick the libgcc stub name based on the return mode (sf_/df_ or
   none) and the argument code.  */
7571 sprintf (buf, "__mips16_call_stub_%s%d",
7573 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
7576 id = get_identifier (buf);
7577 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
/* The libgcc stubs expect the target address in $2.  */
7579 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
7581 if (retval == NULL_RTX)
7582 insn = gen_call_internal (stub_fn, arg_size);
7584 insn = gen_call_value_internal (retval, stub_fn, arg_size);
7585 insn = emit_call_insn (insn);
7587 /* Put the register usage information on the CALL. */
7588 CALL_INSN_FUNCTION_USAGE (insn) =
7589 gen_rtx_EXPR_LIST (VOIDmode,
7590 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
7591 CALL_INSN_FUNCTION_USAGE (insn));
7593 /* If we are handling a floating point return value, we need to
7594 save $18 in the function prologue. Putting a note on the
7595 call will mean that regs_ever_live[$18] will be true if the
7596 call is not eliminated, and we can check that in the prologue
7599 CALL_INSN_FUNCTION_USAGE (insn) =
7600 gen_rtx_EXPR_LIST (VOIDmode,
7601 gen_rtx_USE (VOIDmode,
7602 gen_rtx_REG (word_mode, 18)),
7603 CALL_INSN_FUNCTION_USAGE (insn));
7605 /* Return 1 to tell the caller that we've generated the call
7610 /* We know the function we are going to call. If we have already
7611 built a stub, we don't need to do anything further. */
7613 fnname = XSTR (fn, 0);
7614 for (l = mips16_stubs; l != NULL; l = l->next)
7615 if (strcmp (l->name, fnname) == 0)
7620 /* Build a special purpose stub. When the linker sees a
7621 function call in mips16 code, it will check where the target
7622 is defined. If the target is a 32 bit call, the linker will
7623 search for the section defined here. It can tell which
7624 symbol this section is associated with by looking at the
7625 relocation information (the name is unreliable, since this
7626 might be a static function). If such a section is found, the
7627 linker will redirect the call to the start of the magic
7630 If the function does not return a floating point value, the
7631 special stub section is named
7634 If the function does return a floating point value, the stub
7636 .mips16.call.fp.FNNAME
7639 secname = (char *) alloca (strlen (fnname) + 40);
7640 sprintf (secname, ".mips16.call.%s%s",
7643 stubname = (char *) alloca (strlen (fnname) + 20);
7644 sprintf (stubname, "__call_stub_%s%s",
7647 stubid = get_identifier (stubname);
7648 stubdecl = build_decl (FUNCTION_DECL, stubid,
7649 build_function_type (void_type_node, NULL_TREE));
7650 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
7652 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
7654 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
7658 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7660 fprintf (asm_out_file, "%s%s",
7661 need_comma ? ", " : "",
7662 (f & 3) == 1 ? "float" : "double");
7665 fprintf (asm_out_file, ")\n");
/* The stub is 32-bit code.  */
7667 fprintf (asm_out_file, "\t.set\tnomips16\n");
7668 assemble_start_function (stubdecl, stubname);
7670 if (!FUNCTION_NAME_ALREADY_DECLARED)
7672 fputs ("\t.ent\t", asm_out_file);
7673 assemble_name (asm_out_file, stubname);
7674 fputs ("\n", asm_out_file);
7676 assemble_name (asm_out_file, stubname);
7677 fputs (":\n", asm_out_file);
7680 /* We build the stub code by hand. That's the only way we can
7681 do it, since we can't generate 32 bit code during a 16 bit
7684 /* We don't want the assembler to insert any nops here. */
7685 fprintf (asm_out_file, "\t.set\tnoreorder\n");
/* Move FP args into the FPRs (from_fp_p == 0).  */
7687 mips16_fp_args (asm_out_file, fp_code, 0);
/* No FP return value case (the guarding condition is elided):
   simply tail-jump to the real function via $1.  */
7691 fprintf (asm_out_file, "\t.set\tnoat\n");
7692 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
7694 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7695 fprintf (asm_out_file, "\t.set\tat\n");
7696 /* Unfortunately, we can't fill the jump delay slot. We
7697 can't fill with one of the mtc1 instructions, because the
7698 result is not available for one instruction, so if the
7699 very first instruction in the function refers to the
7700 register, it will see the wrong value. */
7701 fprintf (asm_out_file, "\tnop\n");
/* FP return value case: save $31 in $18, call, then move the
   FP result into $2/$3 before returning through $18.  */
7705 fprintf (asm_out_file, "\tmove\t%s,%s\n",
7706 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
7707 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
7708 /* As above, we can't fill the delay slot. */
7709 fprintf (asm_out_file, "\tnop\n");
7710 if (GET_MODE (retval) == SFmode)
7711 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7712 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
/* DFmode result: two mfc1s, word order depends on endianness.  */
7715 if (TARGET_BIG_ENDIAN)
7717 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7718 reg_names[GP_REG_FIRST + 2],
7719 reg_names[FP_REG_FIRST + 1]);
7720 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7721 reg_names[GP_REG_FIRST + 3],
7722 reg_names[FP_REG_FIRST + 0]);
7726 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7727 reg_names[GP_REG_FIRST + 2],
7728 reg_names[FP_REG_FIRST + 0]);
7729 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
7730 reg_names[GP_REG_FIRST + 3],
7731 reg_names[FP_REG_FIRST + 1]);
7734 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
7735 /* As above, we can't fill the delay slot. */
7736 fprintf (asm_out_file, "\tnop\n");
7739 fprintf (asm_out_file, "\t.set\treorder\n");
7741 #ifdef ASM_DECLARE_FUNCTION_SIZE
7742 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
7745 if (!FUNCTION_NAME_ALREADY_DECLARED)
7747 fputs ("\t.end\t", asm_out_file);
7748 assemble_name (asm_out_file, stubname);
7749 fputs ("\n", asm_out_file);
7752 fprintf (asm_out_file, "\t.set\tmips16\n");
7754 /* Record this stub. */
7755 l = (struct mips16_stub *) xmalloc (sizeof *l);
7756 l->name = xstrdup (fnname);
7758 l->next = mips16_stubs;
7762 /* If we expect a floating point return value, but we've built a
7763 stub which does not expect one, then we're in trouble. We can't
7764 use the existing stub, because it won't handle the floating point
7765 value. We can't build a new stub, because the linker won't know
7766 which stub to use for the various calls in this object file.
7767 Fortunately, this case is illegal, since it means that a function
7768 was declared in two different ways in a single compilation. */
7769 if (fpret && ! l->fpret)
7770 error ("cannot handle inconsistent calls to %qs", fnname);
7772 /* If we are calling a stub which handles a floating point return
7773 value, we need to arrange to save $18 in the prologue. We do
7774 this by marking the function call as using the register. The
7775 prologue will later see that it is used, and emit code to save
7782 if (retval == NULL_RTX)
7783 insn = gen_call_internal (fn, arg_size);
7785 insn = gen_call_value_internal (retval, fn, arg_size);
7786 insn = emit_call_insn (insn);
7788 CALL_INSN_FUNCTION_USAGE (insn) =
7789 gen_rtx_EXPR_LIST (VOIDmode,
7790 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
7791 CALL_INSN_FUNCTION_USAGE (insn));
7793 /* Return 1 to tell the caller that we've generated the call
7798 /* Return 0 to let the caller generate the call insn. */
7802 /* An entry in the mips16 constant pool. VALUE is the pool constant,
7803 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
/* NOTE(review): elided extract -- the `value' and `label' members and
   the closing braces of both structs are missing from this view.  */
7805 struct mips16_constant {
7806 struct mips16_constant *next;
7809 enum machine_mode mode;
7812 /* Information about an incomplete mips16 constant pool. FIRST is the
7813 first constant, HIGHEST_ADDRESS is the highest address that the first
7814 byte of the pool can have, and INSN_ADDRESS is the current instruction
7817 struct mips16_constant_pool {
7818 struct mips16_constant *first;
7819 int highest_address;
7823 /* Add constant VALUE to POOL and return its label. MODE is the
7824 value's mode (used for CONST_INTs, etc.). */
/* NOTE(review): elided extract -- the return type, braces, the `return'
   for the found-in-pool case, and the tail of the new-entry setup
   (value/mode assignment, list linkage, final return) are missing.  */
7827 add_constant (struct mips16_constant_pool *pool,
7828 rtx value, enum machine_mode mode)
7830 struct mips16_constant **p, *c;
7831 bool first_of_size_p;
7833 /* See whether the constant is already in the pool. If so, return the
7834 existing label, otherwise leave P pointing to the place where the
7835 constant should be added.
7837 Keep the pool sorted in increasing order of mode size so that we can
7838 reduce the number of alignments needed. */
7839 first_of_size_p = true;
7840 for (p = &pool->first; *p != 0; p = &(*p)->next)
7842 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
/* Stop once we reach entries of a larger mode: insert before them.  */
7844 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
7846 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
7847 first_of_size_p = false;
7850 /* In the worst case, the constant needed by the earliest instruction
7851 will end up at the end of the pool. The entire pool must then be
7852 accessible from that instruction.
7854 When adding the first constant, set the pool's highest address to
7855 the address of the first out-of-range byte. Adjust this address
7856 downwards each time a new constant is added. */
7857 if (pool->first == 0)
7858 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
7859 is the address of the instruction with the lowest two bits clear.
7860 The base PC value for ld has the lowest three bits clear. Assume
7861 the worst case here. */
7862 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
7863 pool->highest_address -= GET_MODE_SIZE (mode);
7864 if (first_of_size_p)
7865 /* Take into account the worst possible padding due to alignment. */
7866 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
7868 /* Create a new entry. */
7869 c = (struct mips16_constant *) xmalloc (sizeof *c);
7872 c->label = gen_label_rtx ();
7879 /* Output constant VALUE after instruction INSN and return the last
7880 instruction emitted. MODE is the mode of the constant. */
/* NOTE(review): elided extract -- return type, braces, case labels
   (MODE_INT presumably) and the vector-case return are missing.  */
7883 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
7885 switch (GET_MODE_CLASS (mode))
/* Integer constants: emit a consttable_int of the mode's size.  */
7889 rtx size = GEN_INT (GET_MODE_SIZE (mode));
7890 return emit_insn_after (gen_consttable_int (value, size), insn);
7894 return emit_insn_after (gen_consttable_float (value), insn);
7896 case MODE_VECTOR_FLOAT:
7897 case MODE_VECTOR_INT:
/* Vectors: recurse element by element.  */
7900 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
7901 insn = dump_constants_1 (GET_MODE_INNER (mode),
7902 CONST_VECTOR_ELT (value, i), insn);
7912 /* Dump out the constants in CONSTANTS after INSN. */
/* NOTE(review): elided extract -- return type, braces, the declaration
   and initialization of `align', the capture of `next', and the
   per-entry free are missing from this view.  */
7915 dump_constants (struct mips16_constant *constants, rtx insn)
7917 struct mips16_constant *c, *next;
7921 for (c = constants; c != NULL; c = next)
7923 /* If necessary, increase the alignment of PC. */
7924 if (align < GET_MODE_SIZE (c->mode))
7926 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
7927 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
7929 align = GET_MODE_SIZE (c->mode);
/* Emit the entry's label, then its value.  */
7931 insn = emit_label_after (c->label, insn);
7932 insn = dump_constants_1 (c->mode, c->value, insn);
7938 emit_barrier_after (insn);
7941 /* Return the length of instruction INSN. */
/* NOTE(review): elided extract -- return type, braces, and the guard
   before PATTERN (presumably a JUMP_P test) are missing.  */
7944 mips16_insn_length (rtx insn)
7948 rtx body = PATTERN (insn);
/* Jump tables: size is element size times element count.  */
7949 if (GET_CODE (body) == ADDR_VEC)
7950 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
7951 if (GET_CODE (body) == ADDR_DIFF_VEC)
7952 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
7954 return get_attr_length (insn);
7957 /* Rewrite *X so that constant pool references refer to the constant's
7958 label instead. DATA points to the constant pool structure. */
/* NOTE(review): elided extract -- return type, braces and the final
   return value (for_each_rtx callback convention) are missing.  */
7961 mips16_rewrite_pool_refs (rtx *x, void *data)
7963 struct mips16_constant_pool *pool = data;
/* Replace pool SYMBOL_REFs with a LABEL_REF to the pool entry,
   creating the entry on demand.  */
7964 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
7965 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
7966 get_pool_constant (*x),
7967 get_pool_mode (*x)));
7971 /* Build MIPS16 constant pools. */
/* NOTE(review): elided extract -- return type, braces, the declarations
   of insn/barrier/label/jump, the INSN_P guard, and the pool reset
   after each dump are missing from this view.  */
7974 mips16_lay_out_constants (void)
7976 struct mips16_constant_pool pool;
7980 memset (&pool, 0, sizeof (pool));
7981 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
7983 /* Rewrite constant pool references in INSN. */
7985 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
/* Track the running byte address of the instruction stream.  */
7987 pool.insn_address += mips16_insn_length (insn);
7989 if (pool.first != NULL)
7991 /* If there are no natural barriers between the first user of
7992 the pool and the highest acceptable address, we'll need to
7993 create a new instruction to jump around the constant pool.
7994 In the worst case, this instruction will be 4 bytes long.
7996 If it's too late to do this transformation after INSN,
7997 do it immediately before INSN. */
7998 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8002 label = gen_label_rtx ();
8004 jump = emit_jump_insn_before (gen_jump (label), insn);
8005 JUMP_LABEL (jump) = label;
8006 LABEL_NUSES (label) = 1;
8007 barrier = emit_barrier_after (jump);
8009 emit_label_after (label, barrier);
8010 pool.insn_address += 4;
8013 /* See whether the constant pool is now out of range of the first
8014 user. If so, output the constants after the previous barrier.
8015 Note that any instructions between BARRIER and INSN (inclusive)
8016 will use negative offsets to refer to the pool. */
8017 if (pool.insn_address > pool.highest_address)
8019 dump_constants (pool.first, barrier);
/* Remember the most recent barrier as a dump point.  */
8023 else if (BARRIER_P (insn))
/* Flush any remaining constants at the end of the function.  */
8027 dump_constants (pool.first, get_last_insn ());
8030 /* A temporary variable used by for_each_rtx callbacks, etc. */
8031 static rtx mips_sim_insn;
8033 /* A structure representing the state of the processor pipeline.
8034 Used by the mips_sim_* family of functions. */
/* NOTE(review): elided extract -- the `struct mips_sim {' opener, the
   `time' member, the inner struct's insn/time fields and the dfa_state
   member's declaration line are missing from this view.  */
8036 /* The maximum number of instructions that can be issued in a cycle.
8037 (Caches mips_issue_rate.) */
8038 unsigned int issue_rate;
8040 /* The current simulation time. */
8043 /* How many more instructions can be issued in the current cycle. */
8044 unsigned int insns_left;
8046 /* LAST_SET[X].INSN is the last instruction to set register X.
8047 LAST_SET[X].TIME is the time at which that instruction was issued.
8048 INSN is null if no instruction has yet set register X. */
8052 } last_set[FIRST_PSEUDO_REGISTER];
8054 /* The pipeline's current DFA state. */
8054 /* The pipeline's current DFA state. */
8058 /* Reset STATE to the initial simulation state. */
8061 mips_sim_reset (struct mips_sim *state)
8064 state->insns_left = state->issue_rate;
8065 memset (&state->last_set, 0, sizeof (state->last_set));
8066 state_reset (state->dfa_state);
8069 /* Initialize STATE before its first use. DFA_STATE points to an
8070 allocated but uninitialized DFA state. */
/* NOTE(review): elided extract -- return type and braces missing.  */
8073 mips_sim_init (struct mips_sim *state, state_t dfa_state)
8075 state->issue_rate = mips_issue_rate ();
8076 state->dfa_state = dfa_state;
8077 mips_sim_reset (state);
8080 /* Advance STATE by one clock cycle. */
/* NOTE(review): elided extract -- return type, braces and the
   increment of `state->time' are missing from this view.  */
8083 mips_sim_next_cycle (struct mips_sim *state)
8086 state->insns_left = state->issue_rate;
/* A null insn advances the DFA by one cycle.  */
8087 state_transition (state->dfa_state, 0);
8090 /* Advance simulation state STATE until instruction INSN can read
/* NOTE(review): elided extract -- return type, braces, the declaration
   of i/t and part of the comment are missing from this view.  */
8094 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
/* Wait for every hard register REG occupies.  */
8098 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8099 if (state->last_set[REGNO (reg) + i].insn != 0)
/* Ready time = issue time of the setter plus its latency to INSN.  */
8103 t = state->last_set[REGNO (reg) + i].time;
8104 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8105 while (state->time < t)
8106 mips_sim_next_cycle (state);
8110 /* A for_each_rtx callback. If *X is a register, advance simulation state
8111 DATA until mips_sim_insn can read the register's value. */
/* NOTE(review): elided extract -- return type, braces, the REG_P test
   and the callback's return value are missing from this view.  */
8114 mips_sim_wait_regs_2 (rtx *x, void *data)
8117 mips_sim_wait_reg (data, mips_sim_insn, *x);
8121 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
8124 mips_sim_wait_regs_1 (rtx *x, void *data)
8126 for_each_rtx (x, mips_sim_wait_regs_2, data);
8129 /* Advance simulation state STATE until all of INSN's register
8130 dependencies are satisfied. */
8133 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
8135 mips_sim_insn = insn;
8136 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8139 /* Advance simulation state STATE until the units required by
8140 instruction INSN are available. */
8143 mips_sim_wait_units (struct mips_sim *state, rtx insn)
/* Trial-issue INSN against a scratch copy of the DFA state; keep
   advancing cycles until the issue would succeed.  */
8147 tmp_state = alloca (state_size ());
8148 while (state->insns_left == 0
8149 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8150 state_transition (tmp_state, insn) >= 0))
8151 mips_sim_next_cycle (state);
8154 /* Advance simulation state STATE until INSN is ready to issue.
   That means both its register dependencies and its unit
   requirements are satisfied. */
8157 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8159 mips_sim_wait_regs (state, insn);
8160 mips_sim_wait_units (state, insn);
8163 /* mips_sim_insn has just set X. Update the LAST_SET array
8164 in simulation state DATA. */
8167 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8169 struct mips_sim *state;
/* NOTE(review): excerpt gap -- the REG_P (x) guard and the assignment
   of DATA to STATE are not visible here.  */
8174 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8176 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8177 state->last_set[REGNO (x) + i].time = state->time;
8181 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8182 can issue immediately (i.e., that mips_sim_wait_insn has already
   been called). */
8186 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8188 state_transition (state->dfa_state, insn);
8189 state->insns_left--;
/* Record which registers INSN sets, for later dependence checks.  */
8191 mips_sim_insn = insn;
8192 note_stores (PATTERN (insn), mips_sim_record_set, state);
8195 /* Simulate issuing a NOP in state STATE. */
8198 mips_sim_issue_nop (struct mips_sim *state)
/* A nop can always issue, but may need a fresh cycle first.  */
8200 if (state->insns_left == 0)
8201 mips_sim_next_cycle (state);
8202 state->insns_left--;
8205 /* Update simulation state STATE so that it's ready to accept the instruction
8206 after INSN. INSN should be part of the main rtl chain, not a member of a
   SEQUENCE. */
8210 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8212 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
/* NOTE(review): excerpt gap -- the JUMP_P/delay-slot condition that
   guards this call is not visible here.  */
8214 mips_sim_issue_nop (state);
8216 switch (GET_CODE (SEQ_BEGIN (insn)))
8220 /* We can't predict the processor state after a call or label. */
8221 mips_sim_reset (state);
8225 /* The delay slots of branch likely instructions are only executed
8226 when the branch is taken. Therefore, if the caller has simulated
8227 the delay slot instruction, STATE does not really reflect the state
8228 of the pipeline for the instruction after the delay slot. Also,
8229 branch likely instructions tend to incur a penalty when not taken,
8230 so there will probably be an extra delay between the branch and
8231 the instruction after the delay slot. */
8232 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8233 mips_sim_reset (state);
8241 /* The VR4130 pipeline issues aligned pairs of instructions together,
8242 but it stalls the second instruction if it depends on the first.
8243 In order to cut down the amount of logic required, this dependence
8244 check is not based on a full instruction decode. Instead, any non-SPECIAL
8245 instruction is assumed to modify the register specified by bits 20-16
8246 (which is usually the "rt" field).
8248 In beq, beql, bne and bnel instructions, the rt field is actually an
8249 input, so we can end up with a false dependence between the branch
8250 and its delay slot. If this situation occurs in instruction INSN,
8251 try to avoid it by swapping rs and rt. */
8254 vr4130_avoid_branch_rt_conflict (rtx insn)
8258 first = SEQ_BEGIN (insn);
8259 second = SEQ_END (insn);
/* NOTE(review): excerpt gap -- the leading conjunct of this condition
   (presumably a JUMP_P test on FIRST) is not visible here.  */
8261 && NONJUMP_INSN_P (second)
8262 && GET_CODE (PATTERN (first)) == SET
8263 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8264 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8266 /* Check for the right kind of condition. */
8267 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8268 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8269 && REG_P (XEXP (cond, 0))
8270 && REG_P (XEXP (cond, 1))
8271 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8272 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8274 /* SECOND mentions the rt register but not the rs register.
   EQ and NE are symmetric, so swapping the operands preserves
   the branch's meaning while removing the false dependence. */
8275 rtx tmp = XEXP (cond, 0);
8276 XEXP (cond, 0) = XEXP (cond, 1);
8277 XEXP (cond, 1) = tmp;
8282 /* Implement -mvr4130-align. Go through each basic block and simulate the
8283 processor pipeline. If we find that a pair of instructions could execute
8284 in parallel, and the first of those instruction is not 8-byte aligned,
8285 insert a nop to make it aligned. */
8288 vr4130_align_insns (void)
8290 struct mips_sim state;
8291 rtx insn, subinsn, last, last2, next;
/* NOTE(review): excerpt gap -- the declaration/initialization of
   ALIGNED_P and the initialization of LAST/LAST2 are not visible.  */
8296 /* LAST is the last instruction before INSN to have a nonzero length.
8297 LAST2 is the last such instruction before LAST. */
8301 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8304 mips_sim_init (&state, alloca (state_size ()));
8305 for (insn = get_insns (); insn != 0; insn = next)
8307 unsigned int length;
8309 next = NEXT_INSN (insn);
8311 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8312 This isn't really related to the alignment pass, but we do it on
8313 the fly to avoid a separate instruction walk. */
8314 vr4130_avoid_branch_rt_conflict (insn);
8316 if (USEFUL_INSN_P (insn))
8317 FOR_EACH_SUBINSN (subinsn, insn)
8319 mips_sim_wait_insn (&state, subinsn);
8321 /* If we want this instruction to issue in parallel with the
8322 previous one, make sure that the previous instruction is
8323 aligned. There are several reasons why this isn't worthwhile
8324 when the second instruction is a call:
8326 - Calls are less likely to be performance critical,
8327 - There's a good chance that the delay slot can execute
8328 in parallel with the call.
8329 - The return address would then be unaligned.
8331 In general, if we're going to insert a nop between instructions
8332 X and Y, it's better to insert it immediately after X. That
8333 way, if the nop makes Y aligned, it will also align any labels
   between X and Y. */
8335 if (state.insns_left != state.issue_rate
8336 && !CALL_P (subinsn))
8338 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8340 /* SUBINSN is the first instruction in INSN and INSN is
8341 aligned. We want to align the previous instruction
8342 instead, so insert a nop between LAST2 and LAST.
8344 Note that LAST could be either a single instruction
8345 or a branch with a delay slot. In the latter case,
8346 LAST, like INSN, is already aligned, but the delay
8347 slot must have some extra delay that stops it from
8348 issuing at the same time as the branch. We therefore
8349 insert a nop before the branch in order to align its
   delay slot. */
8351 emit_insn_after (gen_nop (), last2);
8354 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8356 /* SUBINSN is the delay slot of INSN, but INSN is
8357 currently unaligned. Insert a nop between
8358 LAST and INSN to align it. */
8359 emit_insn_after (gen_nop (), last);
8363 mips_sim_issue_insn (&state, subinsn);
8365 mips_sim_finish_insn (&state, insn);
8367 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8368 length = get_attr_length (insn);
8371 /* If the instruction is an asm statement or multi-instruction
8372 mips.md pattern, the length is only an estimate. Insert an
8373 8 byte alignment after it so that the following instructions
8374 can be handled correctly. */
8375 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8376 && (recog_memoized (insn) < 0 || length >= 8))
8378 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8379 next = NEXT_INSN (next);
8380 mips_sim_next_cycle (&state);
/* An odd word (4-byte) length flips the 8-byte alignment parity.  */
8383 else if (length & 4)
8384 aligned_p = !aligned_p;
8389 /* See whether INSN is an aligned label. */
8390 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8396 /* Subroutine of mips_reorg. If there is a hazard between INSN
8397 and a previous instruction, avoid it by inserting nops after
   instruction AFTER.
8400 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8401 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8402 before using the value of that register. *HILO_DELAY counts the
8403 number of instructions since the last hilo hazard (that is,
8404 the number of instructions since the last mflo or mfhi).
8406 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8407 for the next instruction.
8409 LO_REG is an rtx for the LO register, used in dependence checking. */
8412 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8413 rtx *delayed_reg, rtx lo_reg)
8421 pattern = PATTERN (insn);
8423 /* Do not put the whole function in .set noreorder if it contains
8424 an asm statement. We don't know whether there will be hazards
8425 between the asm statement and the gcc-generated code. */
8426 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8427 cfun->machine->all_noreorder_p = false;
8429 /* Ignore zero-length instructions (barriers and the like). */
8430 ninsns = get_attr_length (insn) / 4;
8434 /* Work out how many nops are needed. Note that we only care about
8435 registers that are explicitly mentioned in the instruction's pattern.
8436 It doesn't matter that calls use the argument registers or that they
8437 clobber hi and lo. */
8438 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8439 nops = 2 - *hilo_delay;
8440 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
/* NOTE(review): excerpt gap -- the `nops = 1;' / `nops = 0;' arms and
   the nop-emitting loop header are not visible here.  */
8445 /* Insert the nops between this instruction and the previous one.
8446 Each new nop takes us further from the last hilo hazard. */
8447 *hilo_delay += nops;
8449 emit_insn_after (gen_hazard_nop (), after);
8451 /* Set up the state for the next instruction. */
8452 *hilo_delay += ninsns;
8454 if (INSN_CODE (insn) >= 0)
8455 switch (get_attr_hazard (insn))
/* HAZARD_DELAY case: the register set by INSN must not be read on
   the immediately following cycle.  */
8465 set = single_set (insn);
8466 gcc_assert (set != 0);
8467 *delayed_reg = SET_DEST (set);
8473 /* Go through the instruction stream and insert nops where necessary.
8474 See if the whole function can then be put into .set noreorder &
   .set nomacro. */
8478 mips_avoid_hazards (void)
8480 rtx insn, last_insn, lo_reg, delayed_reg;
8483 /* Force all instructions to be split into their final form. */
8484 split_all_insns_noflow ();
8486 /* Recalculate instruction lengths without taking nops into account. */
8487 cfun->machine->ignore_hazard_length_p = true;
8488 shorten_branches (get_insns ());
/* Assume noreorder is possible until a disqualifying condition is seen.  */
8490 cfun->machine->all_noreorder_p = true;
8492 /* Profiled functions can't be all noreorder because the profiler
8493 support uses assembler macros. */
8494 if (current_function_profile)
8495 cfun->machine->all_noreorder_p = false;
8497 /* Code compiled with -mfix-vr4120 can't be all noreorder because
8498 we rely on the assembler to work around some errata. */
8499 if (TARGET_FIX_VR4120)
8500 cfun->machine->all_noreorder_p = false;
8502 /* The same is true for -mfix-vr4130 if we might generate mflo or
8503 mfhi instructions. Note that we avoid using mflo and mfhi if
8504 the VR4130 macc and dmacc instructions are available instead;
8505 see the *mfhilo_{si,di}_macc patterns. */
8506 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
8507 cfun->machine->all_noreorder_p = false;
8512 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8514 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
/* NOTE(review): excerpt gap -- the USEFUL_INSN_P guard and the
   LAST_INSN bookkeeping are not visible here.  */
8517 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
/* Process each member of a delay-slot SEQUENCE individually.  */
8518 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8519 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8520 &hilo_delay, &delayed_reg, lo_reg);
8522 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8523 &delayed_reg, lo_reg);
8530 /* Implement TARGET_MACHINE_DEPENDENT_REORG.
   NOTE(review): excerpt gap -- the function header and the TARGET_MIPS16
   condition guarding the first call are not visible here.  */
8536 mips16_lay_out_constants ();
8537 else if (TARGET_EXPLICIT_RELOCS)
/* Fill delay slots before hazard avoidance so inserted nops are
   accounted for.  */
8539 if (mips_flag_delayed_branch)
8540 dbr_schedule (get_insns (), dump_file);
8541 mips_avoid_hazards ();
8542 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8543 vr4130_align_insns ();
8547 /* This function does three things:
8549 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
8550 - Register the mips16 hardware floating point stubs.
8551 - Register the gofast functions if selected using --enable-gofast. */
8553 #include "config/gofast.h"
8556 mips_init_libfuncs (void)
8558 if (TARGET_FIX_VR4120)
/* VR4120 errata: use assembler-provided division routines.  */
8560 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
8561 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
8564 if (TARGET_MIPS16 && mips16_hard_float)
/* Single-float arithmetic stubs.  */
8566 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
8567 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
8568 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
8569 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
8571 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
8572 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
8573 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
8574 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
8575 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
8576 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
8578 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
8579 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
8581 if (TARGET_DOUBLE_FLOAT)
/* Double-float arithmetic and conversion stubs.  */
8583 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
8584 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
8585 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
8586 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
8588 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
8589 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
8590 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
8591 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
8592 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
8593 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
8595 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
8596 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
8598 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
8599 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
8603 gofast_maybe_init_libfuncs ();
8606 /* Return a number assessing the cost of moving a register in class
8607 FROM to class TO. The classes are expressed using the enumeration
8608 values such as `GENERAL_REGS'. A value of 2 is the default; other
8609 values are interpreted relative to that.
8611 It is not required that the cost always equal 2 when FROM is the
8612 same as TO; on some machines it is expensive to move between
8613 registers if they are not general registers.
8615 If reload sees an insn consisting of a single `set' between two
8616 hard registers, and if `REGISTER_MOVE_COST' applied to their
8617 classes returns a value of 2, reload does not check to ensure that
8618 the constraints of the insn are met. Setting a cost of other than
8619 2 will allow reload to verify that the constraints are met. You
8620 should do this if the `movM' pattern's constraints do not allow
   reload's default handling.
8623 ??? We make the cost of moving from HI/LO into general
8624 registers the same as for one of moving general registers to
8625 HI/LO for TARGET_MIPS16 in order to prevent allocating a
8626 pseudo to HI/LO. This might hurt optimizations though, it
8627 isn't clear if it is wise. And it might not work in all cases. We
8628 could solve the DImode LO reg problem by using a multiply, just
8629 like reload_{in,out}si. We could solve the SImode/HImode HI reg
8630 problem by using divide instructions. divu puts the remainder in
8631 the HI reg, so doing a divide by -1 will move the value in the HI
8632 reg for all values except -1. We could handle that case by using a
8633 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
8634 a compare/branch to test the input value to see which instruction
8635 we need to use. This gets pretty messy, but it is feasible. */
8638 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
8639 enum reg_class to, enum reg_class from)
/* NOTE(review): excerpt gap -- the return statements for each branch
   are not visible; only the class-dispatch structure is shown.  */
8641 if (from == M16_REGS && GR_REG_CLASS_P (to))
8643 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
8645 else if (GR_REG_CLASS_P (from))
8649 else if (to == M16_NA_REGS)
8651 else if (GR_REG_CLASS_P (to))
8658 else if (to == FP_REGS)
8660 else if (to == HI_REG || to == LO_REG || to == MD_REGS)
8667 else if (COP_REG_CLASS_P (to))
8671 } /* GR_REG_CLASS_P (from) */
8672 else if (from == FP_REGS)
8674 if (GR_REG_CLASS_P (to))
8676 else if (to == FP_REGS)
8678 else if (to == ST_REGS)
8680 } /* from == FP_REGS */
8681 else if (from == HI_REG || from == LO_REG || from == MD_REGS)
8683 if (GR_REG_CLASS_P (to))
8690 } /* from == HI_REG, etc. */
8691 else if (from == ST_REGS && GR_REG_CLASS_P (to))
8693 else if (COP_REG_CLASS_P (from))
8696 } /* COP_REG_CLASS_P (from) */
8703 /* Return the length of INSN. LENGTH is the initial length computed by
8704 attributes in the machine-description file. */
8707 mips_adjust_insn_length (rtx insn, int length)
8709 /* An unconditional jump has an unfilled delay slot if it is not part
8710 of a sequence. A conditional jump normally has a delay slot, but
8711 does not on MIPS16. */
8712 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
/* NOTE(review): excerpt gap -- the length adjustment applied here
   (presumably length += 4 for the delay slot) is not visible.  */
8715 /* See how many nops might be needed to avoid hardware hazards. */
8716 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
8717 switch (get_attr_hazard (insn))
8731 /* All MIPS16 instructions are a measly two bytes. */
8739 /* Return an asm sequence to start a noat block and load the address
8740 of a label into $1. */
8743 mips_output_load_label (void)
8745 if (TARGET_EXPLICIT_RELOCS)
/* NOTE(review): excerpt gap -- the ABI/Pmode switch selecting among
   these templates is not visible here.  */
8749 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
8752 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
8755 if (ISA_HAS_LOAD_DELAY)
8756 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
8757 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Non-explicit-relocs fallback: a (d)la macro does the whole load.  */
8761 if (Pmode == DImode)
8762 return "%[dla\t%@,%0";
8764 return "%[la\t%@,%0";
8769 /* Output assembly instructions to perform a conditional branch.
8771 INSN is the branch instruction. OPERANDS[0] is the condition.
8772 OPERANDS[1] is the target of the branch. OPERANDS[2] is the target
8773 of the first operand to the condition. If TWO_OPERANDS_P is
8774 nonzero the comparison takes two operands; OPERANDS[3] will be the
   second operand.
8777 If INVERTED_P is nonzero we are to branch if the condition does
8778 not hold. If FLOAT_P is nonzero this is a floating-point comparison.
8780 LENGTH is the length (in bytes) of the sequence we are to generate.
8781 That tells us whether to generate a simple conditional branch, or a
8782 reversed conditional branch around a `jr' instruction. */
8784 mips_output_conditional_branch (rtx insn, rtx *operands, int two_operands_p,
8785 int float_p, int inverted_p, int length)
8787 static char buffer[200];
8788 /* The kind of comparison we are doing. */
8789 enum rtx_code code = GET_CODE (operands[0]);
8790 /* Nonzero if the opcode for the comparison needs a `z' indicating
8791 that it is a comparison against zero. */
8793 /* A string to use in the assembly output to represent the first
   operand. */
8795 const char *op1 = "%z2";
8796 /* A string to use in the assembly output to represent the second
8797 operand. Use the hard-wired zero register if there's no second
   operand. */
8799 const char *op2 = (two_operands_p ? ",%z3" : ",%.");
8800 /* The operand-printing string for the comparison. */
8801 const char *const comp = (float_p ? "%F0" : "%C0");
8802 /* The operand-printing string for the inverted comparison. */
8803 const char *const inverted_comp = (float_p ? "%W0" : "%N0");
8805 /* The MIPS processors (for levels of the ISA at least two), have
8806 "likely" variants of each branch instruction. These instructions
8807 annul the instruction in the delay slot if the branch is not
   taken. */
8809 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
8811 if (!two_operands_p)
8813 /* To compute whether A > B, for example, we normally
8814 subtract B from A and then look at the sign bit. But, if we
8815 are doing an unsigned comparison, and B is zero, we don't
8816 have to do the subtraction. Instead, we can just check to
8817 see if A is nonzero. Thus, we change the CODE here to
8818 reflect the simpler comparison operation. */
/* NOTE(review): excerpt gap -- the switch over CODE that performs the
   GTU/LEU etc. rewrites is not visible here.  */
8830 /* A condition which will always be true. */
8836 /* A condition which will always be false. */
8842 /* Not a special case. */
8847 /* Relative comparisons are always done against zero. But
8848 equality comparisons are done between two operands, and therefore
8849 do not require a `z' in the assembly language output. */
8850 need_z_p = (!float_p && code != EQ && code != NE);
8851 /* For comparisons against zero, the zero is not provided
   explicitly. */
8856 /* Begin by terminating the buffer. That way we can always use
8857 strcat to add to it. */
8864 /* Just a simple conditional branch. */
8866 sprintf (buffer, "%%*b%s%%?\t%%Z2%%1%%/",
8867 inverted_p ? inverted_comp : comp);
8869 sprintf (buffer, "%%*b%s%s%%?\t%s%s,%%1%%/",
8870 inverted_p ? inverted_comp : comp,
8871 need_z_p ? "z" : "",
   op1, op2 follow -- NOTE(review): the trailing argument lines of this
   sprintf are not visible in this excerpt. */
8881 /* Generate a reversed conditional branch around ` j'
8894 If the original branch was a likely branch, the delay slot
8895 must be executed only if the branch is taken, so generate:
8907 When generating PIC, instead of:
/* NOTE(review): the assembly templates illustrating these cases were
   dropped from this excerpt.  */
8920 rtx target = gen_label_rtx ();
8922 orig_target = operands[1];
8923 operands[1] = target;
8924 /* Generate the reversed comparison. This takes four
   bytes. */
8927 sprintf (buffer, "%%*b%s\t%%Z2%%1",
8928 inverted_p ? comp : inverted_comp);
8930 sprintf (buffer, "%%*b%s%s\t%s%s,%%1",
8931 inverted_p ? comp : inverted_comp,
8932 need_z_p ? "z" : "",
   op1, op2);
8935 output_asm_insn (buffer, operands);
8937 if (length != 16 && length != 28 && ! mips_branch_likely)
8939 /* Output delay slot instruction. */
8940 rtx insn = final_sequence;
8941 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
   optimize, 1, NULL);
/* Mark the delay-slot insn as emitted so final does not print it twice.  */
8943 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8946 output_asm_insn ("%#", 0);
8949 output_asm_insn ("j\t%0", &orig_target);
/* PIC path: load the target address into $1 and jump through it.  */
8952 output_asm_insn (mips_output_load_label (), &orig_target);
8953 output_asm_insn ("jr\t%@%]", 0);
8956 if (length != 16 && length != 28 && mips_branch_likely)
8958 /* Output delay slot instruction. */
8959 rtx insn = final_sequence;
8960 final_scan_insn (XVECEXP (insn, 0, 1), asm_out_file,
   optimize, 1, NULL);
8962 INSN_DELETED_P (XVECEXP (insn, 0, 1)) = 1;
8965 output_asm_insn ("%#", 0);
8967 (*targetm.asm_out.internal_label) (asm_out_file, "L",
8968 CODE_LABEL_NUMBER (target));
8981 /* Used to output div or ddiv instruction DIVISION, which has the operands
8982 given by OPERANDS. Add in a divide-by-zero check if needed.
8984 When working around R4000 and R4400 errata, we need to make sure that
8985 the division is not immediately followed by a shift[1][2]. We also
8986 need to stop the division from being put into a branch delay slot[3].
8987 The easiest way to avoid both problems is to add a nop after the
8988 division. When a divide-by-zero check is needed, this nop can be
8989 used to fill the branch delay slot.
8991 [1] If a double-word or a variable shift executes immediately
8992 after starting an integer division, the shift may give an
8993 incorrect result. See quotations of errata #16 and #28 from
8994 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
8995 in mips.md for details.
8997 [2] A similar bug to [1] exists for all revisions of the
8998 R4000 and the R4400 when run in an MC configuration.
8999 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9001 "19. In this following sequence:
9003 ddiv (or ddivu or div or divu)
9004 dsll32 (or dsrl32, dsra32)
9006 if an MPT stall occurs, while the divide is slipping the cpu
9007 pipeline, then the following double shift would end up with an
9010 Workaround: The compiler needs to avoid generating any
9011 sequence with divide followed by extended double shift."
9013 This erratum is also present in "MIPS R4400MC Errata, Processor
9014 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9015 & 3.0" as errata #10 and #4, respectively.
9017 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9018 (also valid for MIPS R4000MC processors):
9020 "52. R4000SC: This bug does not apply for the R4000PC.
9022 There are two flavors of this bug:
9024 1) If the instruction just after divide takes an RF exception
9025 (tlb-refill, tlb-invalid) and gets an instruction cache
9026 miss (both primary and secondary) and the line which is
9027 currently in secondary cache at this index had the first
9028 data word, where the bits 5..2 are set, then R4000 would
9029 get a wrong result for the div.
9034 ------------------- # end-of page. -tlb-refill
9039 ------------------- # end-of page. -tlb-invalid
9042 2) If the divide is in the taken branch delay slot, where the
9043 target takes RF exception and gets an I-cache miss for the
9044 exception vector or where I-cache miss occurs for the
9045 target address, under the above mentioned scenarios, the
9046 div would get wrong results.
9049 j r2 # to next page mapped or unmapped
9050 div r8,r9 # this bug would be there as long
9051 # as there is an ICache miss and
9052 nop # the "data pattern" is present
9055 beq r0, r0, NextPage # to Next page
9059 This bug is present for div, divu, ddiv, and ddivu
9062 Workaround: For item 1), OS could make sure that the next page
9063 after the divide instruction is also mapped. For item 2), the
9064 compiler could make sure that the divide instruction is not in
9065 the branch delay slot."
9067 These processors have PRId values of 0x00004220 and 0x00004300 for
9068 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9071 mips_output_division (const char *division, rtx *operands)
/* NOTE(review): excerpt gap -- the declaration/initialization of S
   (to DIVISION) and the nop template assignment are not visible.  */
9076 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
/* Emit the division now; the returned template becomes the nop that
   also shields against the errata described above.  */
9078 output_asm_insn (s, operands);
9081 if (TARGET_CHECK_ZERO_DIV)
9085 output_asm_insn (s, operands);
9086 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9088 else if (GENERATE_DIVIDE_TRAPS)
9090 output_asm_insn (s, operands);
/* Non-trap path: branch over an explicit break on zero divisor.  */
9095 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9096 output_asm_insn (s, operands);
9097 s = "break\t7%)\n1:";
9103 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9104 with a final "000" replaced by "k". Ignore case.
9106 Note: this function is shared between GCC and GAS. */
9109 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
/* Skip the common case-insensitive prefix of both names.  */
9111 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9112 given++, canonical++;
/* Match if both names ended, or "...000" vs "...k" remains.  */
9114 return ((*given == 0 && *canonical == 0)
9115 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9119 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9120 CPU name. We've traditionally allowed a lot of variation here.
9122 Note: this function is shared between GCC and GAS. */
9125 mips_matching_cpu_name_p (const char *canonical, const char *given)
9127 /* First see if the name matches exactly, or with a final "000"
   replaced by "k". */
9129 if (mips_strict_matching_cpu_name_p (canonical, given))
9132 /* If not, try comparing based on numerical designation alone.
9133 See if GIVEN is an unadorned number, or 'r' followed by a number. */
9134 if (TOLOWER (*given) == 'r')
/* NOTE(review): excerpt gap -- the `given++;' that consumes the 'r'
   is not visible here.  */
9136 if (!ISDIGIT (*given))
9139 /* Skip over some well-known prefixes in the canonical name,
9140 hoping to find a number there too. */
9141 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9143 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9145 else if (TOLOWER (canonical[0]) == 'r')
9148 return mips_strict_matching_cpu_name_p (canonical, given);
9152 /* Return the mips_cpu_info entry for the processor or ISA given
9153 by CPU_STRING. Return null if the string isn't recognized.
9155 A similar function exists in GAS. */
9157 static const struct mips_cpu_info *
9158 mips_parse_cpu (const char *cpu_string)
9160 const struct mips_cpu_info *p;
9163 /* In the past, we allowed upper-case CPU names, but it doesn't
9164 work well with the multilib machinery. */
9165 for (s = cpu_string; *s != 0; s++)
/* NOTE(review): excerpt gap -- the ISUPPER test guarding this
   warning is not visible here.  */
9168 warning (0, "the cpu name must be lower case");
9172 /* 'from-abi' selects the most compatible architecture for the given
9173 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9174 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9175 version. Look first at the -mgp options, if given, otherwise base
9176 the choice on MASK_64BIT in TARGET_DEFAULT. */
9177 if (strcasecmp (cpu_string, "from-abi") == 0)
9178 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9179 : ABI_NEEDS_64BIT_REGS ? 3
9180 : (TARGET_64BIT ? 3 : 1));
9182 /* 'default' has traditionally been a no-op. Probably not very useful. */
9183 if (strcasecmp (cpu_string, "default") == 0)
9186 for (p = mips_cpu_info_table; p->name != 0; p++)
9187 if (mips_matching_cpu_name_p (p->name, cpu_string))
9194 /* Return the processor associated with the given ISA level, or null
9195 if the ISA isn't valid. */
9197 static const struct mips_cpu_info *
9198 mips_cpu_info_from_isa (int isa)
9200 const struct mips_cpu_info *p;
/* Linear search of the static CPU table; NOTE(review): the ISA
   comparison and return statements are not visible in this excerpt.  */
9202 for (p = mips_cpu_info_table; p->name != 0; p++)
9209 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9210 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9211 they only hold condition code modes, and CCmode is always considered to
9212 be 4 bytes wide. All other registers are word sized. */
9215 mips_hard_regno_nregs (int regno, enum machine_mode mode)
9217 if (ST_REG_P (regno))
9218 return ((GET_MODE_SIZE (mode) + 3) / 4);
9219 else if (! FP_REG_P (regno))
9220 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9222 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9225 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
9226 all BLKmode objects are returned in memory. Under the new (N32 and
9227 64-bit MIPS ABIs) small structures are returned in a register.
9228 Objects with varying size must still be returned in memory, of
   course. */
9232 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
/* NOTE(review): excerpt gap -- the TARGET_OLDABI condition separating
   the two return statements is not visible here.  */
9235 return (TYPE_MODE (type) == BLKmode);
9237 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9238 || (int_size_in_bytes (type) == -1));
/* Implement TARGET_STRICT_ARGUMENT_NAMING: the new ABIs name all
   arguments strictly. */
9242 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9244 return !TARGET_OLDABI;
9247 /* Return true if INSN is a multiply-add or multiply-subtract
9248 instruction and PREV assigns to the accumulator operand. */
9251 mips_linked_madd_p (rtx prev, rtx insn)
9255 x = single_set (insn);
/* NOTE(review): excerpt gap -- the null check on X and the
   `x = SET_SRC (x);' step are not visible here.  */
/* madd form: (plus (mult a b) acc) where PREV sets ACC.  */
9261 if (GET_CODE (x) == PLUS
9262 && GET_CODE (XEXP (x, 0)) == MULT
9263 && reg_set_p (XEXP (x, 1), prev))
/* msub form: (minus acc (mult a b)) where PREV sets ACC.  */
9266 if (GET_CODE (x) == MINUS
9267 && GET_CODE (XEXP (x, 1)) == MULT
9268 && reg_set_p (XEXP (x, 0), prev))
9274 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9275 that may clobber hi or lo. */
9277 static rtx mips_macc_chains_last_hilo;
9279 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9280 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9283 mips_macc_chains_record (rtx insn)
/* NOTE(review): excerpt gap -- the recog_memoized guard before this
   attribute query is not visible here.  */
9285 if (get_attr_may_clobber_hilo (insn))
9286 mips_macc_chains_last_hilo = insn;
9289 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9290 has NREADY elements, looking for a multiply-add or multiply-subtract
9291 instruction that is cumulative with mips_macc_chains_last_hilo.
9292 If there is one, promote it ahead of anything else that might
9293 clobber hi or lo. */
9296 mips_macc_chains_reorder (rtx *ready, int nready)
9300 if (mips_macc_chains_last_hilo != 0)
/* READY is ordered with the highest-priority insn last.  */
9301 for (i = nready - 1; i >= 0; i--)
9302 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9304 for (j = nready - 1; j > i; j--)
9305 if (recog_memoized (ready[j]) >= 0
9306 && get_attr_may_clobber_hilo (ready[j]))
9308 mips_promote_ready (ready, i, j);
9315 /* The last instruction to be scheduled. */
9317 static rtx vr4130_last_insn;
9319 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9320 points to an rtx that is initially an instruction. Nullify the rtx
9321 if the instruction uses the value of register X. */
9324 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9326 rtx *insn_ptr = data;
/* X is a store destination; if the recorded instruction's pattern
   reads it, there is a true dependence -- signal that by clearing
   *INSN_PTR.  */
9329 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9333 /* Return true if there is true register dependence between vr4130_last_insn
9337 vr4130_true_reg_dependence_p (rtx insn)
/* Walk every register written by vr4130_last_insn; the callback
   nullifies INSN if one of those registers is read by INSN.  */
9339 note_stores (PATTERN (vr4130_last_insn),
9340 vr4130_true_reg_dependence_p_1, &insn);
9344 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9345 the ready queue and that INSN2 is the instruction after it, return
9346 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9347 in which INSN1 and INSN2 can probably issue in parallel, but for
9348 which (INSN2, INSN1) should be less sensitive to instruction
9349 alignment than (INSN1, INSN2). See 4130.md for more details. */
9352 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9356 /* Check for the following case:
9358 1) there is some other instruction X with an anti dependence on INSN1;
9359 2) X has a higher priority than INSN2; and
9360 3) X is an arithmetic instruction (and thus has no unit restrictions).
9362 If INSN1 is the last instruction blocking X, it would better to
9363 choose (INSN1, X) over (INSN2, INSN1). */
/* Scan the instructions that depend on INSN1, looking for an X
   that satisfies conditions 1-3 above.  */
9364 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9365 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9366 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9367 && recog_memoized (XEXP (dep, 0)) >= 0
9368 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
/* Both instructions must be recognizable before we can query
   their vr4130_class attributes.  */
9371 if (vr4130_last_insn != 0
9372 && recog_memoized (insn1) >= 0
9373 && recog_memoized (insn2) >= 0)
9375 /* See whether INSN1 and INSN2 use different execution units,
9376 or if they are both ALU-type instructions. If so, they can
9377 probably execute in parallel. */
9378 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9379 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9380 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9382 /* If only one of the instructions has a dependence on
9383 vr4130_last_insn, prefer to schedule the other one first. */
9384 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9385 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9389 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9390 is not an ALU-type instruction and if INSN1 uses the same
9391 execution unit. (Note that if this condition holds, we already
9392 know that INSN2 uses a different execution unit.) */
9393 if (class1 != VR4130_CLASS_ALU
9394 && recog_memoized (vr4130_last_insn) >= 0
9395 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9402 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9403 queue with at least two instructions. Swap the first two if
9404 vr4130_swap_insns_p says that it could be worthwhile. */
9407 vr4130_reorder (rtx *ready, int nready)
/* The head of the ready queue is at index NREADY - 1.  */
9409 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9410 mips_promote_ready (ready, nready - 2, nready - 1);
9413 /* Remove the instruction at index LOWER from ready queue READY and
9414 reinsert it in front of the instruction at index HIGHER. LOWER must
9418 mips_promote_ready (rtx *ready, int lower, int higher)
/* Save the promoted instruction, then shift the intervening
   entries down one slot to close the gap.  */
9423 new_head = ready[lower];
9424 for (i = lower; i < higher; i++)
9425 ready[i] = ready[i + 1];
/* Reinsert the saved instruction at index HIGHER.  */
9426 ready[i] = new_head;
9429 /* Implement TARGET_SCHED_REORDER. */
9432 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9433 rtx *ready, int *nreadyp, int cycle)
/* Macc-chain scheduling applies only before register allocation.  */
9435 if (!reload_completed && TUNE_MACC_CHAINS)
9438 mips_macc_chains_last_hilo = 0;
9440 mips_macc_chains_reorder (ready, *nreadyp);
/* VR4130 reordering applies after reload, and only when explicit
   VR4130 alignment (TARGET_VR4130_ALIGN) is not in effect.  */
9442 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9445 vr4130_last_insn = 0;
9447 vr4130_reorder (ready, *nreadyp);
/* The hook must return the number of instructions that may issue
   this cycle.  */
9449 return mips_issue_rate ();
9452 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9455 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9458 switch (GET_CODE (PATTERN (insn)))
9462 /* Don't count USEs and CLOBBERs against the issue rate. */
/* Record the issued instruction for the macc-chain and VR4130
   scheduling heuristics above.  */
9467 if (!reload_completed && TUNE_MACC_CHAINS)
9468 mips_macc_chains_record (insn);
9469 vr4130_last_insn = insn;
9475 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9476 dependencies have no cost. */
9479 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9480 rtx dep ATTRIBUTE_UNUSED, int cost)
/* REG_NOTE_KIND (link) == 0 denotes a true dependence; any other
   kind (anti or output) is treated as free.  */
9482 if (REG_NOTE_KIND (link) != 0)
9487 /* Return the number of instructions that can be issued per cycle. */
9490 mips_issue_rate (void)
/* Processors with a non-default issue rate are listed explicitly;
   the rate is keyed off mips_tune.  */
9494 case PROCESSOR_R4130:
9495 case PROCESSOR_R5400:
9496 case PROCESSOR_R5500:
9497 case PROCESSOR_R7000:
9498 case PROCESSOR_R9000:
9502 /* This is actually 4, but we get better performance if we claim 3.
9503 This is partly because of unwanted speculative code motion with the
9504 larger number, and partly because in most common cases we can't
9505 reach the theoretical max of 4. */
9513 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9514 be as wide as the scheduling freedom in the DFA. */
/* Returns the lookahead depth for the first-cycle multipass
   scheduler; only the SB-1 is given a non-default depth here.  */
9517 mips_multipass_dfa_lookahead (void)
9519 /* Can schedule up to 4 of the 6 function units in any one cycle. */
9520 if (mips_tune == PROCESSOR_SB1)
9526 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9527 return the first operand of the associated "pref" or "prefx" insn.
   WRITE is a const_int (nonzero for a store prefetch) and LOCALITY a
   const_int giving the expected temporal locality of the access. */
9530 mips_prefetch_cookie (rtx write, rtx locality)
9532 /* store_streamed / load_streamed. */
9533 if (INTVAL (locality) <= 0)
9534 return GEN_INT (INTVAL (write) + 4);
9537 if (INTVAL (locality) <= 2)
9540 /* store_retained / load_retained. */
9541 return GEN_INT (INTVAL (write) + 6);
9544 /* MIPS builtin function support. */
9546 struct builtin_description
9548 /* The code of the main .md file instruction. See mips_builtin_type
9549 for more information. */
9550 enum insn_code icode;
9552 /* The floating-point comparison code to use with ICODE, if any. */
9553 enum mips_fp_condition cond;
9555 /* The name of the builtin function. */
9558 /* Specifies how the function should be expanded. */
9559 enum mips_builtin_type builtin_type;
9561 /* The function's prototype. */
9562 enum mips_function_type function_type;
9564 /* The target flags required for this function. */
/* (The required flags are checked against target_flags when the
   builtins are registered in mips_init_builtins.)  */
9568 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9569 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9570 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9571 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9572 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
9574 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9576 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9577 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9578 "__builtin_mips_" #INSN "_" #COND "_s", \
9579 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9580 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9581 "__builtin_mips_" #INSN "_" #COND "_d", \
9582 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9584 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9585 The lower and upper forms require TARGET_FLAGS while the any and all
9586 forms require MASK_MIPS3D. */
9587 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9588 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9589 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9590 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9591 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9592 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9593 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9594 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9595 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9596 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9597 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9598 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9599 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9601 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9602 require MASK_MIPS3D. */
9603 #define CMP_4S_BUILTINS(INSN, COND) \
9604 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9605 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9606 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9608 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9609 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9610 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9613 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9614 instruction requires TARGET_FLAGS. */
9615 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9616 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9617 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9618 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9620 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9621 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9622 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9625 /* Define all the builtins related to c.cond.fmt condition COND. */
9626 #define CMP_BUILTINS(COND) \
9627 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9628 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9629 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9630 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9631 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9632 CMP_4S_BUILTINS (c, COND), \
9633 CMP_4S_BUILTINS (cabs, COND)
9635 /* __builtin_mips_abs_ps() maps to the standard absM2 pattern. */
9636 #define CODE_FOR_mips_abs_ps CODE_FOR_absv2sf2
/* Paired-single and MIPS-3D builtin functions, selected at run time
   by the target-flag masks in each entry.  */
9638 static const struct builtin_description mips_bdesc[] =
9640 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9641 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9642 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9643 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9644 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
9645 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9646 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9647 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9649 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
9650 MASK_PAIRED_SINGLE_FLOAT),
9651 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9652 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9653 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9654 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9656 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9657 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9658 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9659 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9660 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9661 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9663 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
9664 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
9665 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
9666 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
9667 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
9668 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
9670 MIPS_FP_CONDITIONS (CMP_BUILTINS)
9673 /* Builtin functions for the SB-1 processor. */
9675 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
9677 static const struct builtin_description sb1_bdesc[] =
9679 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
9682 /* This helps provide a mapping from builtin function codes to bdesc
9687 /* The builtin function table that this entry describes. */
9688 const struct builtin_description *bdesc;
9690 /* The number of entries in the builtin function table. */
9693 /* The target processor that supports these builtin functions.
9694 PROCESSOR_DEFAULT means we enable them for all processors. */
9695 enum processor_type proc;
/* Builtin function codes are assigned sequentially across these
   arrays, in this order; see mips_init_builtins and
   mips_expand_builtin.  */
9698 static const struct bdesc_map bdesc_arrays[] =
9700 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_DEFAULT },
9701 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 }
9704 /* Take the head of argument list *ARGLIST and convert it into a form
9705 suitable for input operand OP of instruction ICODE. Return the value
9706 and point *ARGLIST at the next element of the list. */
9709 mips_prepare_builtin_arg (enum insn_code icode,
9710 unsigned int op, tree *arglist)
9713 enum machine_mode mode;
/* Expand the tree argument to RTL.  */
9715 value = expand_expr (TREE_VALUE (*arglist), NULL_RTX, VOIDmode, 0);
/* If the expanded value does not satisfy the operand's predicate,
   force it into a fresh register of the operand's mode.  */
9716 mode = insn_data[icode].operand[op].mode;
9717 if (!insn_data[icode].operand[op].predicate (value, mode))
9718 value = copy_to_mode_reg (mode, value);
/* Advance to the next argument for the caller.  */
9720 *arglist = TREE_CHAIN (*arglist);
9724 /* Return an rtx suitable for output operand OP of instruction ICODE.
9725 If TARGET is non-null, try to use it where possible. */
9728 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
9730 enum machine_mode mode;
/* Reuse TARGET only if it satisfies the output operand's predicate;
   otherwise allocate a new pseudo of the right mode.  */
9732 mode = insn_data[icode].operand[op].mode;
9733 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
9734 target = gen_reg_rtx (mode);
9739 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
9742 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
9743 enum machine_mode mode ATTRIBUTE_UNUSED,
9744 int ignore ATTRIBUTE_UNUSED)
9746 enum insn_code icode;
9747 enum mips_builtin_type type;
9748 tree fndecl, arglist;
9750 const struct builtin_description *bdesc;
9751 const struct bdesc_map *m;
/* Recover the called function declaration and its argument list
   from the CALL_EXPR.  */
9753 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
9754 arglist = TREE_OPERAND (exp, 1);
9755 fcode = DECL_FUNCTION_CODE (fndecl);
/* FCODE indexes the concatenation of the bdesc arrays; find the
   array that contains this builtin's description.  */
9758 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9760 if (fcode < m->size)
9763 icode = bdesc[fcode].icode;
9764 type = bdesc[fcode].builtin_type;
/* Dispatch on the builtin's classification.  */
9774 case MIPS_BUILTIN_DIRECT:
9775 return mips_expand_builtin_direct (icode, target, arglist);
9777 case MIPS_BUILTIN_MOVT:
9778 case MIPS_BUILTIN_MOVF:
9779 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
9782 case MIPS_BUILTIN_CMP_ANY:
9783 case MIPS_BUILTIN_CMP_ALL:
9784 case MIPS_BUILTIN_CMP_UPPER:
9785 case MIPS_BUILTIN_CMP_LOWER:
9786 case MIPS_BUILTIN_CMP_SINGLE:
9787 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
9795 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
9798 mips_init_builtins (void)
9800 const struct builtin_description *d;
9801 const struct bdesc_map *m;
9802 tree types[(int) MIPS_MAX_FTYPE_MAX];
9803 tree V2SF_type_node;
9804 unsigned int offset;
9806 /* We have only builtins for -mpaired-single and -mips3d. */
9807 if (!TARGET_PAIRED_SINGLE_FLOAT)
/* Build the tree types used by the builtin prototypes, indexed by
   mips_function_type.  */
9810 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
9812 types[MIPS_V2SF_FTYPE_V2SF]
9813 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
9815 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
9816 = build_function_type_list (V2SF_type_node,
9817 V2SF_type_node, V2SF_type_node, NULL_TREE);
9819 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
9820 = build_function_type_list (V2SF_type_node,
9821 V2SF_type_node, V2SF_type_node,
9822 integer_type_node, NULL_TREE);
9824 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
9825 = build_function_type_list (V2SF_type_node,
9826 V2SF_type_node, V2SF_type_node,
9827 V2SF_type_node, V2SF_type_node, NULL_TREE);
9829 types[MIPS_V2SF_FTYPE_SF_SF]
9830 = build_function_type_list (V2SF_type_node,
9831 float_type_node, float_type_node, NULL_TREE);
9833 types[MIPS_INT_FTYPE_V2SF_V2SF]
9834 = build_function_type_list (integer_type_node,
9835 V2SF_type_node, V2SF_type_node, NULL_TREE);
9837 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
9838 = build_function_type_list (integer_type_node,
9839 V2SF_type_node, V2SF_type_node,
9840 V2SF_type_node, V2SF_type_node, NULL_TREE);
9842 types[MIPS_INT_FTYPE_SF_SF]
9843 = build_function_type_list (integer_type_node,
9844 float_type_node, float_type_node, NULL_TREE);
9846 types[MIPS_INT_FTYPE_DF_DF]
9847 = build_function_type_list (integer_type_node,
9848 double_type_node, double_type_node, NULL_TREE);
9850 types[MIPS_SF_FTYPE_V2SF]
9851 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
9853 types[MIPS_SF_FTYPE_SF]
9854 = build_function_type_list (float_type_node,
9855 float_type_node, NULL_TREE);
9857 types[MIPS_SF_FTYPE_SF_SF]
9858 = build_function_type_list (float_type_node,
9859 float_type_node, float_type_node, NULL_TREE);
9861 types[MIPS_DF_FTYPE_DF]
9862 = build_function_type_list (double_type_node,
9863 double_type_node, NULL_TREE);
9865 types[MIPS_DF_FTYPE_DF_DF]
9866 = build_function_type_list (double_type_node,
9867 double_type_node, double_type_node, NULL_TREE);
9869 /* Iterate through all of the bdesc arrays, initializing all of the
9870 builtin functions. */
9873 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
9875 if (m->proc == PROCESSOR_DEFAULT || (m->proc == mips_arch))
/* Register only the builtins whose required target flags are
   all currently enabled; the function code is the entry's
   position in the concatenated tables.  */
9876 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
9877 if ((d->target_flags & target_flags) == d->target_flags)
9878 lang_hooks.builtin_function (d->name, types[d->function_type],
9879 d - m->bdesc + offset,
9880 BUILT_IN_MD, NULL, NULL);
9885 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
9886 .md pattern and ARGLIST is the list of function arguments. TARGET,
9887 if nonnull, suggests a good place to put the result. */
9890 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist)
9892 rtx ops[MAX_RECOG_OPERANDS];
/* Operand 0 is the output; the remaining operands are taken from
   ARGLIST in order.  */
9895 target = mips_prepare_builtin_target (icode, 0, target);
9896 for (i = 1; i < insn_data[icode].n_operands; i++)
9897 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
/* Emit the instruction; the generator's arity depends on the
   pattern's operand count.  */
9899 switch (insn_data[icode].n_operands)
9902 emit_insn (GEN_FCN (icode) (target, ops[1]));
9906 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2]));
9910 emit_insn (GEN_FCN (icode) (target, ops[1], ops[2], ops[3]));
9919 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
9920 function (TYPE says which). ARGLIST is the list of arguments to the
9921 function, ICODE is the instruction that should be used to compare
9922 the first two arguments, and COND is the condition it should test.
9923 TARGET, if nonnull, suggests a good place to put the result. */
9926 mips_expand_builtin_movtf (enum mips_builtin_type type,
9927 enum insn_code icode, enum mips_fp_condition cond,
9928 rtx target, tree arglist)
9930 rtx cmp_result, op0, op1;
/* Emit the comparison that sets CMP_RESULT from the first two
   arguments.  */
9932 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9933 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9934 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9935 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
/* Now emit the conditional move.  For MOVT the remaining two
   arguments are consumed in reversed operand order; MOVF uses the
   natural order.  */
9937 icode = CODE_FOR_mips_cond_move_tf_ps;
9938 target = mips_prepare_builtin_target (icode, 0, target);
9939 if (type == MIPS_BUILTIN_MOVT)
9941 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9942 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9946 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
9947 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
9949 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
9953 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
9954 of the comparison instruction and COND is the condition it should test.
9955 ARGLIST is the list of function arguments and TARGET, if nonnull,
9956 suggests a good place to put the boolean result. */
9959 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
9960 enum insn_code icode, enum mips_fp_condition cond,
9961 rtx target, tree arglist)
9963 rtx label1, label2, if_then_else;
9964 rtx pat, cmp_result, ops[MAX_RECOG_OPERANDS];
9965 rtx target_if_equal, target_if_unequal;
/* The boolean result is always an SImode value.  */
9968 if (target == 0 || GET_MODE (target) != SImode)
9969 target = gen_reg_rtx (SImode);
9971 /* Prepare the operands to the comparison. */
9972 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
9973 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
9974 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
/* Build the comparison pattern; the generator arity depends on
   whether the comparison takes two or four data operands.  */
9976 switch (insn_data[icode].n_operands)
9979 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond));
9983 pat = GEN_FCN (icode) (cmp_result, ops[1], ops[2],
9984 ops[3], ops[4], GEN_INT (cond));
9991 /* If the comparison sets more than one register, we define the result
9992 to be 0 if all registers are false and -1 if all registers are true.
9993 The value of the complete result is indeterminate otherwise. It is
9994 possible to test individual registers using SUBREGs.
9996 Set up CMP_RESULT, CMP_VALUE, TARGET_IF_EQUAL and TARGET_IF_UNEQUAL so
9997 that the result should be TARGET_IF_EQUAL if (EQ CMP_RESULT CMP_VALUE)
9998 and TARGET_IF_UNEQUAL otherwise. */
9999 if (builtin_type == MIPS_BUILTIN_CMP_ALL)
10002 target_if_equal = const1_rtx;
10003 target_if_unequal = const0_rtx;
10008 target_if_equal = const0_rtx;
10009 target_if_unequal = const1_rtx;
/* UPPER/LOWER test a single CCmode register within the CCV2 pair:
   byte offset 4 selects the upper half, 0 the lower half.  */
10010 if (builtin_type == MIPS_BUILTIN_CMP_UPPER)
10011 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 4);
10012 else if (builtin_type == MIPS_BUILTIN_CMP_LOWER)
10013 cmp_result = simplify_gen_subreg (CCmode, cmp_result, CCV2mode, 0);
10016 /* First assume that CMP_RESULT == CMP_VALUE. */
10017 emit_move_insn (target, target_if_equal)
10019 /* Branch to LABEL1 if CMP_RESULT != CMP_VALUE. */
10021 label1 = gen_label_rtx ();
10022 label2 = gen_label_rtx ();
10024 = gen_rtx_IF_THEN_ELSE (VOIDmode,
10025 gen_rtx_fmt_ee (NE, GET_MODE (cmp_result),
10026 cmp_result, GEN_INT (cmp_value)),
10027 gen_rtx_LABEL_REF (VOIDmode, label1), pc_rtx)
10028 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, if_then_else));
/* Fall through to LABEL2 when the values compare equal.  */
10029 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
10030 gen_rtx_LABEL_REF (VOIDmode, label2)));
10032 emit_label (label1);
10034 /* Fix TARGET for CMP_RESULT != CMP_VALUE. */
10035 emit_move_insn (target, target_if_unequal);
10036 emit_label (label2);
10041 #include "gt-mips.h"