1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4 Contributed by A. Lichnewsky, lich@inria.inria.fr.
5 Changes by Michael Meissner, meissner@osf.org.
6 64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7 Brendan Eich, brendan@microunity.com.
9 This file is part of GCC.
11 GCC is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License as published by
13 the Free Software Foundation; either version 2, or (at your option)
16 GCC is distributed in the hope that it will be useful,
17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details.
21 You should have received a copy of the GNU General Public License
22 along with GCC; see the file COPYING. If not, write to
23 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
24 Boston, MA 02110-1301, USA. */
28 #include "coretypes.h"
33 #include "hard-reg-set.h"
35 #include "insn-config.h"
36 #include "conditions.h"
37 #include "insn-attr.h"
53 #include "target-def.h"
54 #include "integrate.h"
55 #include "langhooks.h"
56 #include "cfglayout.h"
57 #include "sched-int.h"
58 #include "tree-gimple.h"
61 /* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   The UNSPEC number encodes the mips_symbol_type of the wrapped
   address (biased by UNSPEC_ADDRESS_FIRST); UNSPEC_ADDRESS_TYPE
   recovers it.  */
62 #define UNSPEC_ADDRESS_P(X) \
63 (GET_CODE (X) == UNSPEC \
64 && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST \
65 && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
67 /* Extract the symbol or label from UNSPEC wrapper X. */
68 #define UNSPEC_ADDRESS(X) \
71 /* Extract the symbol type from UNSPEC wrapper X.  X must satisfy
   UNSPEC_ADDRESS_P; the type was encoded by adding
   UNSPEC_ADDRESS_FIRST, so subtracting it restores the
   mips_symbol_type value.  */
72 #define UNSPEC_ADDRESS_TYPE(X) \
73 ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
75 /* The maximum distance between the top of the stack frame and the
76 value $sp has when we save & restore registers.
78 Use a maximum gap of 0x100 in the mips16 case. We can then use
79 unextended instructions to save and restore registers, and to
80 allocate and deallocate the top part of the frame.
82 The value in the !mips16 case must be a SMALL_OPERAND and must
83 preserve the maximum stack alignment.  (0x7ff0 is one 16-byte
   alignment unit below the 0x8000 SMALL_OPERAND limit --
   NOTE(review): assumes the maximum stack alignment is 16 bytes;
   confirm.)  */
84 #define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
86 /* True if INSN is a mips.md pattern or asm statement. */
87 #define USEFUL_INSN_P(INSN) \
89 && GET_CODE (PATTERN (INSN)) != USE \
90 && GET_CODE (PATTERN (INSN)) != CLOBBER \
91 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
92 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
94 /* If INSN is a delayed branch sequence, return the first instruction
95 in the sequence, otherwise return INSN itself. */
96 #define SEQ_BEGIN(INSN) \
97 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
98 ? XVECEXP (PATTERN (INSN), 0, 0) \
101 /* Likewise for the last instruction in a delayed branch sequence. */
102 #define SEQ_END(INSN) \
103 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
104 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
107 /* Execute the following loop body with SUBINSN set to each instruction
108 between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  When INSN
   is not a delayed-branch SEQUENCE, SEQ_BEGIN and SEQ_END both yield
   INSN itself, so the body runs exactly once for INSN.  */
109 #define FOR_EACH_SUBINSN(SUBINSN, INSN) \
110 for ((SUBINSN) = SEQ_BEGIN (INSN); \
111 (SUBINSN) != NEXT_INSN (SEQ_END (INSN)); \
112 (SUBINSN) = NEXT_INSN (SUBINSN))
114 /* Classifies an address.
117 A natural register + offset address. The register satisfies
118 mips_valid_base_register_p and the offset is a const_arith_operand.
121 A LO_SUM rtx. The first operand is a valid base register and
122 the second operand is a symbolic address.
125 A signed 16-bit constant address.
128 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
129 enum mips_address_type {
136 /* Classifies the prototype of a builtin function. */
137 enum mips_function_type
139 MIPS_V2SF_FTYPE_V2SF,
140 MIPS_V2SF_FTYPE_V2SF_V2SF,
141 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
142 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
143 MIPS_V2SF_FTYPE_SF_SF,
144 MIPS_INT_FTYPE_V2SF_V2SF,
145 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
146 MIPS_INT_FTYPE_SF_SF,
147 MIPS_INT_FTYPE_DF_DF,
154 /* For MIPS DSP ASE */
156 MIPS_DI_FTYPE_DI_SI_SI,
157 MIPS_DI_FTYPE_DI_V2HI_V2HI,
158 MIPS_DI_FTYPE_DI_V4QI_V4QI,
160 MIPS_SI_FTYPE_PTR_SI,
164 MIPS_SI_FTYPE_V2HI_V2HI,
166 MIPS_SI_FTYPE_V4QI_V4QI,
169 MIPS_V2HI_FTYPE_SI_SI,
170 MIPS_V2HI_FTYPE_V2HI,
171 MIPS_V2HI_FTYPE_V2HI_SI,
172 MIPS_V2HI_FTYPE_V2HI_V2HI,
173 MIPS_V2HI_FTYPE_V4QI,
174 MIPS_V2HI_FTYPE_V4QI_V2HI,
176 MIPS_V4QI_FTYPE_V2HI_V2HI,
177 MIPS_V4QI_FTYPE_V4QI_SI,
178 MIPS_V4QI_FTYPE_V4QI_V4QI,
179 MIPS_VOID_FTYPE_SI_SI,
180 MIPS_VOID_FTYPE_V2HI_V2HI,
181 MIPS_VOID_FTYPE_V4QI_V4QI,
187 /* Specifies how a builtin function should be converted into rtl. */
188 enum mips_builtin_type
190 /* The builtin corresponds directly to an .md pattern. The return
191 value is mapped to operand 0 and the arguments are mapped to
192 operands 1 and above. */
195 /* The builtin corresponds directly to an .md pattern. There is no return
196 value and the arguments are mapped to operands 0 and above. */
197 MIPS_BUILTIN_DIRECT_NO_TARGET,
199 /* The builtin corresponds to a comparison instruction followed by
200 a mips_cond_move_tf_ps pattern. The first two arguments are the
201 values to compare and the second two arguments are the vector
202 operands for the movt.ps or movf.ps instruction (in assembly order). */
206 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
207 of this instruction is the result of the comparison, which has mode
208 CCV2 or CCV4. The function arguments are mapped to operands 1 and
209 above. The function's return value is an SImode boolean that is
210 true under the following conditions:
212 MIPS_BUILTIN_CMP_ANY: one of the registers is true
213 MIPS_BUILTIN_CMP_ALL: all of the registers are true
214 MIPS_BUILTIN_CMP_LOWER: the first register is true
215 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
216 MIPS_BUILTIN_CMP_ANY,
217 MIPS_BUILTIN_CMP_ALL,
218 MIPS_BUILTIN_CMP_UPPER,
219 MIPS_BUILTIN_CMP_LOWER,
221 /* As above, but the instruction only sets a single $fcc register. */
222 MIPS_BUILTIN_CMP_SINGLE,
224 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
225 MIPS_BUILTIN_BPOSGE32
228 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
229 #define MIPS_FP_CONDITIONS(MACRO) \
247 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
248 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
249 enum mips_fp_condition {
250 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
253 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
254 #define STRINGIFY(X) #X
255 static const char *const mips_fp_conditions[] = {
256 MIPS_FP_CONDITIONS (STRINGIFY)
259 /* A function to save or store a register. The first argument is the
260 register and the second is the stack slot.  Passed as a callback to
   mips_save_restore_reg and mips_for_each_saved_reg (see prototypes
   below); mips_save_reg and mips_restore_reg match this signature.  */
261 typedef void (*mips_save_restore_fn) (rtx, rtx);
263 struct mips16_constant;
264 struct mips_arg_info;
265 struct mips_address_info;
266 struct mips_integer_op;
269 static enum mips_symbol_type mips_classify_symbol (rtx);
270 static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
271 static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
272 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
273 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
274 static bool mips_classify_address (struct mips_address_info *, rtx,
275 enum machine_mode, int);
276 static bool mips_cannot_force_const_mem (rtx);
277 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
278 static int mips_symbol_insns (enum mips_symbol_type);
279 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
280 static rtx mips_force_temporary (rtx, rtx);
281 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
282 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
283 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
284 static unsigned int mips_build_lower (struct mips_integer_op *,
285 unsigned HOST_WIDE_INT);
286 static unsigned int mips_build_integer (struct mips_integer_op *,
287 unsigned HOST_WIDE_INT);
288 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
289 static int m16_check_op (rtx, int, int, int);
290 static bool mips_rtx_costs (rtx, int, int, int *);
291 static int mips_address_cost (rtx);
292 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
293 static void mips_load_call_address (rtx, rtx, int);
294 static bool mips_function_ok_for_sibcall (tree, tree);
295 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
296 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
297 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
298 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
299 tree, int, struct mips_arg_info *);
300 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
301 static void mips_set_architecture (const struct mips_cpu_info *);
302 static void mips_set_tune (const struct mips_cpu_info *);
303 static bool mips_handle_option (size_t, const char *, int);
304 static struct machine_function *mips_init_machine_status (void);
305 static void print_operand_reloc (FILE *, rtx, const char **);
307 static void irix_output_external_libcall (rtx);
309 static void mips_file_start (void);
310 static void mips_file_end (void);
311 static bool mips_rewrite_small_data_p (rtx);
312 static int mips_small_data_pattern_1 (rtx *, void *);
313 static int mips_rewrite_small_data_1 (rtx *, void *);
314 static bool mips_function_has_gp_insn (void);
315 static unsigned int mips_global_pointer (void);
316 static bool mips_save_reg_p (unsigned int);
317 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
318 mips_save_restore_fn);
319 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
320 static void mips_output_cplocal (void);
321 static void mips_emit_loadgp (void);
322 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
323 static void mips_set_frame_expr (rtx);
324 static rtx mips_frame_set (rtx, rtx);
325 static void mips_save_reg (rtx, rtx);
326 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
327 static void mips_restore_reg (rtx, rtx);
328 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
329 HOST_WIDE_INT, tree);
330 static int symbolic_expression_p (rtx);
331 static section *mips_select_rtx_section (enum machine_mode, rtx,
332 unsigned HOST_WIDE_INT);
333 static section *mips_function_rodata_section (tree);
334 static bool mips_in_small_data_p (tree);
335 static bool mips_use_anchors_for_symbol_p (rtx);
336 static int mips_fpr_return_fields (tree, tree *);
337 static bool mips_return_in_msb (tree);
338 static rtx mips_return_fpr_pair (enum machine_mode mode,
339 enum machine_mode mode1, HOST_WIDE_INT,
340 enum machine_mode mode2, HOST_WIDE_INT);
341 static rtx mips16_gp_pseudo_reg (void);
342 static void mips16_fp_args (FILE *, int, int);
343 static void build_mips16_function_stub (FILE *);
344 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
345 static void dump_constants (struct mips16_constant *, rtx);
346 static int mips16_insn_length (rtx);
347 static int mips16_rewrite_pool_refs (rtx *, void *);
348 static void mips16_lay_out_constants (void);
349 static void mips_sim_reset (struct mips_sim *);
350 static void mips_sim_init (struct mips_sim *, state_t);
351 static void mips_sim_next_cycle (struct mips_sim *);
352 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
353 static int mips_sim_wait_regs_2 (rtx *, void *);
354 static void mips_sim_wait_regs_1 (rtx *, void *);
355 static void mips_sim_wait_regs (struct mips_sim *, rtx);
356 static void mips_sim_wait_units (struct mips_sim *, rtx);
357 static void mips_sim_wait_insn (struct mips_sim *, rtx);
358 static void mips_sim_record_set (rtx, rtx, void *);
359 static void mips_sim_issue_insn (struct mips_sim *, rtx);
360 static void mips_sim_issue_nop (struct mips_sim *);
361 static void mips_sim_finish_insn (struct mips_sim *, rtx);
362 static void vr4130_avoid_branch_rt_conflict (rtx);
363 static void vr4130_align_insns (void);
364 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
365 static void mips_avoid_hazards (void);
366 static void mips_reorg (void);
367 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
368 static bool mips_matching_cpu_name_p (const char *, const char *);
369 static const struct mips_cpu_info *mips_parse_cpu (const char *);
370 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
371 static bool mips_return_in_memory (tree, tree);
372 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
373 static void mips_macc_chains_record (rtx);
374 static void mips_macc_chains_reorder (rtx *, int);
375 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
376 static bool vr4130_true_reg_dependence_p (rtx);
377 static bool vr4130_swap_insns_p (rtx, rtx);
378 static void vr4130_reorder (rtx *, int);
379 static void mips_promote_ready (rtx *, int, int);
380 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
381 static int mips_variable_issue (FILE *, int, rtx, int);
382 static int mips_adjust_cost (rtx, rtx, rtx, int);
383 static int mips_issue_rate (void);
384 static int mips_multipass_dfa_lookahead (void);
385 static void mips_init_libfuncs (void);
386 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
388 static tree mips_build_builtin_va_list (void);
389 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
390 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
392 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
394 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
396 static bool mips_valid_pointer_mode (enum machine_mode);
397 static bool mips_vector_mode_supported_p (enum machine_mode);
398 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
399 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
400 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
401 static void mips_init_builtins (void);
402 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
403 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
404 enum insn_code, enum mips_fp_condition,
406 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
407 enum insn_code, enum mips_fp_condition,
409 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
410 static void mips_encode_section_info (tree, rtx, int);
411 static void mips_extra_live_on_entry (bitmap);
413 /* Structure to be filled in by compute_frame_size with register
414 save masks, and offsets for the current function. */
416 struct mips_frame_info GTY(())
418 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
419 HOST_WIDE_INT var_size; /* # bytes that variables take up */
420 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
421 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
422 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
423 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
424 unsigned int mask; /* mask of saved gp registers */
425 unsigned int fmask; /* mask of saved fp registers */
426 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
427 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
428 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
429 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
430 bool initialized; /* true if frame size already calculated */
431 int num_gp; /* number of gp registers saved */
432 int num_fp; /* number of fp registers saved */
435 struct machine_function GTY(()) {
436 /* Pseudo-reg holding the value of $28 in a mips16 function which
437 refers to GP relative global variables. */
438 rtx mips16_gp_pseudo_rtx;
440 /* The number of extra stack bytes taken up by register varargs.
441 This area is allocated by the callee at the very top of the frame. */
444 /* Current frame information, calculated by compute_frame_size. */
445 struct mips_frame_info frame;
447 /* The register to use as the global pointer within this function. */
448 unsigned int global_pointer;
450 /* True if mips_adjust_insn_length should ignore an instruction's
   hazard barrier.  */
452 bool ignore_hazard_length_p;
454 /* True if the whole function is suitable for .set noreorder and
   .set nomacro.  */
456 bool all_noreorder_p;
458 /* True if the function is known to have an instruction that needs $gp. */
462 /* Information about a single argument. */
465 /* True if the argument is passed in a floating-point register, or
466 would have been if we hadn't run out of registers. */
469 /* The number of words passed in registers, rounded up. */
470 unsigned int reg_words;
472 /* For EABI, the offset of the first register from GP_ARG_FIRST or
473 FP_ARG_FIRST. For other ABIs, the offset of the first register from
474 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
475 comment for details).
477 The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
479 unsigned int reg_offset;
481 /* The number of words that must be passed on the stack, rounded up. */
482 unsigned int stack_words;
484 /* The offset from the start of the stack overflow area of the argument's
485 first stack word. Only meaningful when STACK_WORDS is nonzero. */
486 unsigned int stack_offset;
490 /* Information about an address described by mips_address_type.
496 REG is the base register and OFFSET is the constant offset.
499 REG is the register that contains the high part of the address,
500 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
501 is the type of OFFSET's symbol.
504 SYMBOL_TYPE is the type of symbol being referenced. */
506 struct mips_address_info
508 enum mips_address_type type;
511 enum mips_symbol_type symbol_type;
515 /* One stage in a constant building sequence. These sequences have
519 A = A CODE[1] VALUE[1]
520 A = A CODE[2] VALUE[2]
523 where A is an accumulator, each CODE[i] is a binary rtl operation
524 and each VALUE[i] is a constant integer. */
525 struct mips_integer_op {
527 unsigned HOST_WIDE_INT value;
531 /* The largest number of operations needed to load an integer constant.
532 The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
533 When the lowest bit is clear, we can try, but reject a sequence with
534 an extra SLL at the end. */
535 #define MIPS_MAX_INTEGER_OPS 7
538 /* Global variables for machine-dependent things. */
540 /* Threshold for data being put into the small data/bss area, instead
541 of the normal data area. */
542 int mips_section_threshold = -1;
544 /* Count the number of .file directives, so that .loc is up to date. */
545 int num_source_filenames = 0;
547 /* Count the number of sdb related labels are generated (to find block
548 start and end boundaries). */
549 int sdb_label_count = 0;
551 /* Next label # for each statement for Silicon Graphics IRIS systems. */
554 /* Linked list of all externals that are to be emitted when optimizing
555 for the global pointer if they haven't been declared by the end of
556 the program with an appropriate .comm or initialization. */
558 struct extern_list GTY (())
560 struct extern_list *next; /* next external */
561 const char *name; /* name of the external */
562 int size; /* size in bytes */
565 static GTY (()) struct extern_list *extern_head = 0;
567 /* Name of the file containing the current function. */
568 const char *current_function_file = "";
570 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
576 /* The next branch instruction is a branch likely, not branch normal. */
577 int mips_branch_likely;
579 /* The operands passed to the last cmpMM expander. */
582 /* The target cpu for code generation. */
583 enum processor_type mips_arch;
584 const struct mips_cpu_info *mips_arch_info;
586 /* The target cpu for optimization and scheduling. */
587 enum processor_type mips_tune;
588 const struct mips_cpu_info *mips_tune_info;
590 /* Which instruction set architecture to use. */
593 /* Which ABI to use. */
594 int mips_abi = MIPS_ABI_DEFAULT;
596 /* Cost information to use. */
597 const struct mips_rtx_cost_data *mips_cost;
599 /* Whether we are generating mips16 hard float code. In mips16 mode
600 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
601 -msoft-float was not specified by the user, which means that we
602 should arrange to call mips32 hard floating point code. */
603 int mips16_hard_float;
605 /* The architecture selected by -mipsN. */
606 static const struct mips_cpu_info *mips_isa_info;
608 /* If TRUE, we split addresses into their high and low parts in the RTL. */
609 int mips_split_addresses;
611 /* Mode used for saving/restoring general purpose registers. */
612 static enum machine_mode gpr_mode;
614 /* Array giving truth value on whether or not a given hard register
615 can support a given mode. */
616 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
618 /* List of all MIPS punctuation characters used by print_operand. */
619 char mips_print_operand_punct[256];
621 /* Map GCC register number to debugger register number. */
622 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
624 /* A copy of the original flag_delayed_branch: see override_options. */
625 static int mips_flag_delayed_branch;
627 static GTY (()) int mips_output_filename_first_time = 1;
629 /* mips_split_p[X] is true if symbols of type X can be split by
630 mips_split_symbol(). */
631 bool mips_split_p[NUM_SYMBOL_TYPES];
633 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
634 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
635 if they are matched by a special .md file pattern. */
636 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
638 /* Likewise for HIGHs. */
639 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
641 /* Map hard register number to register class.  Four entries per
   source line; index = hard register number.  */
642 const enum reg_class mips_regno_to_class[] =
   /* General-purpose registers.  */
644 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
645 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
646 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
647 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
648 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
649 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
650 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
651 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
   /* Floating-point registers.  */
652 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
653 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
654 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
655 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
656 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
657 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
658 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
659 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
   /* hi/lo, condition-code and other special registers.  */
660 HI_REG, LO_REG, NO_REGS, ST_REGS,
661 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
662 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
663 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
   /* Coprocessor 0 registers.  */
664 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
665 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
666 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
667 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
668 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
669 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
670 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
671 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
   /* Coprocessor 2 registers.  */
672 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
673 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
674 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
675 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
676 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
677 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
678 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
679 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
   /* Coprocessor 3 registers.  */
680 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
681 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
682 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
683 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
684 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
685 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
686 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
687 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
   /* DSP accumulators, followed by remaining special registers.  */
688 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
689 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
690 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
693 /* Table of machine dependent attributes. */
694 const struct attribute_spec mips_attribute_table[] =
696 { "long_call", 0, 0, false, true, true, NULL },
697 { NULL, 0, 0, false, false, false, NULL }
700 /* A table describing all the processors gcc knows about. Names are
701 matched in the order listed. The first mention of an ISA level is
702 taken as the canonical name for that ISA.
704 To ease comparison, please keep this table in the same order as
705 gas's mips_cpu_info_table[]. */
706 const struct mips_cpu_info mips_cpu_info_table[] = {
707 /* Entries for generic ISAs */
708 { "mips1", PROCESSOR_R3000, 1 },
709 { "mips2", PROCESSOR_R6000, 2 },
710 { "mips3", PROCESSOR_R4000, 3 },
711 { "mips4", PROCESSOR_R8000, 4 },
712 { "mips32", PROCESSOR_4KC, 32 },
713 { "mips32r2", PROCESSOR_M4K, 33 },
714 { "mips64", PROCESSOR_5KC, 64 },
   /* MIPS I (ISA level 1) */
717 { "r3000", PROCESSOR_R3000, 1 },
718 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
719 { "r3900", PROCESSOR_R3900, 1 },
   /* MIPS II (ISA level 2) */
722 { "r6000", PROCESSOR_R6000, 2 },
   /* MIPS III (ISA level 3) */
725 { "r4000", PROCESSOR_R4000, 3 },
726 { "vr4100", PROCESSOR_R4100, 3 },
727 { "vr4111", PROCESSOR_R4111, 3 },
728 { "vr4120", PROCESSOR_R4120, 3 },
729 { "vr4130", PROCESSOR_R4130, 3 },
730 { "vr4300", PROCESSOR_R4300, 3 },
731 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
732 { "r4600", PROCESSOR_R4600, 3 },
733 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
734 { "r4650", PROCESSOR_R4650, 3 },
   /* MIPS IV (ISA level 4) */
737 { "r8000", PROCESSOR_R8000, 4 },
738 { "vr5000", PROCESSOR_R5000, 4 },
739 { "vr5400", PROCESSOR_R5400, 4 },
740 { "vr5500", PROCESSOR_R5500, 4 },
741 { "rm7000", PROCESSOR_R7000, 4 },
742 { "rm9000", PROCESSOR_R9000, 4 },
   /* MIPS32 (ISA level 32) */
745 { "4kc", PROCESSOR_4KC, 32 },
746 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
747 { "4kp", PROCESSOR_4KP, 32 },
749 /* MIPS32 Release 2 */
750 { "m4k", PROCESSOR_M4K, 33 },
751 { "24k", PROCESSOR_24K, 33 },
752 { "24kc", PROCESSOR_24K, 33 }, /* 24K no FPU */
753 { "24kf", PROCESSOR_24K, 33 }, /* 24K 1:2 FPU */
754 { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
   /* MIPS64 (ISA level 64) */
757 { "5kc", PROCESSOR_5KC, 64 },
758 { "5kf", PROCESSOR_5KF, 64 },
759 { "20kc", PROCESSOR_20KC, 64 },
760 { "sb1", PROCESSOR_SB1, 64 },
761 { "sr71000", PROCESSOR_SR71000, 64 },
767 /* Default costs. If these are used for a processor we should look
768 up the actual costs.  Expands to an initializer fragment for
   struct mips_rtx_cost_data, covering the fields from fp_add
   through memory_latency.  */
769 #define DEFAULT_COSTS COSTS_N_INSNS (6), /* fp_add */ \
770 COSTS_N_INSNS (7), /* fp_mult_sf */ \
771 COSTS_N_INSNS (8), /* fp_mult_df */ \
772 COSTS_N_INSNS (23), /* fp_div_sf */ \
773 COSTS_N_INSNS (36), /* fp_div_df */ \
774 COSTS_N_INSNS (10), /* int_mult_si */ \
775 COSTS_N_INSNS (10), /* int_mult_di */ \
776 COSTS_N_INSNS (69), /* int_div_si */ \
777 COSTS_N_INSNS (69), /* int_div_di */ \
778 2, /* branch_cost */ \
779 4 /* memory_latency */
781 /* Need to replace these with the costs of calling the appropriate
   library function.  Placeholder soft-float costs: only the five
   floating-point fields (fp_add through fp_div_df) are provided, so
   the user of the macro supplies the integer fields.  */
783 #define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */ \
784 COSTS_N_INSNS (256), /* fp_mult_sf */ \
785 COSTS_N_INSNS (256), /* fp_mult_df */ \
786 COSTS_N_INSNS (256), /* fp_div_sf */ \
787 COSTS_N_INSNS (256) /* fp_div_df */
789 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
792 COSTS_N_INSNS (2), /* fp_add */
793 COSTS_N_INSNS (4), /* fp_mult_sf */
794 COSTS_N_INSNS (5), /* fp_mult_df */
795 COSTS_N_INSNS (12), /* fp_div_sf */
796 COSTS_N_INSNS (19), /* fp_div_df */
797 COSTS_N_INSNS (12), /* int_mult_si */
798 COSTS_N_INSNS (12), /* int_mult_di */
799 COSTS_N_INSNS (35), /* int_div_si */
800 COSTS_N_INSNS (35), /* int_div_di */
802 4 /* memory_latency */
807 COSTS_N_INSNS (6), /* int_mult_si */
808 COSTS_N_INSNS (6), /* int_mult_di */
809 COSTS_N_INSNS (36), /* int_div_si */
810 COSTS_N_INSNS (36), /* int_div_di */
812 4 /* memory_latency */
816 COSTS_N_INSNS (36), /* int_mult_si */
817 COSTS_N_INSNS (36), /* int_mult_di */
818 COSTS_N_INSNS (37), /* int_div_si */
819 COSTS_N_INSNS (37), /* int_div_di */
821 4 /* memory_latency */
825 COSTS_N_INSNS (4), /* int_mult_si */
826 COSTS_N_INSNS (11), /* int_mult_di */
827 COSTS_N_INSNS (36), /* int_div_si */
828 COSTS_N_INSNS (68), /* int_div_di */
830 4 /* memory_latency */
833 COSTS_N_INSNS (4), /* fp_add */
834 COSTS_N_INSNS (4), /* fp_mult_sf */
835 COSTS_N_INSNS (5), /* fp_mult_df */
836 COSTS_N_INSNS (17), /* fp_div_sf */
837 COSTS_N_INSNS (32), /* fp_div_df */
838 COSTS_N_INSNS (4), /* int_mult_si */
839 COSTS_N_INSNS (11), /* int_mult_di */
840 COSTS_N_INSNS (36), /* int_div_si */
841 COSTS_N_INSNS (68), /* int_div_di */
843 4 /* memory_latency */
849 COSTS_N_INSNS (8), /* fp_add */
850 COSTS_N_INSNS (8), /* fp_mult_sf */
851 COSTS_N_INSNS (10), /* fp_mult_df */
852 COSTS_N_INSNS (34), /* fp_div_sf */
853 COSTS_N_INSNS (64), /* fp_div_df */
854 COSTS_N_INSNS (5), /* int_mult_si */
855 COSTS_N_INSNS (5), /* int_mult_di */
856 COSTS_N_INSNS (41), /* int_div_si */
857 COSTS_N_INSNS (41), /* int_div_di */
859 4 /* memory_latency */
862 COSTS_N_INSNS (4), /* fp_add */
863 COSTS_N_INSNS (4), /* fp_mult_sf */
864 COSTS_N_INSNS (5), /* fp_mult_df */
865 COSTS_N_INSNS (17), /* fp_div_sf */
866 COSTS_N_INSNS (32), /* fp_div_df */
867 COSTS_N_INSNS (5), /* int_mult_si */
868 COSTS_N_INSNS (5), /* int_mult_di */
869 COSTS_N_INSNS (41), /* int_div_si */
870 COSTS_N_INSNS (41), /* int_div_di */
872 4 /* memory_latency */
878 COSTS_N_INSNS (2), /* fp_add */
879 COSTS_N_INSNS (4), /* fp_mult_sf */
880 COSTS_N_INSNS (5), /* fp_mult_df */
881 COSTS_N_INSNS (12), /* fp_div_sf */
882 COSTS_N_INSNS (19), /* fp_div_df */
883 COSTS_N_INSNS (2), /* int_mult_si */
884 COSTS_N_INSNS (2), /* int_mult_di */
885 COSTS_N_INSNS (35), /* int_div_si */
886 COSTS_N_INSNS (35), /* int_div_di */
888 4 /* memory_latency */
891 COSTS_N_INSNS (3), /* fp_add */
892 COSTS_N_INSNS (5), /* fp_mult_sf */
893 COSTS_N_INSNS (6), /* fp_mult_df */
894 COSTS_N_INSNS (15), /* fp_div_sf */
895 COSTS_N_INSNS (16), /* fp_div_df */
896 COSTS_N_INSNS (17), /* int_mult_si */
897 COSTS_N_INSNS (17), /* int_mult_di */
898 COSTS_N_INSNS (38), /* int_div_si */
899 COSTS_N_INSNS (38), /* int_div_di */
901 6 /* memory_latency */
904 COSTS_N_INSNS (6), /* fp_add */
905 COSTS_N_INSNS (7), /* fp_mult_sf */
906 COSTS_N_INSNS (8), /* fp_mult_df */
907 COSTS_N_INSNS (23), /* fp_div_sf */
908 COSTS_N_INSNS (36), /* fp_div_df */
909 COSTS_N_INSNS (10), /* int_mult_si */
910 COSTS_N_INSNS (10), /* int_mult_di */
911 COSTS_N_INSNS (69), /* int_div_si */
912 COSTS_N_INSNS (69), /* int_div_di */
914 6 /* memory_latency */
926 /* The only costs that appear to be updated here are
927 integer multiplication. */
929 COSTS_N_INSNS (4), /* int_mult_si */
930 COSTS_N_INSNS (6), /* int_mult_di */
931 COSTS_N_INSNS (69), /* int_div_si */
932 COSTS_N_INSNS (69), /* int_div_di */
934 4 /* memory_latency */
946 COSTS_N_INSNS (6), /* fp_add */
947 COSTS_N_INSNS (4), /* fp_mult_sf */
948 COSTS_N_INSNS (5), /* fp_mult_df */
949 COSTS_N_INSNS (23), /* fp_div_sf */
950 COSTS_N_INSNS (36), /* fp_div_df */
951 COSTS_N_INSNS (5), /* int_mult_si */
952 COSTS_N_INSNS (5), /* int_mult_di */
953 COSTS_N_INSNS (36), /* int_div_si */
954 COSTS_N_INSNS (36), /* int_div_di */
956 4 /* memory_latency */
959 COSTS_N_INSNS (6), /* fp_add */
960 COSTS_N_INSNS (5), /* fp_mult_sf */
961 COSTS_N_INSNS (6), /* fp_mult_df */
962 COSTS_N_INSNS (30), /* fp_div_sf */
963 COSTS_N_INSNS (59), /* fp_div_df */
964 COSTS_N_INSNS (3), /* int_mult_si */
965 COSTS_N_INSNS (4), /* int_mult_di */
966 COSTS_N_INSNS (42), /* int_div_si */
967 COSTS_N_INSNS (74), /* int_div_di */
969 4 /* memory_latency */
972 COSTS_N_INSNS (6), /* fp_add */
973 COSTS_N_INSNS (5), /* fp_mult_sf */
974 COSTS_N_INSNS (6), /* fp_mult_df */
975 COSTS_N_INSNS (30), /* fp_div_sf */
976 COSTS_N_INSNS (59), /* fp_div_df */
977 COSTS_N_INSNS (5), /* int_mult_si */
978 COSTS_N_INSNS (9), /* int_mult_di */
979 COSTS_N_INSNS (42), /* int_div_si */
980 COSTS_N_INSNS (74), /* int_div_di */
982 4 /* memory_latency */
985 /* The only costs that are changed here are
986 integer multiplication. */
987 COSTS_N_INSNS (6), /* fp_add */
988 COSTS_N_INSNS (7), /* fp_mult_sf */
989 COSTS_N_INSNS (8), /* fp_mult_df */
990 COSTS_N_INSNS (23), /* fp_div_sf */
991 COSTS_N_INSNS (36), /* fp_div_df */
992 COSTS_N_INSNS (5), /* int_mult_si */
993 COSTS_N_INSNS (9), /* int_mult_di */
994 COSTS_N_INSNS (69), /* int_div_si */
995 COSTS_N_INSNS (69), /* int_div_di */
997 4 /* memory_latency */
1003 /* The only costs that are changed here are
1004 integer multiplication. */
1005 COSTS_N_INSNS (6), /* fp_add */
1006 COSTS_N_INSNS (7), /* fp_mult_sf */
1007 COSTS_N_INSNS (8), /* fp_mult_df */
1008 COSTS_N_INSNS (23), /* fp_div_sf */
1009 COSTS_N_INSNS (36), /* fp_div_df */
1010 COSTS_N_INSNS (3), /* int_mult_si */
1011 COSTS_N_INSNS (8), /* int_mult_di */
1012 COSTS_N_INSNS (69), /* int_div_si */
1013 COSTS_N_INSNS (69), /* int_div_di */
1014 1, /* branch_cost */
1015 4 /* memory_latency */
1018 COSTS_N_INSNS (4), /* fp_add */
1019 COSTS_N_INSNS (4), /* fp_mult_sf */
1020 COSTS_N_INSNS (4), /* fp_mult_df */
1021 COSTS_N_INSNS (24), /* fp_div_sf */
1022 COSTS_N_INSNS (32), /* fp_div_df */
1023 COSTS_N_INSNS (3), /* int_mult_si */
1024 COSTS_N_INSNS (4), /* int_mult_di */
1025 COSTS_N_INSNS (36), /* int_div_si */
1026 COSTS_N_INSNS (68), /* int_div_di */
1027 1, /* branch_cost */
1028 4 /* memory_latency */
1036 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1037 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1038 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1041 /* Initialize the GCC target structure. */
1042 #undef TARGET_ASM_ALIGNED_HI_OP
1043 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1044 #undef TARGET_ASM_ALIGNED_SI_OP
1045 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1046 #undef TARGET_ASM_ALIGNED_DI_OP
1047 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1049 #undef TARGET_ASM_FUNCTION_PROLOGUE
1050 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1051 #undef TARGET_ASM_FUNCTION_EPILOGUE
1052 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1053 #undef TARGET_ASM_SELECT_RTX_SECTION
1054 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1055 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1056 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1058 #undef TARGET_SCHED_REORDER
1059 #define TARGET_SCHED_REORDER mips_sched_reorder
1060 #undef TARGET_SCHED_VARIABLE_ISSUE
1061 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1062 #undef TARGET_SCHED_ADJUST_COST
1063 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1064 #undef TARGET_SCHED_ISSUE_RATE
1065 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1066 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1067 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1068 mips_multipass_dfa_lookahead
1070 #undef TARGET_DEFAULT_TARGET_FLAGS
1071 #define TARGET_DEFAULT_TARGET_FLAGS \
1073 | TARGET_CPU_DEFAULT \
1074 | TARGET_ENDIAN_DEFAULT \
1075 | TARGET_FP_EXCEPTIONS_DEFAULT \
1076 | MASK_CHECK_ZERO_DIV \
1078 #undef TARGET_HANDLE_OPTION
1079 #define TARGET_HANDLE_OPTION mips_handle_option
1081 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1082 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1084 #undef TARGET_VALID_POINTER_MODE
1085 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1086 #undef TARGET_RTX_COSTS
1087 #define TARGET_RTX_COSTS mips_rtx_costs
1088 #undef TARGET_ADDRESS_COST
1089 #define TARGET_ADDRESS_COST mips_address_cost
1091 #undef TARGET_IN_SMALL_DATA_P
1092 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1094 #undef TARGET_MACHINE_DEPENDENT_REORG
1095 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1097 #undef TARGET_ASM_FILE_START
1098 #undef TARGET_ASM_FILE_END
1099 #define TARGET_ASM_FILE_START mips_file_start
1100 #define TARGET_ASM_FILE_END mips_file_end
1101 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1102 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1104 #undef TARGET_INIT_LIBFUNCS
1105 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1107 #undef TARGET_BUILD_BUILTIN_VA_LIST
1108 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1109 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1110 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1112 #undef TARGET_PROMOTE_FUNCTION_ARGS
1113 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1114 #undef TARGET_PROMOTE_FUNCTION_RETURN
1115 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1116 #undef TARGET_PROMOTE_PROTOTYPES
1117 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1119 #undef TARGET_RETURN_IN_MEMORY
1120 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1121 #undef TARGET_RETURN_IN_MSB
1122 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1124 #undef TARGET_ASM_OUTPUT_MI_THUNK
1125 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1126 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1127 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1129 #undef TARGET_SETUP_INCOMING_VARARGS
1130 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1131 #undef TARGET_STRICT_ARGUMENT_NAMING
1132 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1133 #undef TARGET_MUST_PASS_IN_STACK
1134 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1135 #undef TARGET_PASS_BY_REFERENCE
1136 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1137 #undef TARGET_CALLEE_COPIES
1138 #define TARGET_CALLEE_COPIES mips_callee_copies
1139 #undef TARGET_ARG_PARTIAL_BYTES
1140 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1142 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1143 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1145 #undef TARGET_INIT_BUILTINS
1146 #define TARGET_INIT_BUILTINS mips_init_builtins
1147 #undef TARGET_EXPAND_BUILTIN
1148 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1150 #undef TARGET_HAVE_TLS
1151 #define TARGET_HAVE_TLS HAVE_AS_TLS
1153 #undef TARGET_CANNOT_FORCE_CONST_MEM
1154 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1156 #undef TARGET_ENCODE_SECTION_INFO
1157 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1159 #undef TARGET_ATTRIBUTE_TABLE
1160 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1162 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1163 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1165 #undef TARGET_MIN_ANCHOR_OFFSET
1166 #define TARGET_MIN_ANCHOR_OFFSET -32768
1167 #undef TARGET_MAX_ANCHOR_OFFSET
1168 #define TARGET_MAX_ANCHOR_OFFSET 32767
1169 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1170 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1171 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1172 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
/* Instantiate the target hook vector from the TARGET_* macro overrides
   defined above; this is the back end's single point of contact with
   the target-independent parts of the compiler.  */
1174 struct gcc_target targetm = TARGET_INITIALIZER;
1176 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
/* NOTE(review): this extract elides interior lines (the embedded source
   line numbers jump, e.g. 1181 -> 1184), so braces and some statements
   of this function are missing here.  Comments below describe only what
   the visible lines establish.  */
1178 static enum mips_symbol_type
1179 mips_classify_symbol (rtx x)
/* LABEL_REF handling: the elided condition (lines 1182-1183) chooses
   between the three returns that follow -- TODO confirm against the
   full source.  */
1181 if (GET_CODE (x) == LABEL_REF)
1184 return SYMBOL_CONSTANT_POOL;
1185 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1186 return SYMBOL_GOT_LOCAL;
1187 return SYMBOL_GENERAL;
/* From here on X must be a SYMBOL_REF.  */
1190 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1192 if (SYMBOL_REF_TLS_MODEL (x))
1195 if (CONSTANT_POOL_ADDRESS_P (x))
1198 return SYMBOL_CONSTANT_POOL;
/* Constant-pool entries small enough for the GP-relative section
   count as small data.  */
1200 if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1201 return SYMBOL_SMALL_DATA;
1204 if (SYMBOL_REF_SMALL_P (x))
1205 return SYMBOL_SMALL_DATA;
1207 if (TARGET_ABICALLS)
1209 if (SYMBOL_REF_DECL (x) == 0)
1211 if (!SYMBOL_REF_LOCAL_P (x))
1212 return SYMBOL_GOT_GLOBAL;
1216 /* Don't use GOT accesses for locally-binding symbols if
1217 TARGET_ABSOLUTE_ABICALLS. Otherwise, there are three
1220 - o32 PIC (either with or without explicit relocs)
1221 - n32/n64 PIC without explicit relocs
1222 - n32/n64 PIC with explicit relocs
1224 In the first case, both local and global accesses will use an
1225 R_MIPS_GOT16 relocation. We must correctly predict which of
1226 the two semantics (local or global) the assembler and linker
1227 will apply. The choice doesn't depend on the symbol's
1228 visibility, so we deliberately ignore decl_visibility and
1231 In the second case, the assembler will not use R_MIPS_GOT16
1232 relocations, but it chooses between local and global accesses
1233 in the same way as for o32 PIC.
1235 In the third case we have more freedom since both forms of
1236 access will work for any kind of symbol. However, there seems
1237 little point in doing things differently. */
1238 if (DECL_P (SYMBOL_REF_DECL (x))
1239 && TREE_PUBLIC (SYMBOL_REF_DECL (x))
1240 && !(TARGET_ABSOLUTE_ABICALLS
1241 && targetm.binds_local_p (SYMBOL_REF_DECL (x))))
1242 return SYMBOL_GOT_GLOBAL;
1245 if (!TARGET_ABSOLUTE_ABICALLS)
1246 return SYMBOL_GOT_LOCAL;
1249 return SYMBOL_GENERAL;
1253 /* Split X into a base and a constant offset, storing them in *BASE
1254 and *OFFSET respectively. */
/* NOTE(review): most of this function's body is elided in this extract
   (the initial *base/*offset assignments and braces are missing); only
   the CONST/PLUS unwrapping is visible.  */
1257 mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
1261 if (GET_CODE (x) == CONST)
1264 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
/* Fold the CONST_INT term of a (plus base const) into *OFFSET.  */
1266 *offset += INTVAL (XEXP (x, 1));
1273 /* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
1274 to the same object as SYMBOL, or to the same object_block. */
/* NOTE(review): braces, return statements and the "offset >= 0" halves
   of some range checks are elided in this extract (numbering gaps).  */
1277 mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
1279 if (GET_CODE (symbol) != SYMBOL_REF)
/* Case 1: a constant-pool entry -- bound the offset by the pool
   entry's mode size.  */
1282 if (CONSTANT_POOL_ADDRESS_P (symbol)
1284 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
/* Case 2: a declared object -- bound the offset by the decl's size.  */
1287 if (SYMBOL_REF_DECL (symbol) != 0
1289 && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
/* Case 3: a symbol placed in an object_block -- bound the offset by
   the block's total size.  */
1292 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
1293 && SYMBOL_REF_BLOCK (symbol)
1294 && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
1295 && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
1296 < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
1303 /* Return true if X is a symbolic constant that can be calculated in
1304 the same way as a bare symbol. If it is, store the type of the
1305 symbol in *SYMBOL_TYPE. */
/* NOTE(review): interior lines (braces, several returns and case
   bodies) are elided in this extract; the switch below is therefore
   incomplete as shown.  */
1308 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
1310 HOST_WIDE_INT offset;
/* Peel off any constant offset first, then classify the bare base.  */
1312 mips_split_const (x, &x, &offset);
1313 if (UNSPEC_ADDRESS_P (x))
1314 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1315 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1317 *symbol_type = mips_classify_symbol (x);
/* TLS symbols are never treated as ordinary symbolic constants.  */
1318 if (*symbol_type == SYMBOL_TLS)
1327 /* Check whether a nonzero offset is valid for the underlying
1329 switch (*symbol_type)
1331 case SYMBOL_GENERAL:
1332 case SYMBOL_64_HIGH:
1335 /* If the target has 64-bit pointers and the object file only
1336 supports 32-bit symbols, the values of those symbols will be
1337 sign-extended. In this case we can't allow an arbitrary offset
1338 in case the 32-bit value X + OFFSET has a different sign from X. */
1339 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1340 return mips_offset_within_object_p (x, offset);
1342 /* In other cases the relocations can handle any offset. */
1345 case SYMBOL_CONSTANT_POOL:
1346 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1347 In this case, we no longer have access to the underlying constant,
1348 but the original symbol-based access was known to be valid. */
1349 if (GET_CODE (x) == LABEL_REF)
1354 case SYMBOL_SMALL_DATA:
1355 /* Make sure that the offset refers to something within the
1356 underlying object. This should guarantee that the final
1357 PC- or GP-relative offset is within the 16-bit limit. */
1358 return mips_offset_within_object_p (x, offset);
1360 case SYMBOL_GOT_LOCAL:
1361 case SYMBOL_GOTOFF_PAGE:
1362 /* The linker should provide enough local GOT entries for a
1363 16-bit offset. Larger offsets may lead to GOT overflow. */
1364 return SMALL_OPERAND (offset);
/* The bodies of the remaining cases are elided in this extract.  */
1366 case SYMBOL_GOT_GLOBAL:
1367 case SYMBOL_GOTOFF_GLOBAL:
1368 case SYMBOL_GOTOFF_CALL:
1369 case SYMBOL_GOTOFF_LOADGP:
1374 case SYMBOL_GOTTPREL:
1382 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
/* Returns nonzero if hard or (in non-strict mode) pseudo register
   REGNO can serve as a base register for an address of mode MODE.
   NOTE(review): some interior lines are elided in this extract.  */
1385 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
/* Map a pseudo to its allocated hard register via reg_renumber; the
   elided lines presumably handle the !strict / unallocated cases --
   TODO confirm.  */
1387 if (regno >= FIRST_PSEUDO_REGISTER)
1391 regno = reg_renumber[regno];
1394 /* These fake registers will be eliminated to either the stack or
1395 hard frame pointer, both of which are usually valid base registers.
1396 Reload deals with the cases where the eliminated form isn't valid. */
1397 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1400 /* In mips16 mode, the stack pointer can only address word and doubleword
1401 values, nothing smaller. There are two problems here:
1403 (a) Instantiating virtual registers can introduce new uses of the
1404 stack pointer. If these virtual registers are valid addresses,
1405 the stack pointer should be too.
1407 (b) Most uses of the stack pointer are not made explicit until
1408 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1409 We don't know until that stage whether we'll be eliminating to the
1410 stack pointer (which needs the restriction) or the hard frame
1411 pointer (which doesn't).
1413 All in all, it seems more consistent to only enforce this restriction
1414 during and after reload. */
1415 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1416 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
/* mips16 addresses are restricted to the M16 register subset.  */
1418 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1422 /* Return true if X is a valid base register for the given mode.
1423 Allow only hard registers if STRICT. */
/* NOTE(review): the body here is partially elided; the surviving lines
   strip a non-strict SUBREG and defer to mips_regno_mode_ok_for_base_p.  */
1426 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1428 if (!strict && GET_CODE (x) == SUBREG)
1432 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1436 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1437 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
/* NOTE(review): several case bodies and return statements are elided
   in this extract (embedded numbering gaps).  */
1440 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1441 enum machine_mode mode)
1443 switch (symbol_type)
/* Plain symbols are directly addressable except in mips16 code.  */
1445 case SYMBOL_GENERAL:
1446 return !TARGET_MIPS16;
1448 case SYMBOL_SMALL_DATA:
1451 case SYMBOL_CONSTANT_POOL:
1452 /* PC-relative addressing is only available for lw and ld. */
1453 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1455 case SYMBOL_GOT_LOCAL:
1458 case SYMBOL_GOT_GLOBAL:
1459 /* The address will have to be loaded from the GOT first. */
1466 case SYMBOL_GOTTPREL:
1470 case SYMBOL_GOTOFF_PAGE:
1471 case SYMBOL_GOTOFF_GLOBAL:
1472 case SYMBOL_GOTOFF_CALL:
1473 case SYMBOL_GOTOFF_LOADGP:
1474 case SYMBOL_64_HIGH:
1483 /* Return true if X is a valid address for machine mode MODE. If it is,
1484 fill in INFO appropriately. STRICT is true if we should only accept
1485 hard base registers. */
/* NOTE(review): the switch's case labels and some assignments are
   elided in this extract; from the surviving bodies the arms appear to
   be REG, PLUS, LO_SUM, CONST_INT and a symbolic-constant default --
   TODO confirm against the full source.  */
1488 mips_classify_address (struct mips_address_info *info, rtx x,
1489 enum machine_mode mode, int strict)
1491 switch (GET_CODE (x))
/* Bare register: offset is implicitly zero.  */
1495 info->type = ADDRESS_REG;
1497 info->offset = const0_rtx;
1498 return mips_valid_base_register_p (info->reg, mode, strict);
/* Base + constant offset.  */
1501 info->type = ADDRESS_REG;
1502 info->reg = XEXP (x, 0);
1503 info->offset = XEXP (x, 1);
1504 return (mips_valid_base_register_p (info->reg, mode, strict)
1505 && const_arith_operand (info->offset, VOIDmode));
/* LO_SUM of a base register and the low part of a symbol; the symbol
   must have a %lo-style relocation available.  */
1508 info->type = ADDRESS_LO_SUM;
1509 info->reg = XEXP (x, 0);
1510 info->offset = XEXP (x, 1);
1511 return (mips_valid_base_register_p (info->reg, mode, strict)
1512 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1513 && mips_symbolic_address_p (info->symbol_type, mode)
1514 && mips_lo_relocs[info->symbol_type] != 0);
1517 /* Small-integer addresses don't occur very often, but they
1518 are legitimate if $0 is a valid base register. */
1519 info->type = ADDRESS_CONST_INT;
1520 return !TARGET_MIPS16 && SMALL_INT (x);
/* Symbolic constants that are not split into HIGH/LO_SUM pairs.  */
1525 info->type = ADDRESS_SYMBOLIC;
1526 return (mips_symbolic_constant_p (x, &info->symbol_type)
1527 && mips_symbolic_address_p (info->symbol_type, mode)
1528 && !mips_split_p[info->symbol_type]);
1535 /* Return true if X is a thread-local symbol. */
/* True iff X is a SYMBOL_REF with a nonzero TLS model.  */
1538 mips_tls_operand_p (rtx x)
1540 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1543 /* Return true if X can not be forced into a constant pool. */
/* for_each_rtx callback: reports whether *X is a TLS symbol; DATA is
   unused.  Used by mips_cannot_force_const_mem below.  */
1546 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1548 return mips_tls_operand_p (*x);
1551 /* Return true if X can not be forced into a constant pool. */
/* Implements TARGET_CANNOT_FORCE_CONST_MEM.  NOTE(review): braces and
   some return statements are elided in this extract.  */
1554 mips_cannot_force_const_mem (rtx x)
1557 HOST_WIDE_INT offset;
1561 /* As an optimization, reject constants that mips_legitimize_move
1564 Suppose we have a multi-instruction sequence that loads constant C
1565 into register R. If R does not get allocated a hard register, and
1566 R is used in an operand that allows both registers and memory
1567 references, reload will consider forcing C into memory and using
1568 one of the instruction's memory alternatives. Returning false
1569 here will force it to use an input reload instead. */
1570 if (GET_CODE (x) == CONST_INT)
/* Symbol + small offset can be synthesized directly; keep it out of
   the constant pool.  */
1573 mips_split_const (x, &base, &offset);
1574 if (symbolic_operand (base, VOIDmode) && SMALL_OPERAND (offset))
/* TLS references must never be placed in the pool.  */
1578 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1584 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1585 constant pools, but normal-mode code doesn't need to. */
1588 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1589 rtx x ATTRIBUTE_UNUSED)
/* Both parameters are deliberately ignored: the answer depends only on
   whether we are generating mips16 code.  */
1591 return !TARGET_MIPS16;
1594 /* Return the number of instructions needed to load a symbol of the
1595 given type into a register. If valid in an address, the same number
1596 of instructions are needed for loads and stores. Treat extended
1597 mips16 instructions as two instructions. */
/* NOTE(review): the switch statement's opening, several case bodies
   and returns are elided in this extract (embedded numbering gaps).  */
1600 mips_symbol_insns (enum mips_symbol_type type)
1604 case SYMBOL_GENERAL:
1605 /* In mips16 code, general symbols must be fetched from the
1610 /* When using 64-bit symbols, we need 5 preparatory instructions,
1613 lui $at,%highest(symbol)
1614 daddiu $at,$at,%higher(symbol)
1616 daddiu $at,$at,%hi(symbol)
1619 The final address is then $at + %lo(symbol). With 32-bit
1620 symbols we just need a preparatory lui. */
1621 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1623 case SYMBOL_SMALL_DATA:
1626 case SYMBOL_CONSTANT_POOL:
1627 /* This case is for mips16 only. Assume we'll need an
1628 extended instruction. */
1631 case SYMBOL_GOT_LOCAL:
1632 case SYMBOL_GOT_GLOBAL:
1633 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1634 the local/global classification is accurate. See override_options
1637 The worst cases are:
1639 (1) For local symbols when generating o32 or o64 code. The assembler
1645 ...and the final address will be $at + %lo(symbol).
1647 (2) For global symbols when -mxgot. The assembler will use:
1649 lui $at,%got_hi(symbol)
1652 ...and the final address will be $at + %got_lo(symbol). */
1655 case SYMBOL_GOTOFF_PAGE:
1656 case SYMBOL_GOTOFF_GLOBAL:
1657 case SYMBOL_GOTOFF_CALL:
1658 case SYMBOL_GOTOFF_LOADGP:
1659 case SYMBOL_64_HIGH:
1665 case SYMBOL_GOTTPREL:
1667 /* Check whether the offset is a 16- or 32-bit value. */
1668 return mips_split_p[type] ? 2 : 1;
1671 /* We don't treat a bare TLS symbol as a constant. */
1677 /* Return true if X is a legitimate $sp-based address for mode MDOE. */
/* (Typo in the original comment: "MDOE" should read "MODE".)  */
1680 mips_stack_address_p (rtx x, enum machine_mode mode)
1682 struct mips_address_info addr;
/* Accept only a plain register-style address whose base is $sp.  */
1684 return (mips_classify_address (&addr, x, mode, false)
1685 && addr.type == ADDRESS_REG
1686 && addr.reg == stack_pointer_rtx);
1689 /* Return true if a value at OFFSET bytes from BASE can be accessed
1690 using an unextended mips16 instruction. MODE is the mode of the
1693 Usually the offset in an unextended instruction is a 5-bit field.
1694 The offset is unsigned and shifted left once for HIs, twice
1695 for SIs, and so on. An exception is SImode accesses off the
1696 stack pointer, which have an 8-bit immediate field. */
/* NOTE(review): the opening of the guard condition and braces are
   elided in this extract.  */
1699 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
/* OFFSET must be a non-negative, mode-aligned CONST_INT.  */
1702 && GET_CODE (offset) == CONST_INT
1703 && INTVAL (offset) >= 0
1704 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
/* $sp-relative SImode: 8-bit scaled field (256 units); otherwise the
   usual 5-bit scaled field (32 units).  */
1706 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1707 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1708 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1714 /* Return the number of instructions needed to load or store a value
1715 of mode MODE at X. Return 0 if X isn't valid for MODE.
1717 For mips16 code, count extended instructions as two instructions. */
/* NOTE(review): the switch opening, the ADDRESS_REG case label and
   some returns are elided in this extract.  */
1720 mips_address_insns (rtx x, enum machine_mode mode)
1722 struct mips_address_info addr;
1725 if (mode == BLKmode)
1726 /* BLKmode is used for single unaligned loads and stores. */
1729 /* Each word of a multi-word value will be accessed individually. */
1730 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1732 if (mips_classify_address (&addr, x, mode, false))
/* mips16 references that need an extended encoding cost double.  */
1737 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1741 case ADDRESS_LO_SUM:
1742 return (TARGET_MIPS16 ? factor * 2 : factor);
1744 case ADDRESS_CONST_INT:
1747 case ADDRESS_SYMBOLIC:
1748 return factor * mips_symbol_insns (addr.symbol_type);
1754 /* Likewise for constant X. */
/* Returns the number of instructions needed to load constant X, or 0
   if it cannot be loaded directly.  NOTE(review): the switch's case
   labels (HIGH, CONST_INT, CONST_DOUBLE/VECTOR, CONST, SYMBOL_REF/
   LABEL_REF -- TODO confirm) and several lines are elided here.  */
1757 mips_const_insns (rtx x)
1759 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1760 enum mips_symbol_type symbol_type;
1761 HOST_WIDE_INT offset;
1763 switch (GET_CODE (x))
/* HIGH is only usable for symbols we know how to split.  */
1767 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1768 || !mips_split_p[symbol_type])
1775 /* Unsigned 8-bit constants can be loaded using an unextended
1776 LI instruction. Unsigned 16-bit constants can be loaded
1777 using an extended LI. Negative constants must be loaded
1778 using LI and then negated. */
1779 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1780 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1781 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1782 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
/* Non-mips16 integers: cost of the synthesized operation sequence.  */
1785 return mips_build_integer (codes, INTVAL (x));
/* Floating/vector zero is free (one move from $0) outside mips16.  */
1789 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1795 /* See if we can refer to X directly. */
1796 if (mips_symbolic_constant_p (x, &symbol_type))
1797 return mips_symbol_insns (symbol_type);
1799 /* Otherwise try splitting the constant into a base and offset.
1800 16-bit offsets can be added using an extra addiu. Larger offsets
1801 must be calculated separately and then added to the base. */
1802 mips_split_const (x, &x, &offset);
1805 int n = mips_const_insns (x);
1808 if (SMALL_OPERAND (offset))
1811 return n + 1 + mips_build_integer (codes, offset);
1818 return mips_symbol_insns (mips_classify_symbol (x));
1826 /* Return the number of instructions needed for memory reference X.
1827 Count extended mips16 instructions as two instructions. */
1830 mips_fetch_insns (rtx x)
/* X must be a MEM; delegate to the address-cost calculation.  */
1832 gcc_assert (MEM_P (x));
1833 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1837 /* Return the number of instructions needed for an integer division. */
/* NOTE(review): the base count and the increments are elided in this
   extract; only the zero-divide-check and R4000/R4400-errata guards
   are visible.  */
1840 mips_idiv_insns (void)
1845 if (TARGET_CHECK_ZERO_DIV)
1847 if (GENERATE_DIVIDE_TRAPS)
/* Workarounds for R4000/R4400 division errata add instructions.  */
1853 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1858 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
1859 returns a nonzero value if X is a legitimate address for a memory
1860 operand of the indicated MODE. STRICT is nonzero if this function
1861 is called during reload. */
1864 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1866 struct mips_address_info addr;
/* ADDR is filled in but discarded; only the validity result matters.  */
1868 return mips_classify_address (&addr, x, mode, strict);
1872 /* Copy VALUE to a register and return that register. If new psuedos
1873 are allowed, copy it into a new register, otherwise use DEST. */
/* (Typo in the original comment: "psuedos" should read "pseudos".)
   NOTE(review): the else-branch structure and final return are
   partially elided in this extract.  */
1876 mips_force_temporary (rtx dest, rtx value)
1878 if (!no_new_pseudos)
1879 return force_reg (Pmode, value);
/* Reload-safe path: reuse DEST (copied so the original rtx is not
   shared) as the temporary.  */
1882 emit_move_insn (copy_rtx (dest), value);
1888 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
1889 and is used to load the high part into a register. */
/* NOTE(review): the condition selecting between the two HIGH-part
   sources (apparently a TARGET_MIPS16 check -- TODO confirm) is elided
   in this extract.  */
1892 mips_split_symbol (rtx temp, rtx addr)
1897 high = mips16_gp_pseudo_reg ();
1899 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1900 return gen_rtx_LO_SUM (Pmode, high, addr);
1904 /* Return an UNSPEC address with underlying address ADDRESS and symbol
1905 type SYMBOL_TYPE. */
1908 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1911 HOST_WIDE_INT offset;
/* Wrap only the bare base in the UNSPEC; re-apply the offset outside
   so UNSPEC_ADDRESS always contains a plain symbol or label.  */
1913 mips_split_const (address, &base, &offset);
1914 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1915 UNSPEC_ADDRESS_FIRST + symbol_type);
1916 return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1920 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1921 high part to BASE and return the result. Just return BASE otherwise.
1922 TEMP is available as a temporary register if needed.
1924 The returned expression can be used as the first operand to a LO_SUM. */
/* NOTE(review): the fall-through "return base" path is elided in this
   extract.  */
1927 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1928 enum mips_symbol_type symbol_type)
1930 if (mips_split_p[symbol_type])
/* Force the HIGH part into a register, then add BASE to it.  */
1932 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1933 addr = mips_force_temporary (temp, addr);
1934 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1940 /* Return a legitimate address for REG + OFFSET. TEMP is as for
1941 mips_force_temporary; it is only needed when OFFSET is not a
/* NOTE(review): the tail of this comment and the branch structure
   (apparently a TARGET_MIPS16 split -- TODO confirm) are elided in
   this extract.  */
1945 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1947 if (!SMALL_OPERAND (offset))
1952 /* Load the full offset into a register so that we can use
1953 an unextended instruction for the address itself. */
1954 high = GEN_INT (offset);
1959 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
1960 high = GEN_INT (CONST_HIGH_PART (offset));
1961 offset = CONST_LOW_PART (offset);
/* Materialize HIGH and fold it into the base register.  */
1963 high = mips_force_temporary (temp, high);
1964 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
1966 return plus_constant (reg, offset);
1969 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
1970 referencing, and TYPE is the symbol type to use (either global
1971 dynamic or local dynamic). V0 is an RTX for the return value
1972 location. The entire insn sequence is returned. */
/* Cached SYMBOL_REF for "__tls_get_addr"; GTY(()) so the GC can see it
   across functions.  */
1974 static GTY(()) rtx mips_tls_symbol;
/* NOTE(review): the start_sequence/end_sequence bracketing implied by
   the trailing get_insns () is elided in this extract.  */
1977 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
1979 rtx insn, loc, tga, a0;
/* The TLS argument is passed in the first integer argument register.  */
1981 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
1983 if (!mips_tls_symbol)
1984 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
1986 loc = mips_unspec_address (sym, type);
1990 emit_insn (gen_rtx_SET (Pmode, a0,
1991 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
1992 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
1993 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
/* Mark the call const/pure and record its register usage so the
   optimizers keep A0 live into it and V0 live out of it.  */
1994 CONST_OR_PURE_CALL_P (insn) = 1;
1995 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
1996 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
1997 insn = get_insns ();
2004 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2005 return value will be a valid address and move_operand (either a REG
/* NOTE(review): the rest of this comment, the switch statement opening,
   break statements and the default case are elided in this extract.  */
2009 mips_legitimize_tls_address (rtx loc)
2011 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2012 enum tls_model model;
2014 v0 = gen_rtx_REG (Pmode, GP_RETURN)
2015 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2017 model = SYMBOL_REF_TLS_MODEL (loc);
/* General dynamic: one __tls_get_addr call per symbol.  */
2021 case TLS_MODEL_GLOBAL_DYNAMIC:
2022 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2023 dest = gen_reg_rtx (Pmode);
2024 emit_libcall_block (insn, dest, v0, loc);
/* Local dynamic: one __tls_get_addr call for the module, then a
   DTPREL offset per symbol.  */
2027 case TLS_MODEL_LOCAL_DYNAMIC:
2028 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2029 tmp1 = gen_reg_rtx (Pmode);
2031 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2032 share the LDM result with other LD model accesses. */
2033 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2035 emit_libcall_block (insn, tmp1, v0, eqv);
2037 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2038 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2039 mips_unspec_address (loc, SYMBOL_DTPREL));
/* Initial exec: load the TP-relative offset from the GOT and add the
   thread pointer.  */
2042 case TLS_MODEL_INITIAL_EXEC:
2043 tmp1 = gen_reg_rtx (Pmode);
2044 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2045 if (Pmode == DImode)
2047 emit_insn (gen_tls_get_tp_di (v1));
2048 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2052 emit_insn (gen_tls_get_tp_si (v1));
2053 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2055 dest = gen_reg_rtx (Pmode);
2056 emit_insn (gen_add3_insn (dest, tmp1, v1));
/* Local exec: thread pointer plus a link-time TPREL offset.  */
2059 case TLS_MODEL_LOCAL_EXEC:
2061 if (Pmode == DImode)
2062 emit_insn (gen_tls_get_tp_di (v1));
2064 emit_insn (gen_tls_get_tp_si (v1));
2066 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2067 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2068 mips_unspec_address (loc, SYMBOL_TPREL));
2078 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2079 be legitimized in a way that the generic machinery might not expect,
2080 put the new address in *XLOC and return true. MODE is the mode of
2081 the memory being accessed. */
/* NOTE(review): braces, "return true" statements and the final
   "return false" are elided in this extract.  */
2084 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2086 enum mips_symbol_type symbol_type;
/* TLS symbols take a dedicated legitimization path.  */
2088 if (mips_tls_operand_p (*xloc))
2090 *xloc = mips_legitimize_tls_address (*xloc);
2094 /* See if the address can split into a high part and a LO_SUM. */
2095 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2096 && mips_symbolic_address_p (symbol_type, mode)
2097 && mips_split_p[symbol_type])
2099 *xloc = mips_split_symbol (0, *xloc);
2103 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2105 /* Handle REG + CONSTANT using mips_add_offset. */
2108 reg = XEXP (*xloc, 0);
2109 if (!mips_valid_base_register_p (reg, mode, 0))
2110 reg = copy_to_mode_reg (Pmode, reg);
2111 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2119 /* Subroutine of mips_build_integer (with the same interface).
2120 Assume that the final action in the sequence should be a left shift. */
/* NOTE(review): the shift-counter initialization and the final return
   (presumably i + 1 -- TODO confirm) are elided in this extract.  */
2123 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2125 unsigned int i, shift;
2127 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2128 since signed numbers are easier to load than unsigned ones. */
2130 while ((value & 1) == 0)
2131 value /= 2, shift++;
/* Build the reduced value, then append the compensating ASHIFT.  */
2133 i = mips_build_integer (codes, value);
2134 codes[i].code = ASHIFT;
2135 codes[i].value = shift;
2140 /* As for mips_build_shift, but assume that the final action will be
2141 an IOR or PLUS operation. */
/* NOTE(review): braces and the returns at the end of each branch are
   elided in this extract.  */
2144 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2146 unsigned HOST_WIDE_INT high;
/* HIGH is VALUE with its low 16 bits cleared.  */
2149 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2150 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2152 /* The constant is too complex to load with a simple lui/ori pair
2153 so our goal is to clear as many trailing zeros as possible.
2154 In this case, we know bit 16 is set and that the low 16 bits
2155 form a negative number. If we subtract that number from VALUE,
2156 we will clear at least the lowest 17 bits, maybe more. */
2157 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2158 codes[i].code = PLUS;
2159 codes[i].value = CONST_LOW_PART (value);
/* Simple case: build HIGH, then OR in the low 16 bits.  */
2163 i = mips_build_integer (codes, high);
2164 codes[i].code = IOR;
2165 codes[i].value = value & 0xffff;
2171 /* Fill CODES with a sequence of rtl operations to load VALUE.
2172 Return the number of operations needed. */
/* NOTE(review): braces and some return statements are elided in this
   extract (embedded numbering gaps).  */
2175 mips_build_integer (struct mips_integer_op *codes,
2176 unsigned HOST_WIDE_INT value)
2178 if (SMALL_OPERAND (value)
2179 || SMALL_OPERAND_UNSIGNED (value)
2180 || LUI_OPERAND (value))
2182 /* The value can be loaded with a single instruction. */
2183 codes[0].code = UNKNOWN;
2184 codes[0].value = value;
2187 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2189 /* Either the constant is a simple LUI/ORI combination or its
2190 lowest bit is set. We don't want to shift in this case. */
2191 return mips_build_lower (codes, value);
2193 else if ((value & 0xffff) == 0)
2195 /* The constant will need at least three actions. The lowest
2196 16 bits are clear, so the final action will be a shift. */
2197 return mips_build_shift (codes, value);
2201 /* The final action could be a shift, add or inclusive OR.
2202 Rather than use a complex condition to select the best
2203 approach, try both mips_build_shift and mips_build_lower
2204 and pick the one that gives the shortest sequence.
2205 Note that this case is only used once per constant. */
2206 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2207 unsigned int cost, alt_cost;
2209 cost = mips_build_shift (codes, value);
2210 alt_cost = mips_build_lower (alt_codes, value);
/* Keep whichever of the two candidate sequences is shorter.  */
2211 if (alt_cost < cost)
2213 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2221 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2224 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2226 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2227 enum machine_mode mode;
2228 unsigned int i, cost;
2231 mode = GET_MODE (dest);
2232 cost = mips_build_integer (codes, value);
2234 /* Apply each binary operation to X. Invariant: X is a legitimate
2235 source operand for a SET pattern. */
2236 x = GEN_INT (codes[0].value);
2237 for (i = 1; i < cost; i++)
2241 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2245 x = force_reg (mode, x);
2246 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2249 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2253 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2254 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2258 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2261 HOST_WIDE_INT offset;
2263 /* Split moves of big integers into smaller pieces. */
2264 if (splittable_const_int_operand (src, mode))
2266 mips_move_integer (dest, dest, INTVAL (src));
2270 /* Split moves of symbolic constants into high/low pairs. */
2271 if (splittable_symbolic_operand (src, mode))
2273 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
2277 if (mips_tls_operand_p (src))
2279 emit_move_insn (dest, mips_legitimize_tls_address (src));
2283 /* If we have (const (plus symbol offset)), load the symbol first
2284 and then add in the offset. This is usually better than forcing
2285 the constant into memory, at least in non-mips16 code. */
2286 mips_split_const (src, &base, &offset);
2289 && (!no_new_pseudos || SMALL_OPERAND (offset)))
2291 base = mips_force_temporary (dest, base);
2292 emit_move_insn (dest, mips_add_offset (0, base, offset));
2296 src = force_const_mem (mode, src);
2298 /* When using explicit relocs, constant pool references are sometimes
2299 not legitimate addresses. */
2300 if (!memory_operand (src, VOIDmode))
2301 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2302 emit_move_insn (dest, src);
2306 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2307 sequence that is valid. */
2310 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2312 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2314 emit_move_insn (dest, force_reg (mode, src));
2318 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2319 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2320 && REG_P (src) && MD_REG_P (REGNO (src))
2321 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2323 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2324 if (GET_MODE_SIZE (mode) <= 4)
2325 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2326 gen_rtx_REG (SImode, REGNO (src)),
2327 gen_rtx_REG (SImode, other_regno)));
2329 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2330 gen_rtx_REG (DImode, REGNO (src)),
2331 gen_rtx_REG (DImode, other_regno)));
2335 /* We need to deal with constants that would be legitimate
2336 immediate_operands but not legitimate move_operands. */
2337 if (CONSTANT_P (src) && !move_operand (src, mode))
2339 mips_legitimize_const_move (mode, dest, src);
2340 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2346 /* We need a lot of little routines to check constant values on the
2347 mips16. These are used to figure out how long the instruction will
2348 be. It would be much better to do this using constraints, but
2349 there aren't nearly enough letters available. */
2352 m16_check_op (rtx op, int low, int high, int mask)
2354 return (GET_CODE (op) == CONST_INT
2355 && INTVAL (op) >= low
2356 && INTVAL (op) <= high
2357 && (INTVAL (op) & mask) == 0);
2361 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2363 return m16_check_op (op, 0x1, 0x8, 0);
2367 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2369 return m16_check_op (op, - 0x8, 0x7, 0);
2373 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2375 return m16_check_op (op, - 0x7, 0x8, 0);
2379 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2381 return m16_check_op (op, - 0x10, 0xf, 0);
2385 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2387 return m16_check_op (op, - 0xf, 0x10, 0);
2391 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2393 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2397 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2399 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2403 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2405 return m16_check_op (op, - 0x80, 0x7f, 0);
2409 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2411 return m16_check_op (op, - 0x7f, 0x80, 0);
2415 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2417 return m16_check_op (op, 0x0, 0xff, 0);
2421 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2423 return m16_check_op (op, - 0xff, 0x0, 0);
2427 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2429 return m16_check_op (op, - 0x1, 0xfe, 0);
2433 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2435 return m16_check_op (op, 0x0, 0xff << 2, 3);
2439 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2441 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2445 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2447 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2451 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2453 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
/* Implementation of the TARGET_RTX_COSTS hook: estimate the cost of
   rtx X, writing the answer to *TOTAL.  OUTER_CODE is the code of the
   expression containing X.
   NOTE(review): this listing is an elided fragment — many interior
   lines (the `switch (code)` skeleton, case labels, return statements
   and braces) are missing from the capture.  The comments below
   annotate only the lines that are visible.  */
2457 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2459 enum machine_mode mode = GET_MODE (x);
2460 bool float_mode_p = FLOAT_MODE_P (mode);
/* CONST_INT handling (mips16): costs depend on how the constant is
   used by the surrounding expression, hence the OUTER_CODE tests.  */
2467 /* A number between 1 and 8 inclusive is efficient for a shift.
2468 Otherwise, we will need an extended instruction. */
2469 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2470 || (outer_code) == LSHIFTRT)
2472 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2475 *total = COSTS_N_INSNS (1);
2479 /* We can use cmpi for an xor with an unsigned 16 bit value. */
2480 if ((outer_code) == XOR
2481 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2487 /* We may be able to use slt or sltu for a comparison with a
2488 signed 16 bit value. (The boundary conditions aren't quite
2489 right, but this is just a heuristic anyhow.) */
2490 if (((outer_code) == LT || (outer_code) == LE
2491 || (outer_code) == GE || (outer_code) == GT
2492 || (outer_code) == LTU || (outer_code) == LEU
2493 || (outer_code) == GEU || (outer_code) == GTU)
2494 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2500 /* Equality comparisons with 0 are cheap. */
2501 if (((outer_code) == EQ || (outer_code) == NE)
2508 /* Constants in the range 0...255 can be loaded with an unextended
2509 instruction. They are therefore as cheap as a register move.
2511 Given the choice between "li R1,0...255" and "move R1,R2"
2512 (where R2 is a known constant), it is usually better to use "li",
2513 since we do not want to unnecessarily extend the lifetime
2515 if (outer_code == SET
2517 && INTVAL (x) < 256)
2525 /* These can be used anywhere. */
2530 /* Otherwise fall through to the handling below because
2531 we'll need to construct the constant. */
/* Symbolic constants: cheap if directly usable, otherwise loaded
   from the constant pool.  */
2537 if (LEGITIMATE_CONSTANT_P (x))
2539 *total = COSTS_N_INSNS (1);
2544 /* The value will need to be fetched from the constant pool. */
2545 *total = CONSTANT_POOL_COST;
/* MEM: cost is the number of instructions the address needs.  */
2551 /* If the address is legitimate, return the number of
2552 instructions it needs, otherwise use the default handling. */
2553 int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2556 *total = COSTS_N_INSNS (n + 1);
2563 *total = COSTS_N_INSNS (6);
2567 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
/* Double-word operations on a 32-bit target take multiple insns.  */
2573 if (mode == DImode && !TARGET_64BIT)
2575 *total = COSTS_N_INSNS (2);
2583 if (mode == DImode && !TARGET_64BIT)
2585 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2593 *total = COSTS_N_INSNS (1);
2595 *total = COSTS_N_INSNS (4);
2599 *total = COSTS_N_INSNS (1);
/* PLUS/MINUS and multiplication/division: costs come from the
   per-CPU mips_cost table.  */
2606 *total = mips_cost->fp_add;
2610 else if (mode == DImode && !TARGET_64BIT)
2612 *total = COSTS_N_INSNS (4);
2618 if (mode == DImode && !TARGET_64BIT)
2620 *total = COSTS_N_INSNS (4);
2627 *total = mips_cost->fp_mult_sf;
2629 else if (mode == DFmode)
2630 *total = mips_cost->fp_mult_df;
2632 else if (mode == SImode)
2633 *total = mips_cost->int_mult_si;
2636 *total = mips_cost->int_mult_di;
2645 *total = mips_cost->fp_div_sf;
2647 *total = mips_cost->fp_div_df;
2656 *total = mips_cost->int_div_di;
2658 *total = mips_cost->int_div_si;
2663 /* A sign extend from SImode to DImode in 64 bit mode is often
2664 zero instructions, because the result can often be used
2665 directly by another instruction; we'll call it one. */
2666 if (TARGET_64BIT && mode == DImode
2667 && GET_MODE (XEXP (x, 0)) == SImode)
2668 *total = COSTS_N_INSNS (1);
2670 *total = COSTS_N_INSNS (2);
/* Zero extension: one instruction normally, two for SI->DI on
   64-bit targets (dsll/dsrl pair).  */
2674 if (TARGET_64BIT && mode == DImode
2675 && GET_MODE (XEXP (x, 0)) == SImode)
2676 *total = COSTS_N_INSNS (2);
2678 *total = COSTS_N_INSNS (1);
2682 case UNSIGNED_FLOAT:
2685 case FLOAT_TRUNCATE:
2687 *total = mips_cost->fp_add;
2695 /* Provide the costs of an addressing mode that contains ADDR.
2696 If ADDR is not a valid address, its cost is irrelevant. */
2699 mips_address_cost (rtx addr)
2701 return mips_address_insns (addr, SImode);
2704 /* Return one word of double-word value OP, taking into account the fixed
2705 endianness of certain registers. HIGH_P is true to select the high part,
2706 false to select the low part. */
2709 mips_subword (rtx op, int high_p)
2712 enum machine_mode mode;
2714 mode = GET_MODE (op);
2715 if (mode == VOIDmode)
2718 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2719 byte = UNITS_PER_WORD;
2725 if (FP_REG_P (REGNO (op)))
2726 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2727 if (ACC_HI_REG_P (REGNO (op)))
2728 return gen_rtx_REG (word_mode, high_p ? REGNO (op) : REGNO (op) + 1);
2732 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2734 return simplify_gen_subreg (word_mode, op, mode, byte);
2738 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
2741 mips_split_64bit_move_p (rtx dest, rtx src)
2746 /* FP->FP moves can be done in a single instruction. */
2747 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2750 /* Check for floating-point loads and stores. They can be done using
2751 ldc1 and sdc1 on MIPS II and above. */
2754 if (FP_REG_RTX_P (dest) && MEM_P (src))
2756 if (FP_REG_RTX_P (src) && MEM_P (dest))
2763 /* Split a 64-bit move from SRC to DEST assuming that
2764 mips_split_64bit_move_p holds.
2766 Moves into and out of FPRs cause some difficulty here. Such moves
2767 will always be DFmode, since paired FPRs are not allowed to store
2768 DImode values. The most natural representation would be two separate
2769 32-bit moves, such as:
2771 (set (reg:SI $f0) (mem:SI ...))
2772 (set (reg:SI $f1) (mem:SI ...))
2774 However, the second insn is invalid because odd-numbered FPRs are
2775 not allowed to store independent values. Use the patterns load_df_low,
2776 load_df_high and store_df_high instead. */
2779 mips_split_64bit_move (rtx dest, rtx src)
2781 if (FP_REG_RTX_P (dest))
2783 /* Loading an FPR from memory or from GPRs. */
2784 emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2785 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2788 else if (FP_REG_RTX_P (src))
2790 /* Storing an FPR into memory or GPRs. */
2791 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2792 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2796 /* The operation can be split into two normal moves. Decide in
2797 which order to do them. */
2800 low_dest = mips_subword (dest, 0);
2801 if (REG_P (low_dest)
2802 && reg_overlap_mentioned_p (low_dest, src))
2804 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2805 emit_move_insn (low_dest, mips_subword (src, 0));
2809 emit_move_insn (low_dest, mips_subword (src, 0));
2810 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
/* NOTE(review): this listing is an elided fragment of mips_output_move —
   the `return` keywords for several branches, the trailing
   gcc_unreachable, and some condition lines are missing from the
   capture.  Comments below annotate only the visible lines.  */
2815 /* Return the appropriate instructions to move SRC into DEST. Assume
2816 that SRC is operand 1 and DEST is operand 0. */
2819 mips_output_move (rtx dest, rtx src)
2821 enum rtx_code dest_code, src_code;
2824 dest_code = GET_CODE (dest);
2825 src_code = GET_CODE (src);
2826 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
/* 64-bit moves that must be split are handled elsewhere.  */
2828 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Moves from a GPR (or constant zero) into various register classes
   and memory.  */
2831 if ((src_code == REG && GP_REG_P (REGNO (src)))
2832 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2834 if (dest_code == REG)
2836 if (GP_REG_P (REGNO (dest)))
2837 return "move\t%0,%z1";
2839 if (MD_REG_P (REGNO (dest)))
/* DSP accumulators: patch the "mt__" template with the accumulator
   name taken from reg_names.  */
2842 if (DSP_ACC_REG_P (REGNO (dest)))
2844 static char retval[] = "mt__\t%z1,%q0";
2845 retval[2] = reg_names[REGNO (dest)][4];
2846 retval[3] = reg_names[REGNO (dest)][5];
2850 if (FP_REG_P (REGNO (dest)))
2851 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2853 if (ALL_COP_REG_P (REGNO (dest)))
2855 static char retval[] = "dmtc_\t%z1,%0";
2857 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2858 return (dbl_p ? retval : retval + 1);
2861 if (dest_code == MEM)
2862 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Moves into a GPR from other register classes, memory and
   constants.  */
2864 if (dest_code == REG && GP_REG_P (REGNO (dest)))
2866 if (src_code == REG)
2868 if (DSP_ACC_REG_P (REGNO (src)))
2870 static char retval[] = "mf__\t%0,%q1";
2871 retval[2] = reg_names[REGNO (src)][4];
2872 retval[3] = reg_names[REGNO (src)][5];
2876 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2877 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2879 if (FP_REG_P (REGNO (src)))
2880 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2882 if (ALL_COP_REG_P (REGNO (src)))
2884 static char retval[] = "dmfc_\t%0,%1";
2886 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2887 return (dbl_p ? retval : retval + 1);
2891 if (src_code == MEM)
2892 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2894 if (src_code == CONST_INT)
2896 /* Don't use the X format, because that will give out of
2897 range numbers for 64 bit hosts and 32 bit targets. */
2899 return "li\t%0,%1\t\t\t# %X1";
2901 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2904 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2908 if (src_code == HIGH)
2909 return "lui\t%0,%h1";
2911 if (CONST_GP_P (src))
2912 return "move\t%0,%1";
2914 if (symbolic_operand (src, VOIDmode))
2915 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Moves from an FPR: FP->FP register moves and FP stores.  */
2917 if (src_code == REG && FP_REG_P (REGNO (src)))
2919 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2921 if (GET_MODE (dest) == V2SFmode)
2922 return "mov.ps\t%0,%1";
2924 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2927 if (dest_code == MEM)
2928 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2930 if (dest_code == REG && FP_REG_P (REGNO (dest)))
2932 if (src_code == MEM)
2933 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads and stores: patch width ('d'/'w') and the
   coprocessor number into the template.  */
2935 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2937 static char retval[] = "l_c_\t%0,%1";
2939 retval[1] = (dbl_p ? 'd' : 'w');
2940 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2943 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2945 static char retval[] = "s_c_\t%1,%0";
2947 retval[1] = (dbl_p ? 'd' : 'w');
2948 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2954 /* Restore $gp from its save slot. Valid only when using o32 or
2958 mips_restore_gp (void)
2962 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
2964 address = mips_add_offset (pic_offset_table_rtx,
2965 frame_pointer_needed
2966 ? hard_frame_pointer_rtx
2967 : stack_pointer_rtx,
2968 current_function_outgoing_args_size);
2969 slot = gen_rtx_MEM (Pmode, address);
2971 emit_move_insn (pic_offset_table_rtx, slot);
2972 if (!TARGET_EXPLICIT_RELOCS)
2973 emit_insn (gen_blockage ());
2976 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
2979 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
2981 emit_insn (gen_rtx_SET (VOIDmode, target,
2982 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
2985 /* Return true if CMP1 is a suitable second operand for relational
2986 operator CODE. See also the *sCC patterns in mips.md. */
2989 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
2995 return reg_or_0_operand (cmp1, VOIDmode);
2999 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3003 return arith_operand (cmp1, VOIDmode);
3006 return sle_operand (cmp1, VOIDmode);
3009 return sleu_operand (cmp1, VOIDmode);
3016 /* Canonicalize LE or LEU comparisons into LT comparisons when
3017 possible to avoid extra instructions or inverting the
3021 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3022 enum machine_mode mode)
3024 HOST_WIDE_INT original, plus_one;
3026 if (GET_CODE (*cmp1) != CONST_INT)
3029 original = INTVAL (*cmp1);
3030 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3035 if (original < plus_one)
3038 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3047 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3060 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3061 result in TARGET. CMP0 and TARGET are register_operands that have
3062 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3063 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3066 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3067 rtx target, rtx cmp0, rtx cmp1)
3069 /* First see if there is a MIPS instruction that can do this operation
3070 with CMP1 in its current form. If not, try to canonicalize the
3071 comparison to LT. If that fails, try doing the same for the
3072 inverse operation. If that also fails, force CMP1 into a register
3074 if (mips_relational_operand_ok_p (code, cmp1))
3075 mips_emit_binary (code, target, cmp0, cmp1);
3076 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3077 mips_emit_binary (code, target, cmp0, cmp1);
3080 enum rtx_code inv_code = reverse_condition (code);
3081 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3083 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3084 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3086 else if (invert_ptr == 0)
3088 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3089 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3090 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3094 *invert_ptr = !*invert_ptr;
3095 mips_emit_binary (inv_code, target, cmp0, cmp1);
3100 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3101 The register will have the same mode as CMP0. */
3104 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3106 if (cmp1 == const0_rtx)
3109 if (uns_arith_operand (cmp1, VOIDmode))
3110 return expand_binop (GET_MODE (cmp0), xor_optab,
3111 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3113 return expand_binop (GET_MODE (cmp0), sub_optab,
3114 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
/* NOTE(review): this listing is an elided fragment of mips_emit_compare —
   the return type, braces, the assignments inside the EQ/NE and
   floating-point branches, and the *OP1 = const0_rtx lines are missing
   from the capture.  Comments below annotate only the visible lines.  */
3117 /* Convert a comparison into something that can be used in a branch or
3118 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3119 being compared and *CODE is the code used to compare them.
3121 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3122 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3123 otherwise any standard branch condition can be used. The standard branch
3126 - EQ/NE between two registers.
3127 - any comparison between a register and zero. */
3130 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
/* Integer comparisons.  */
3132 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3134 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3136 *op0 = cmp_operands[0];
3137 *op1 = cmp_operands[1];
/* EQ/NE against a nonzero value: reduce to a comparison of
   mips_zero_if_equal's result against zero.  */
3139 else if (*code == EQ || *code == NE)
3143 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3148 *op0 = cmp_operands[0];
3149 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3154 /* The comparison needs a separate scc instruction. Store the
3155 result of the scc in *OP0 and compare it against zero. */
3156 bool invert = false;
3157 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3159 mips_emit_int_relational (*code, &invert, *op0,
3160 cmp_operands[0], cmp_operands[1]);
3161 *code = (invert ? EQ : NE);
3166 enum rtx_code cmp_code;
3168 /* Floating-point tests use a separate c.cond.fmt comparison to
3169 set a condition code register. The branch or conditional move
3170 will then compare that register against zero.
3172 Set CMP_CODE to the code of the comparison instruction and
3173 *CODE to the code that the branch or move should use. */
3179 cmp_code = reverse_condition_maybe_unordered (*code);
/* With 8 condition codes the result goes in a fresh CC register;
   otherwise it lands in the fixed FPSW register.  */
3189 ? gen_reg_rtx (CCmode)
3190 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3192 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3196 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3197 Store the result in TARGET and return true if successful.
3199 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3202 mips_emit_scc (enum rtx_code code, rtx target)
3204 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3207 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3208 if (code == EQ || code == NE)
3210 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3211 mips_emit_binary (code, target, zie, const0_rtx);
3214 mips_emit_int_relational (code, 0, target,
3215 cmp_operands[0], cmp_operands[1]);
3219 /* Emit the common code for doing conditional branches.
3220 operand[0] is the label to jump to.
3221 The comparison operands are saved away by cmp{si,di,sf,df}. */
3224 gen_conditional_branch (rtx *operands, enum rtx_code code)
3226 rtx op0, op1, condition;
3228 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3229 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3230 emit_jump_insn (gen_condjump (condition, operands[0]));
3233 /* Emit the common code for conditional moves. OPERANDS is the array
3234 of operands passed to the conditional move define_expand. */
3237 gen_conditional_move (rtx *operands)
3242 code = GET_CODE (operands[1]);
3243 mips_emit_compare (&code, &op0, &op1, true);
3244 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3245 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3246 gen_rtx_fmt_ee (code,
3249 operands[2], operands[3])));
3252 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3253 the conditional_trap expander. */
3256 mips_gen_conditional_trap (rtx *operands)
3259 enum rtx_code cmp_code = GET_CODE (operands[0]);
3260 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3262 /* MIPS conditional trap machine instructions don't have GT or LE
3263 flavors, so we must invert the comparison and convert to LT and
3264 GE, respectively. */
3267 case GT: cmp_code = LT; break;
3268 case LE: cmp_code = GE; break;
3269 case GTU: cmp_code = LTU; break;
3270 case LEU: cmp_code = GEU; break;
3273 if (cmp_code == GET_CODE (operands[0]))
3275 op0 = cmp_operands[0];
3276 op1 = cmp_operands[1];
3280 op0 = cmp_operands[1];
3281 op1 = cmp_operands[0];
3283 op0 = force_reg (mode, op0);
3284 if (!arith_operand (op1, mode))
3285 op1 = force_reg (mode, op1);
3287 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3288 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3292 /* Load function address ADDR into register DEST. SIBCALL_P is true
3293 if the address is needed for a sibling call. */
3296 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3298 /* If we're generating PIC, and this call is to a global function,
3299 try to allow its address to be resolved lazily. This isn't
3300 possible for NewABI sibcalls since the value of $gp on entry
3301 to the stub would be our caller's gp, not ours. */
3302 if (TARGET_EXPLICIT_RELOCS
3303 && !(sibcall_p && TARGET_NEWABI)
3304 && global_got_operand (addr, VOIDmode))
3306 rtx high, lo_sum_symbol;
3308 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3309 addr, SYMBOL_GOTOFF_CALL);
3310 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3311 if (Pmode == SImode)
3312 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3314 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3317 emit_move_insn (dest, addr);
3321 /* Expand a call or call_value instruction. RESULT is where the
3322 result will go (null for calls), ADDR is the address of the
3323 function, ARGS_SIZE is the size of the arguments and AUX is
3324 the value passed to us by mips_function_arg. SIBCALL_P is true
3325 if we are expanding a sibling call, false if we're expanding
3329 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3331 rtx orig_addr, pattern, insn;
3334 if (!call_insn_operand (addr, VOIDmode))
3336 addr = gen_reg_rtx (Pmode);
3337 mips_load_call_address (addr, orig_addr, sibcall_p);
3341 && mips16_hard_float
3342 && build_mips16_call_stub (result, addr, args_size,
3343 aux == 0 ? 0 : (int) GET_MODE (aux)))
3347 pattern = (sibcall_p
3348 ? gen_sibcall_internal (addr, args_size)
3349 : gen_call_internal (addr, args_size));
3350 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3354 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3355 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3358 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3359 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3362 pattern = (sibcall_p
3363 ? gen_sibcall_value_internal (result, addr, args_size)
3364 : gen_call_value_internal (result, addr, args_size));
3366 insn = emit_call_insn (pattern);
3368 /* Lazy-binding stubs require $gp to be valid on entry. */
3369 if (global_got_operand (orig_addr, VOIDmode))
3370 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3374 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3377 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3378 tree exp ATTRIBUTE_UNUSED)
3380 return TARGET_SIBCALLS;
3383 /* Emit code to move general operand SRC into condition-code
3384 register DEST. SCRATCH is a scratch TFmode float register.
3391 where FP1 and FP2 are single-precision float registers
3392 taken from SCRATCH. */
3395 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3399 /* Change the source to SFmode. */
3401 src = adjust_address (src, SFmode, 0);
3402 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3403 src = gen_rtx_REG (SFmode, true_regnum (src));
3405 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3406 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3408 emit_move_insn (copy_rtx (fp1), src);
3409 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3410 emit_insn (gen_slt_sf (dest, fp2, fp1));
3413 /* Emit code to change the current function's return address to
3414 ADDRESS. SCRATCH is available as a scratch register, if needed.
3415 ADDRESS and SCRATCH are both word-mode GPRs. */
3418 mips_set_return_address (rtx address, rtx scratch)
3422 compute_frame_size (get_frame_size ());
3423 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3424 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3425 cfun->machine->frame.gp_sp_offset);
3427 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3430 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3431 Assume that the areas do not overlap. */
3434 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3436 HOST_WIDE_INT offset, delta;
3437 unsigned HOST_WIDE_INT bits;
3439 enum machine_mode mode;
3442 /* Work out how many bits to move at a time. If both operands have
3443 half-word alignment, it is usually better to move in half words.
3444 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3445 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3446 Otherwise move word-sized chunks. */
3447 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3448 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3449 bits = BITS_PER_WORD / 2;
3451 bits = BITS_PER_WORD;
3453 mode = mode_for_size (bits, MODE_INT, 0);
3454 delta = bits / BITS_PER_UNIT;
3456 /* Allocate a buffer for the temporary registers. */
3457 regs = alloca (sizeof (rtx) * length / delta);
3459 /* Load as many BITS-sized chunks as possible. Use a normal load if
3460 the source has enough alignment, otherwise use left/right pairs. */
3461 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3463 regs[i] = gen_reg_rtx (mode);
3464 if (MEM_ALIGN (src) >= bits)
3465 emit_move_insn (regs[i], adjust_address (src, mode, offset));
3468 rtx part = adjust_address (src, BLKmode, offset);
3469 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3474 /* Copy the chunks to the destination. */
3475 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3476 if (MEM_ALIGN (dest) >= bits)
3477 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3480 rtx part = adjust_address (dest, BLKmode, offset);
3481 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3485 /* Mop up any left-over bytes. */
3486 if (offset < length)
3488 src = adjust_address (src, BLKmode, offset);
3489 dest = adjust_address (dest, BLKmode, offset);
3490 move_by_pieces (dest, src, length - offset,
3491 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3495 #define MAX_MOVE_REGS 4
3496 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3499 /* Helper function for doing a loop-based block operation on memory
3500 reference MEM. Each iteration of the loop will operate on LENGTH
3503 Create a new base register for use within the loop and point it to
3504 the start of MEM. Create a new memory reference that uses this
3505 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3508 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3509 rtx *loop_reg, rtx *loop_mem)
3511 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3513 /* Although the new mem does not refer to a known location,
3514 it does keep up to LENGTH bytes of alignment. */
3515 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3516 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3520 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3521 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3522 memory regions do not overlap. */
3525 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3527 rtx label, src_reg, dest_reg, final_src;
3528 HOST_WIDE_INT leftover;
/* LEFTOVER is the residue to copy after the last full iteration.  */
3530 leftover = length % MAX_MOVE_BYTES;
3533 /* Create registers and memory references for use within the loop. */
3534 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3535 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3537 /* Calculate the value that SRC_REG should have after the last iteration
/* NOTE(review): some lines are elided from this excerpt; the visible
   operands show FINAL_SRC = SRC_REG + LENGTH.  */
3539 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3542 /* Emit the start of the loop. */
3543 label = gen_label_rtx ();
3546 /* Emit the loop body. */
3547 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3549 /* Move on to the next block. */
3550 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3551 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3553 /* Emit the loop condition. */
/* Compare in Pmode width: DImode compare for 64-bit pointers, otherwise
   SImode (the `else` introducing gen_cmpsi is elided from this excerpt).  */
3554 if (Pmode == DImode)
3555 emit_insn (gen_cmpdi (src_reg, final_src));
3557 emit_insn (gen_cmpsi (src_reg, final_src));
3558 emit_jump_insn (gen_bne (label));
3560 /* Mop up any left-over bytes. */
3562 mips_block_move_straight (dest, src, leftover);
3565 /* Expand a movmemsi instruction. */
3568 mips_expand_block_move (rtx dest, rtx src, rtx length)
/* Only constant-length moves are expanded inline: short blocks get
   straight-line code, longer ones a loop.  The return statements are
   elided from this excerpt.  */
3570 if (GET_CODE (length) == CONST_INT)
3572 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3574 mips_block_move_straight (dest, src, INTVAL (length));
3579 mips_block_move_loop (dest, src, INTVAL (length));
3586 /* Argument support functions. */
3588 /* Initialize CUMULATIVE_ARGS for a function. */
3591 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3592 rtx libname ATTRIBUTE_UNUSED)
/* NOTE(review): ZERO_CUM is presumably copied into *CUM on an elided
   line — confirm against the full source.  */
3594 static CUMULATIVE_ARGS zero_cum;
3595 tree param, next_param;
/* Record whether a prototype (an argument-type list) is available.  */
3598 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3600 /* Determine if this function has variable arguments. This is
3601 indicated by the last argument being 'void_type_mode' if there
3602 are no variable arguments. The standard MIPS calling sequence
3603 passes all arguments in the general purpose registers in this case. */
3605 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3606 param != 0; param = next_param)
3608 next_param = TREE_CHAIN (param);
3609 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3610 cum->gp_reg_found = 1;
3615 /* Fill INFO with information about a single argument. CUM is the
3616 cumulative state for earlier arguments. MODE is the mode of this
3617 argument and TYPE is its type (if known). NAMED is true if this
3618 is a named (fixed) argument rather than a variable one. */
3621 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3622 tree type, int named, struct mips_arg_info *info)
3624 bool doubleword_aligned_p;
3625 unsigned int num_bytes, num_words, max_regs;
3627 /* Work out the size of the argument. */
3628 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3629 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3631 /* Decide whether it should go in a floating-point register, assuming
3632 one is free. Later code checks for availability.
3634 The checks against UNITS_PER_FPVALUE handle the soft-float and
3635 single-float cases. */
/* NOTE(review): the three INFO->FPR_P assignments below appear to be
   alternative arms of an elided per-ABI switch — confirm against the
   full source.  */
3639 /* The EABI conventions have traditionally been defined in terms
3640 of TYPE_MODE, regardless of the actual type. */
3641 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3642 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3643 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3648 /* Only leading floating-point scalars are passed in
3649 floating-point registers. We also handle vector floats the same
3650 way, which is OK because they are not covered by the standard ABI. */
3651 info->fpr_p = (!cum->gp_reg_found
3652 && cum->arg_number < 2
3653 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3654 || VECTOR_FLOAT_TYPE_P (type))
3655 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3656 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3657 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3662 /* Scalar and complex floating-point types are passed in
3663 floating-point registers. */
3664 info->fpr_p = (named
3665 && (type == 0 || FLOAT_TYPE_P (type))
3666 && (GET_MODE_CLASS (mode) == MODE_FLOAT
3667 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3668 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3669 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3671 /* ??? According to the ABI documentation, the real and imaginary
3672 parts of complex floats should be passed in individual registers.
3673 The real and imaginary parts of stack arguments are supposed
3674 to be contiguous and there should be an extra word of padding
3677 This has two problems. First, it makes it impossible to use a
3678 single "void *" va_list type, since register and stack arguments
3679 are passed differently. (At the time of writing, MIPSpro cannot
3680 handle complex float varargs correctly.) Second, it's unclear
3681 what should happen when there is only one register free.
3683 For now, we assume that named complex floats should go into FPRs
3684 if there are two FPRs free, otherwise they should be passed in the
3685 same way as a struct containing two floats. */
3687 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3688 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3690 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3691 info->fpr_p = false;
3701 /* See whether the argument has doubleword alignment. */
3702 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3704 /* Set REG_OFFSET to the register count we're interested in.
3705 The EABI allocates the floating-point registers separately,
3706 but the other ABIs allocate them like integer registers. */
3707 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3711 /* Advance to an even register if the argument is doubleword-aligned. */
3712 if (doubleword_aligned_p)
3713 info->reg_offset += info->reg_offset & 1;
3715 /* Work out the offset of a stack argument. */
3716 info->stack_offset = cum->stack_words;
3717 if (doubleword_aligned_p)
3718 info->stack_offset += info->stack_offset & 1;
/* MAX_REGS is how many argument registers remain at this offset.  */
3720 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3722 /* Partition the argument between registers and stack. */
3723 info->reg_words = MIN (num_words, max_regs);
3724 info->stack_words = num_words - info->reg_words;
3728 /* Implement FUNCTION_ARG_ADVANCE. */
3731 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3732 tree type, int named)
3734 struct mips_arg_info info;
3736 mips_arg_info (cum, mode, type, named, &info);
3739 cum->gp_reg_found = true;
3741 /* See the comment above the cumulative args structure in mips.h
3742 for an explanation of what this code does. It assumes the O32
3743 ABI, which passes at most 2 arguments in float registers. */
/* ARG_NUMBER is 0 or 1 here, so the two-bit code for this argument lives
   at bit position ARG_NUMBER * 2.  The previous shift count of
   ((ARG_NUMBER - 1) * 2) evaluated to -2 for the first argument, which is
   undefined behavior for <<.  */
3744 if (cum->arg_number < 2 && info.fpr_p)
3745 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
/* EABI accounts for floating-point arguments separately from GPRs; every
   other ABI allocates them out of the general-purpose sequence.  */
3747 if (mips_abi != ABI_EABI || !info.fpr_p)
3748 cum->num_gprs = info.reg_offset + info.reg_words;
3749 else if (info.reg_words > 0)
3750 cum->num_fprs += FP_INC;
3752 if (info.stack_words > 0)
3753 cum->stack_words = info.stack_offset + info.stack_words;
3758 /* Implement FUNCTION_ARG. */
3761 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3762 tree type, int named)
3764 struct mips_arg_info info;
3766 /* We will be called with a mode of VOIDmode after the last argument
3767 has been seen. Whatever we return will be passed to the call
3768 insn. If we need a mips16 fp_code, return a REG with the code
3769 stored as the mode. */
3770 if (mode == VOIDmode)
3772 if (TARGET_MIPS16 && cum->fp_code != 0)
3773 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3779 mips_arg_info (cum, mode, type, named, &info);
3781 /* Return straight away if the whole argument is passed on the stack. */
3782 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
/* Special case: a RECORD_TYPE whose size is known at compile time may
   need per-field register assignment (the enclosing condition's first
   lines are elided from this excerpt).  */
3786 && TREE_CODE (type) == RECORD_TYPE
3788 && TYPE_SIZE_UNIT (type)
3789 && host_integerp (TYPE_SIZE_UNIT (type), 1)
3792 /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3793 structure contains a double in its entirety, then that 64 bit
3794 chunk is passed in a floating point register. */
3797 /* First check to see if there is any such field. */
3798 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3799 if (TREE_CODE (field) == FIELD_DECL
3800 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3801 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3802 && host_integerp (bit_position (field), 0)
3803 && int_bit_position (field) % BITS_PER_WORD == 0)
3808 /* Now handle the special case by returning a PARALLEL
3809 indicating where each 64 bit chunk goes. INFO.REG_WORDS
3810 chunks are passed in registers. */
3812 HOST_WIDE_INT bitpos;
3815 /* assign_parms checks the mode of ENTRY_PARM, so we must
3816 use the actual mode here. */
3817 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3820 field = TYPE_FIELDS (type);
3821 for (i = 0; i < info.reg_words; i++)
/* Find the field (if any) whose bit position matches this chunk.  */
3825 for (; field; field = TREE_CHAIN (field))
3826 if (TREE_CODE (field) == FIELD_DECL
3827 && int_bit_position (field) >= bitpos)
/* A word-aligned double field gets an FPR; everything else a GPR.  */
3831 && int_bit_position (field) == bitpos
3832 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3833 && !TARGET_SOFT_FLOAT
3834 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3835 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3837 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3840 = gen_rtx_EXPR_LIST (VOIDmode, reg,
3841 GEN_INT (bitpos / BITS_PER_UNIT));
3843 bitpos += BITS_PER_WORD;
3849 /* Handle the n32/n64 conventions for passing complex floating-point
3850 arguments in FPR pairs. The real part goes in the lower register
3851 and the imaginary part goes in the upper register. */
3854 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3857 enum machine_mode inner;
3860 inner = GET_MODE_INNER (mode);
3861 reg = FP_ARG_FIRST + info.reg_offset;
3862 real = gen_rtx_EXPR_LIST (VOIDmode,
3863 gen_rtx_REG (inner, reg),
3865 imag = gen_rtx_EXPR_LIST (VOIDmode,
3866 gen_rtx_REG (inner, reg + info.reg_words / 2),
3867 GEN_INT (GET_MODE_SIZE (inner)));
3868 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3872 return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3873 else if (info.reg_offset == 1)
3874 /* This code handles the special o32 case in which the second word
3875 of the argument structure is passed in floating-point registers. */
3876 return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3878 return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3882 /* Implement TARGET_ARG_PARTIAL_BYTES. */
3885 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3886 enum machine_mode mode, tree type, bool named)
3888 struct mips_arg_info info;
3890 mips_arg_info (cum, mode, type, named, &info);
/* If the argument is split between registers and the stack, the bytes
   that landed in registers are the "partial" part; otherwise zero.  */
3891 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3895 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
3896 PARM_BOUNDARY bits of alignment, but will be given anything up
3897 to STACK_BOUNDARY bits if the type requires it. */
3900 function_arg_boundary (enum machine_mode mode, tree type)
3902 unsigned int alignment;
/* Clamp the type's (or mode's) natural alignment into
   [PARM_BOUNDARY, STACK_BOUNDARY].  The return statement is elided
   from this excerpt.  */
3904 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
3905 if (alignment < PARM_BOUNDARY)
3906 alignment = PARM_BOUNDARY;
3907 if (alignment > STACK_BOUNDARY)
3908 alignment = STACK_BOUNDARY;
3912 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
3913 upward rather than downward. In other words, return true if the
3914 first byte of the stack slot has useful data, false if the last
3918 mips_pad_arg_upward (enum machine_mode mode, tree type)
3920 /* On little-endian targets, the first byte of every stack argument
3921 is passed in the first byte of the stack slot. */
3922 if (!BYTES_BIG_ENDIAN)
3925 /* Otherwise, integral types are padded downward: the last byte of a
3926 stack argument is passed in the last byte of the stack slot. */
/* NOTE(review): the condition head and the individual return statements
   are elided from this excerpt; each check below selects a padding
   direction for big-endian targets.  */
3928 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
3929 : GET_MODE_CLASS (mode) == MODE_INT)
3932 /* Big-endian o64 pads floating-point arguments downward. */
3933 if (mips_abi == ABI_O64)
3934 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3937 /* Other types are padded upward for o32, o64, n32 and n64. */
3938 if (mips_abi != ABI_EABI)
3941 /* Arguments smaller than a stack slot are padded downward. */
3942 if (mode != BLKmode)
3943 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
3945 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
3949 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
3950 if the least significant byte of the register has useful data. Return
3951 the opposite if the most significant byte does. */
3954 mips_pad_reg_upward (enum machine_mode mode, tree type)
3956 /* No shifting is required for floating-point arguments. */
3957 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
3958 return !BYTES_BIG_ENDIAN;
3960 /* Otherwise, apply the same padding to register arguments as we do
3961 to stack arguments. */
/* Delegating keeps register and stack padding decisions consistent.  */
3962 return mips_pad_arg_upward (mode, type);
/* Implement TARGET_SETUP_INCOMING_VARARGS: spill unnamed argument
   registers to the stack so va_arg can find them.  */
3966 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3967 tree type, int *pretend_size ATTRIBUTE_UNUSED,
3970 CUMULATIVE_ARGS local_cum;
3971 int gp_saved, fp_saved;
3973 /* The caller has advanced CUM up to, but not beyond, the last named
3974 argument. Advance a local copy of CUM past the last "real" named
3975 argument, to find out how many registers are left over. */
3978 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
3980 /* Found out how many registers we need to save. */
3981 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
3982 fp_saved = (EABI_FLOAT_VARARGS_P
3983 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* Save the leftover GPRs en bloc just below the incoming-args pointer.  */
3992 ptr = plus_constant (virtual_incoming_args_rtx,
3993 REG_PARM_STACK_SPACE (cfun->decl)
3994 - gp_saved * UNITS_PER_WORD);
3995 mem = gen_rtx_MEM (BLKmode, ptr);
3996 set_mem_alias_set (mem, get_varargs_alias_set ());
3998 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4003 /* We can't use move_block_from_reg, because it will use
4005 enum machine_mode mode;
4008 /* Set OFF to the offset from virtual_incoming_args_rtx of
4009 the first float register. The FP save area lies below
4010 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4011 off = -gp_saved * UNITS_PER_WORD;
4012 off &= ~(UNITS_PER_FPVALUE - 1);
4013 off -= fp_saved * UNITS_PER_FPREG;
4015 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
/* Store each remaining FPR individually, FP_INC registers at a time.  */
4017 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
4021 ptr = plus_constant (virtual_incoming_args_rtx, off);
4022 mem = gen_rtx_MEM (mode, ptr);
4023 set_mem_alias_set (mem, get_varargs_alias_set ());
4024 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4025 off += UNITS_PER_HWFPVALUE;
4029 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4030 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4031 + fp_saved * UNITS_PER_FPREG);
4034 /* Create the va_list data type.
4035 We keep 3 pointers, and two offsets.
4036 Two pointers are to the overflow area, which starts at the CFA.
4037 One of these is constant, for addressing into the GPR save area below it.
4038 The other is advanced up the stack through the overflow region.
4039 The third pointer is to the GPR save area. Since the FPR save area
4040 is just below it, we can address FPR slots off this pointer.
4041 We also keep two one-byte offsets, which are to be subtracted from the
4042 constant pointers to yield addresses in the GPR and FPR save areas.
4043 These are downcounted as float or non-float arguments are used,
4044 and when they get to zero, the argument must be obtained from the
4046 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4047 pointer is enough. It's started at the GPR save area, and is
4049 Note that the GPR save area is not constant size, due to optimization
4050 in the prologue. Hence, we can't use a design with two pointers
4051 and two offsets, although we could have designed this with two pointers
4052 and three offsets. */
4055 mips_build_builtin_va_list (void)
4057 if (EABI_FLOAT_VARARGS_P)
4059 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
/* Build the five-field record described in the comment above.  */
4062 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4064 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4066 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4068 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4070 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4071 unsigned_char_type_node);
4072 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4073 unsigned_char_type_node);
4074 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4075 warn on every user file. */
4076 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4077 array = build_array_type (unsigned_char_type_node,
4078 build_index_type (index));
4079 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4081 DECL_FIELD_CONTEXT (f_ovfl) = record;
4082 DECL_FIELD_CONTEXT (f_gtop) = record;
4083 DECL_FIELD_CONTEXT (f_ftop) = record;
4084 DECL_FIELD_CONTEXT (f_goff) = record;
4085 DECL_FIELD_CONTEXT (f_foff) = record;
4086 DECL_FIELD_CONTEXT (f_res) = record;
/* Chain the fields in declaration order; va_start/va_arg walk this
   chain with TREE_CHAIN in the same order.  */
4088 TYPE_FIELDS (record) = f_ovfl;
4089 TREE_CHAIN (f_ovfl) = f_gtop;
4090 TREE_CHAIN (f_gtop) = f_ftop;
4091 TREE_CHAIN (f_ftop) = f_goff;
4092 TREE_CHAIN (f_goff) = f_foff;
4093 TREE_CHAIN (f_foff) = f_res;
4095 layout_type (record);
4098 else if (TARGET_IRIX && TARGET_IRIX6)
4099 /* On IRIX 6, this type is 'char *'. */
4100 return build_pointer_type (char_type_node);
4102 /* Otherwise, we use 'void *'. */
4103 return ptr_type_node;
4106 /* Implement va_start. */
4109 mips_va_start (tree valist, rtx nextarg)
4111 if (EABI_FLOAT_VARARGS_P)
4113 const CUMULATIVE_ARGS *cum;
4114 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4115 tree ovfl, gtop, ftop, goff, foff;
4117 int gpr_save_area_size;
4118 int fpr_save_area_size;
/* CUM describes the named arguments the current function received.
   (The "&" of this address-of expression had been mangled into the
   HTML entity "&curren;", yielding the invalid token "¤t_...".)  */
4121 cum = &current_function_args_info;
4123 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4125 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
/* Walk the va_list record's field chain in declaration order.  */
4127 f_ovfl = TYPE_FIELDS (va_list_type_node);
4128 f_gtop = TREE_CHAIN (f_ovfl);
4129 f_ftop = TREE_CHAIN (f_gtop);
4130 f_goff = TREE_CHAIN (f_ftop);
4131 f_foff = TREE_CHAIN (f_goff);
4133 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4135 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4137 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4139 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4141 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4144 /* Emit code to initialize OVFL, which points to the next varargs
4145 stack argument. CUM->STACK_WORDS gives the number of stack
4146 words used by named arguments. */
4147 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4148 if (cum->stack_words > 0)
4149 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), t,
4150 build_int_cst (NULL_TREE,
4151 cum->stack_words * UNITS_PER_WORD));
4152 t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4153 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4155 /* Emit code to initialize GTOP, the top of the GPR save area. */
4156 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4157 t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
4158 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4160 /* Emit code to initialize FTOP, the top of the FPR save area.
4161 This address is gpr_save_area_bytes below GTOP, rounded
4162 down to the next fp-aligned boundary. */
4163 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4164 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4165 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4167 t = build2 (PLUS_EXPR, TREE_TYPE (ftop), t,
4168 build_int_cst (NULL_TREE, -fpr_offset));
4169 t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
4170 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4172 /* Emit code to initialize GOFF, the offset from GTOP of the
4173 next GPR argument. */
4174 t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
4175 build_int_cst (NULL_TREE, gpr_save_area_size));
4176 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4178 /* Likewise emit code to initialize FOFF, the offset from FTOP
4179 of the next FPR argument. */
4180 t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
4181 build_int_cst (NULL_TREE, fpr_save_area_size));
4182 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Non-EABI path: fall back to the standard expander, adjusted for any
   prologue-allocated varargs save area.  */
4186 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4187 std_expand_builtin_va_start (valist, nextarg);
4191 /* Implement va_arg. */
4194 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4196 HOST_WIDE_INT size, rsize;
/* Arguments passed by reference are fetched as a pointer and then
   dereferenced at the end.  */
4200 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4203 type = build_pointer_type (type);
4205 size = int_size_in_bytes (type);
4206 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4208 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4209 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4212 /* Not a simple merged stack. */
4214 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4215 tree ovfl, top, off, align;
4216 HOST_WIDE_INT osize;
4219 f_ovfl = TYPE_FIELDS (va_list_type_node);
4220 f_gtop = TREE_CHAIN (f_ovfl);
4221 f_ftop = TREE_CHAIN (f_gtop);
4222 f_goff = TREE_CHAIN (f_ftop);
4223 f_foff = TREE_CHAIN (f_goff);
4225 /* We maintain separate pointers and offsets for floating-point
4226 and integer arguments, but we need similar code in both cases.
4229 TOP be the top of the register save area;
4230 OFF be the offset from TOP of the next register;
4231 ADDR_RTX be the address of the argument;
4232 RSIZE be the number of bytes used to store the argument
4233 when it's in the register save area;
4234 OSIZE be the number of bytes used to store it when it's
4235 in the stack overflow area; and
4236 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4238 The code we want is:
4240 1: off &= -rsize; // round down
4243 4: addr_rtx = top - off;
4248 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4249 10: addr_rtx = ovfl + PADDING;
4253 [1] and [9] can sometimes be optimized away. */
4255 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* Floating-point values small enough for an FPR use the FPR save
   area (FTOP/FOFF); everything else uses the GPR area (GTOP/GOFF).  */
4258 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4259 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4261 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4263 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4266 /* When floating-point registers are saved to the stack,
4267 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4268 of the float's precision. */
4269 rsize = UNITS_PER_HWFPVALUE;
4271 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4272 (= PARM_BOUNDARY bits). This can be different from RSIZE
4275 (1) On 32-bit targets when TYPE is a structure such as:
4277 struct s { float f; };
4279 Such structures are passed in paired FPRs, so RSIZE
4280 will be 8 bytes. However, the structure only takes
4281 up 4 bytes of memory, so OSIZE will only be 4.
4283 (2) In combinations such as -mgp64 -msingle-float
4284 -fshort-double. Doubles passed in registers
4285 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4286 but those passed on the stack take up
4287 UNITS_PER_WORD bytes. */
4288 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4292 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4294 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4296 if (rsize > UNITS_PER_WORD)
4298 /* [1] Emit code for: off &= -rsize. */
4299 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4300 build_int_cst (NULL_TREE, -rsize));
4301 t = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
4302 gimplify_and_add (t, pre_p);
4307 /* [2] Emit code to branch if off == 0. */
4308 t = build2 (NE_EXPR, boolean_type_node, off,
4309 build_int_cst (TREE_TYPE (off), 0));
4310 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4312 /* [5] Emit code for: off -= rsize. We do this as a form of
4313 post-increment not available to C. Also widen for the
4314 coming pointer arithmetic. */
4315 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4316 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4317 t = fold_convert (sizetype, t);
4318 t = fold_convert (TREE_TYPE (top), t);
4320 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4321 the argument has RSIZE - SIZE bytes of leading padding. */
4322 t = build2 (MINUS_EXPR, TREE_TYPE (top), top, t);
4323 if (BYTES_BIG_ENDIAN && rsize > size)
4325 u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
4327 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4329 COND_EXPR_THEN (addr) = t;
4331 if (osize > UNITS_PER_WORD)
4333 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4334 u = fold_convert (TREE_TYPE (ovfl),
4335 build_int_cst (NULL_TREE, osize - 1));
4336 t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4337 u = fold_convert (TREE_TYPE (ovfl),
4338 build_int_cst (NULL_TREE, -osize));
4339 t = build2 (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4340 align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4345 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4346 post-increment ovfl by osize. On big-endian machines,
4347 the argument has OSIZE - SIZE bytes of leading padding. */
4348 u = fold_convert (TREE_TYPE (ovfl),
4349 build_int_cst (NULL_TREE, osize));
4350 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4351 if (BYTES_BIG_ENDIAN && osize > size)
4353 u = fold_convert (TREE_TYPE (t),
4354 build_int_cst (NULL_TREE, osize - size));
4355 t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4358 /* String [9] and [10,11] together. */
4360 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4361 COND_EXPR_ELSE (addr) = t;
4363 addr = fold_convert (build_pointer_type (type), addr);
4364 addr = build_va_arg_indirect_ref (addr);
/* For by-reference arguments, dereference once more to reach the value.  */
4368 addr = build_va_arg_indirect_ref (addr);
4373 /* Return true if it is possible to use left/right accesses for a
4374 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4375 returning true, update *OP, *LEFT and *RIGHT as follows:
4377 *OP is a BLKmode reference to the whole field.
4379 *LEFT is a QImode reference to the first byte if big endian or
4380 the last byte if little endian. This address can be used in the
4381 left-side instructions (lwl, swl, ldl, sdl).
4383 *RIGHT is a QImode reference to the opposite end of the field and
4384 can be used in the patterning right-side instruction. */
4387 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4388 rtx *left, rtx *right)
4392 /* Check that the operand really is a MEM. Not all the extv and
4393 extzv predicates are checked. */
4397 /* Check that the size is valid. */
/* Only full 32-bit (and, on 64-bit targets, 64-bit) fields are handled.  */
4398 if (width != 32 && (!TARGET_64BIT || width != 64))
4401 /* We can only access byte-aligned values. Since we are always passed
4402 a reference to the first byte of the field, it is not necessary to
4403 do anything with BITPOS after this check. */
4404 if (bitpos % BITS_PER_UNIT != 0)
4407 /* Reject aligned bitfields: we want to use a normal load or store
4408 instead of a left/right pair. */
4409 if (MEM_ALIGN (*op) >= width)
4412 /* Adjust *OP to refer to the whole field. This also has the effect
4413 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4414 *op = adjust_address (*op, BLKmode, 0);
4415 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4417 /* Get references to both ends of the field. We deliberately don't
4418 use the original QImode *OP for FIRST since the new BLKmode one
4419 might have a simpler address. */
4420 first = adjust_address (*op, QImode, 0);
4421 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4423 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4424 be the upper word and RIGHT the lower word. */
4425 if (TARGET_BIG_ENDIAN)
4426 *left = first, *right = last;
4428 *left = last, *right = first;
4434 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4435 Return true on success. We only handle cases where zero_extract is
4436 equivalent to sign_extract. */
4439 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4441 rtx left, right, temp;
4443 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4444 paradoxical word_mode subreg. This is the only case in which
4445 we allow the destination to be larger than the source. */
4446 if (GET_CODE (dest) == SUBREG
4447 && GET_MODE (dest) == DImode
4448 && SUBREG_BYTE (dest) == 0
4449 && GET_MODE (SUBREG_REG (dest)) == SImode)
4450 dest = SUBREG_REG (dest);
4452 /* After the above adjustment, the destination must be the same
4453 width as the source. */
4454 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4457 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* Emit the matched ldl/ldr (64-bit) or lwl/lwr (32-bit) pair; the right
   half merges into TEMP, which already holds the left half.  */
4460 temp = gen_reg_rtx (GET_MODE (dest));
4461 if (GET_MODE (dest) == DImode)
4463 emit_insn (gen_mov_ldl (temp, src, left));
4464 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4468 emit_insn (gen_mov_lwl (temp, src, left));
4469 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4475 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4479 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4483 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
/* Reinterpret SRC in the integer mode matching WIDTH, then emit the
   sdl/sdr (64-bit) or swl/swr (32-bit) store pair.  */
4486 src = gen_lowpart (mode_for_size (width, MODE_INT, 0), src);
4488 if (GET_MODE (src) == DImode)
4490 emit_insn (gen_mov_sdl (dest, src, left));
4491 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4495 emit_insn (gen_mov_swl (dest, src, left));
4496 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4501 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4502 source of an "ext" instruction or the destination of an "ins"
4503 instruction. OP must be a register operand and the following
4504 conditions must hold:
4506 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4507 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4508 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4510 Also reject lengths equal to a word as they are better handled
4511 by the move patterns. */
4514 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4516 HOST_WIDE_INT len, pos;
/* The ISA must provide ext/ins, and OP must be a register no wider
   than a word.  */
4518 if (!ISA_HAS_EXT_INS
4519 || !register_operand (op, VOIDmode)
4520 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4523 len = INTVAL (size);
4524 pos = INTVAL (position);
/* Enforce the range conditions from the comment above (the return
   statements are elided from this excerpt).  */
4526 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4527 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4533 /* Set up globals to generate code for the ISA or processor
4534 described by INFO. */
4537 mips_set_architecture (const struct mips_cpu_info *info)
/* Record the selected architecture, its CPU enum and ISA level.  */
4541 mips_arch_info = info;
4542 mips_arch = info->cpu;
4543 mips_isa = info->isa;
4548 /* Likewise for tuning. */
4551 mips_set_tune (const struct mips_cpu_info *info)
/* Record the processor to optimize (schedule/cost-tune) for.  */
4555 mips_tune_info = info;
4556 mips_tune = info->cpu;
4560 /* Implement TARGET_HANDLE_OPTION. */
4563 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
/* NOTE(review): the switch on CODE and several case labels are elided
   from this excerpt; the visible fragments handle -mabi=, -march=/-mtune=,
   -mips<N> and -mno-flush-func.  */
4568 if (strcmp (arg, "32") == 0)
4570 else if (strcmp (arg, "o64") == 0)
4572 else if (strcmp (arg, "n32") == 0)
4574 else if (strcmp (arg, "64") == 0)
4576 else if (strcmp (arg, "eabi") == 0)
4577 mips_abi = ABI_EABI;
/* CPU options succeed iff the name parses to a known processor.  */
4584 return mips_parse_cpu (arg) != 0;
4587 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4588 return mips_isa_info != 0;
4590 case OPT_mno_flush_func:
4591 mips_cache_flush_func = NULL;
4599 /* Set up the threshold for data to go into the small data area, instead
4600 of the normal data area, and detect any conflicts in the switches. */
/* This is the backend's OVERRIDE_OPTIONS hook body: it resolves -march /
   -mtune / -mabi / -mgp / -mfp interactions, sets derived target_flags,
   fills in the mips_hard_regno_mode_ok and mips_dbx_regno tables, and
   configures the relocation-operator tables used by explicit relocs.
   Note: many structural lines (braces, `else` keywords) are elided from
   this excerpt. */
4603 override_options (void)
4605 int i, start, regno;
4606 enum machine_mode mode;
/* -G <n> sets g_switch_value; otherwise fall back to the port default.  */
4608 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4610 /* The following code determines the architecture and register size.
4611 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4612 The GAS and GCC code should be kept in sync as much as possible. */
4614 if (mips_arch_string != 0)
4615 mips_set_architecture (mips_parse_cpu (mips_arch_string));
/* A -mipsN option was given: use it as the architecture if -march was
   absent, otherwise diagnose a conflicting ISA level.  */
4617 if (mips_isa_info != 0)
4619 if (mips_arch_info == 0)
4620 mips_set_architecture (mips_isa_info);
4621 else if (mips_arch_info->isa != mips_isa_info->isa)
4622 error ("-%s conflicts with the other architecture options, "
4623 "which specify a %s processor",
4624 mips_isa_info->name,
4625 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
/* No architecture chosen on the command line: use the configure-time
   default CPU string if present, else the default ISA.  */
4628 if (mips_arch_info == 0)
4630 #ifdef MIPS_CPU_STRING_DEFAULT
4631 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4633 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4637 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4638 error ("-march=%s is not compatible with the selected ABI",
4639 mips_arch_info->name);
4641 /* Optimize for mips_arch, unless -mtune selects a different processor. */
4642 if (mips_tune_string != 0)
4643 mips_set_tune (mips_parse_cpu (mips_tune_string));
4645 if (mips_tune_info == 0)
4646 mips_set_tune (mips_arch_info);
4648 /* Set cost structure for the processor. */
4649 mips_cost = &mips_rtx_cost_data[mips_tune];
4651 if ((target_flags_explicit & MASK_64BIT) != 0)
4653 /* The user specified the size of the integer registers. Make sure
4654 it agrees with the ABI and ISA. */
4655 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4656 error ("-mgp64 used with a 32-bit processor");
4657 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4658 error ("-mgp32 used with a 64-bit ABI");
4659 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4660 error ("-mgp64 used with a 32-bit ABI");
4664 /* Infer the integer register size from the ABI and processor.
4665 Restrict ourselves to 32-bit registers if that's all the
4666 processor has, or if the ABI cannot handle 64-bit registers. */
4667 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4668 target_flags &= ~MASK_64BIT;
4670 target_flags |= MASK_64BIT;
4673 if ((target_flags_explicit & MASK_FLOAT64) != 0)
4675 /* Really, -mfp32 and -mfp64 are ornamental options. There's
4676 only one right answer here. */
4677 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4678 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4679 else if (!TARGET_64BIT && TARGET_FLOAT64)
4680 error ("unsupported combination: %s", "-mgp32 -mfp64");
4681 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4682 error ("unsupported combination: %s", "-mfp64 -msingle-float");
4686 /* -msingle-float selects 32-bit float registers. Otherwise the
4687 float registers should be the same size as the integer ones. */
4688 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4689 target_flags |= MASK_FLOAT64;
4691 target_flags &= ~MASK_FLOAT64;
4694 /* End of code shared with GAS. */
/* Default the size of C `long` from the ABI: 64-bit for n64 and for a
   64-bit EABI, 32-bit otherwise.  */
4696 if ((target_flags_explicit & MASK_LONG64) == 0)
4698 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4699 target_flags |= MASK_LONG64;
4701 target_flags &= ~MASK_LONG64;
4704 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4705 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4707 /* For some configurations, it is useful to have -march control
4708 the default setting of MASK_SOFT_FLOAT. */
4709 switch ((int) mips_arch)
4711 case PROCESSOR_R4100:
4712 case PROCESSOR_R4111:
4713 case PROCESSOR_R4120:
4714 case PROCESSOR_R4130:
4715 target_flags |= MASK_SOFT_FLOAT;
4719 target_flags &= ~MASK_SOFT_FLOAT;
4725 flag_pcc_struct_return = 0;
4727 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4729 /* If neither -mbranch-likely nor -mno-branch-likely was given
4730 on the command line, set MASK_BRANCHLIKELY based on the target
4733 By default, we enable use of Branch Likely instructions on
4734 all architectures which support them with the following
4735 exceptions: when creating MIPS32 or MIPS64 code, and when
4736 tuning for architectures where their use tends to hurt
4739 The MIPS32 and MIPS64 architecture specifications say "Software
4740 is strongly encouraged to avoid use of Branch Likely
4741 instructions, as they will be removed from a future revision
4742 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
4743 issue those instructions unless instructed to do so by
4745 if (ISA_HAS_BRANCHLIKELY
4746 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
4747 && !(TUNE_MIPS5500 || TUNE_SB1))
4748 target_flags |= MASK_BRANCHLIKELY;
4750 target_flags &= ~MASK_BRANCHLIKELY;
4752 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4753 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
4755 /* The effect of -mabicalls isn't defined for the EABI. */
4756 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4758 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4759 target_flags &= ~MASK_ABICALLS;
4762 if (TARGET_ABICALLS)
4764 /* We need to set flag_pic for executables as well as DSOs
4765 because we may reference symbols that are not defined in
4766 the final executable. (MIPS does not use things like
4767 copy relocs, for example.)
4769 Also, there is a body of code that uses __PIC__ to distinguish
4770 between -mabicalls and -mno-abicalls code. */
/* -mabicalls addresses everything via the GOT, so a nonzero small-data
   threshold (-G) cannot be honoured.  */
4772 if (mips_section_threshold > 0)
4773 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
4776 /* mips_split_addresses is a half-way house between explicit
4777 relocations and the traditional assembler macros. It can
4778 split absolute 32-bit symbolic constants into a high/lo_sum
4779 pair but uses macros for other sorts of access.
4781 Like explicit relocation support for REL targets, it relies
4782 on GNU extensions in the assembler and the linker.
4784 Although this code should work for -O0, it has traditionally
4785 been treated as an optimization. */
4786 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4787 && optimize && !flag_pic
4788 && !ABI_HAS_64BIT_SYMBOLS)
4789 mips_split_addresses = 1;
4791 mips_split_addresses = 0;
4793 /* -mvr4130-align is a "speed over size" optimization: it usually produces
4794 faster code, but at the expense of more nops. Enable it at -O3 and
4796 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4797 target_flags |= MASK_VR4130_ALIGN;
4799 /* When compiling for the mips16, we cannot use floating point. We
4800 record the original hard float value in mips16_hard_float. */
4803 if (TARGET_SOFT_FLOAT)
4804 mips16_hard_float = 0;
4806 mips16_hard_float = 1;
/* Force soft-float for MIPS16; the saved mips16_hard_float value lets
   later code know what the user originally asked for.  */
4807 target_flags |= MASK_SOFT_FLOAT;
4809 /* Don't run the scheduler before reload, since it tends to
4810 increase register pressure. */
4811 flag_schedule_insns = 0;
4813 /* Don't do hot/cold partitioning. The constant layout code expects
4814 the whole function to be in a single section. */
4815 flag_reorder_blocks_and_partition = 0;
4817 /* Silently disable -mexplicit-relocs since it doesn't apply
4818 to mips16 code. Even so, it would overly pedantic to warn
4819 about "-mips16 -mexplicit-relocs", especially given that
4820 we use a %gprel() operator. */
4821 target_flags &= ~MASK_EXPLICIT_RELOCS;
4824 /* When using explicit relocs, we call dbr_schedule from within
/* ...so turn off the generic delayed-branch pass here and remember the
   user's setting in mips_flag_delayed_branch.  */
4826 if (TARGET_EXPLICIT_RELOCS)
4828 mips_flag_delayed_branch = flag_delayed_branch;
4829 flag_delayed_branch = 0;
4832 #ifdef MIPS_TFMODE_FORMAT
4833 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4836 /* Make sure that the user didn't turn off paired single support when
4837 MIPS-3D support is requested. */
4838 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4839 && !TARGET_PAIRED_SINGLE_FLOAT)
4840 error ("-mips3d requires -mpaired-single");
4842 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
4844 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4846 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4847 and TARGET_HARD_FLOAT are both true. */
4848 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4849 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4851 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4853 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4854 error ("-mips3d/-mpaired-single must be used with -mips64");
4856 if (TARGET_MIPS16 && TARGET_DSP)
4857 error ("-mips16 and -mdsp cannot be used together");
/* Register the characters that print_operand treats as punctuation
   (see the PRINT_OPERAND comment later in this file).  */
4859 mips_print_operand_punct['?'] = 1;
4860 mips_print_operand_punct['#'] = 1;
4861 mips_print_operand_punct['/'] = 1;
4862 mips_print_operand_punct['&'] = 1;
4863 mips_print_operand_punct['!'] = 1;
4864 mips_print_operand_punct['*'] = 1;
4865 mips_print_operand_punct['@'] = 1;
4866 mips_print_operand_punct['.'] = 1;
4867 mips_print_operand_punct['('] = 1;
4868 mips_print_operand_punct[')'] = 1;
4869 mips_print_operand_punct['['] = 1;
4870 mips_print_operand_punct[']'] = 1;
4871 mips_print_operand_punct['<'] = 1;
4872 mips_print_operand_punct['>'] = 1;
4873 mips_print_operand_punct['{'] = 1;
4874 mips_print_operand_punct['}'] = 1;
4875 mips_print_operand_punct['^'] = 1;
4876 mips_print_operand_punct['$'] = 1;
4877 mips_print_operand_punct['+'] = 1;
4878 mips_print_operand_punct['~'] = 1;
4880 /* Set up array to map GCC register number to debug register number.
4881 Ignore the special purpose register numbers. */
4883 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4884 mips_dbx_regno[i] = -1;
4886 start = GP_DBX_FIRST - GP_REG_FIRST;
4887 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
4888 mips_dbx_regno[i] = i + start;
4890 start = FP_DBX_FIRST - FP_REG_FIRST;
4891 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
4892 mips_dbx_regno[i] = i + start;
4894 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
4895 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
4897 /* Set up array giving whether a given register can hold a given mode. */
4899 for (mode = VOIDmode;
4900 mode != MAX_MACHINE_MODE;
4901 mode = (enum machine_mode) ((int)mode + 1))
4903 register int size = GET_MODE_SIZE (mode);
4904 register enum mode_class class = GET_MODE_CLASS (mode);
4906 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
/* CCV2/CCV4 need 2- or 4-aligned status registers; plain CCmode can
   also live in FPSW, GPRs, or FPRs (the intervening condition lines
   are elided from this excerpt).  */
4910 if (mode == CCV2mode)
4913 && (regno - ST_REG_FIRST) % 2 == 0);
4915 else if (mode == CCV4mode)
4918 && (regno - ST_REG_FIRST) % 4 == 0);
4920 else if (mode == CCmode)
4923 temp = (regno == FPSW_REGNUM);
4925 temp = (ST_REG_P (regno) || GP_REG_P (regno)
4926 || FP_REG_P (regno));
4929 else if (GP_REG_P (regno))
4930 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
4932 else if (FP_REG_P (regno))
4933 temp = ((regno % FP_INC) == 0)
4934 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
4935 || class == MODE_VECTOR_FLOAT)
4936 && size <= UNITS_PER_FPVALUE)
4937 /* Allow integer modes that fit into a single
4938 register. We need to put integers into FPRs
4939 when using instructions like cvt and trunc.
4940 We can't allow sizes smaller than a word,
4941 the FPU has no appropriate load/store
4942 instructions for those. */
4943 || (class == MODE_INT
4944 && size >= MIN_UNITS_PER_WORD
4945 && size <= UNITS_PER_FPREG)
4946 /* Allow TFmode for CCmode reloads. */
4947 || (ISA_HAS_8CC && mode == TFmode));
4949 else if (ACC_REG_P (regno))
4950 temp = (INTEGRAL_MODE_P (mode)
4951 && (size <= UNITS_PER_WORD
4952 || (ACC_HI_REG_P (regno)
4953 && size == 2 * UNITS_PER_WORD)));
4955 else if (ALL_COP_REG_P (regno))
4956 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
4960 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
4964 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
4965 initialized yet, so we can't use that here. */
4966 gpr_mode = TARGET_64BIT ? DImode : SImode;
4968 /* Provide default values for align_* for 64-bit targets. */
4969 if (TARGET_64BIT && !TARGET_MIPS16)
4971 if (align_loops == 0)
4973 if (align_jumps == 0)
4975 if (align_functions == 0)
4976 align_functions = 8;
4979 /* Function to allocate machine-dependent function status. */
4980 init_machine_status = &mips_init_machine_status;
/* Configure the per-symbol-type relocation operator tables:
   mips_split_p says whether a constant of that type is split into
   HIGH/LO_SUM, and mips_hi_relocs/mips_lo_relocs give the assembler
   operators for each half.  */
4982 if (ABI_HAS_64BIT_SYMBOLS)
4984 if (TARGET_EXPLICIT_RELOCS)
4986 mips_split_p[SYMBOL_64_HIGH] = true;
4987 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
4988 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
4990 mips_split_p[SYMBOL_64_MID] = true;
4991 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
4992 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
4994 mips_split_p[SYMBOL_64_LOW] = true;
4995 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
4996 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
4998 mips_split_p[SYMBOL_GENERAL] = true;
4999 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5004 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5006 mips_split_p[SYMBOL_GENERAL] = true;
5007 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5008 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5014 /* The high part is provided by a pseudo copy of $gp. */
5015 mips_split_p[SYMBOL_SMALL_DATA] = true;
5016 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5019 if (TARGET_EXPLICIT_RELOCS)
5021 /* Small data constants are kept whole until after reload,
5022 then lowered by mips_rewrite_small_data. */
5023 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5025 mips_split_p[SYMBOL_GOT_LOCAL] = true;
/* NOTE(review): the branch condition selecting between the
   %got_page/%got_ofst pair and the %got/%lo pair is elided from this
   excerpt -- presumably it tests TARGET_NEWABI or similar; confirm
   against the full file.  */
5028 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5029 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
5033 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5034 mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
5039 /* The HIGH and LO_SUM are matched by special .md patterns. */
5040 mips_split_p[SYMBOL_GOT_GLOBAL] = true;
5042 mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
5043 mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
5044 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
5046 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5047 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5048 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5053 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
5055 mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
5056 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5062 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5063 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5064 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5067 /* Thread-local relocation operators. */
5068 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5069 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5070 mips_split_p[SYMBOL_DTPREL] = 1;
5071 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5072 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5073 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5074 mips_split_p[SYMBOL_TPREL] = 1;
5075 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5076 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5078 /* We don't have a thread pointer access instruction on MIPS16, or
5079 appropriate TLS relocations. */
5081 targetm.have_tls = false;
5083 /* Default to working around R4000 errata only if the processor
5084 was selected explicitly. */
5085 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5086 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5087 target_flags |= MASK_FIX_R4000;
5089 /* Default to working around R4400 errata only if the processor
5090 was selected explicitly. */
5091 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5092 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5093 target_flags |= MASK_FIX_R4400;
5096 /* Implement CONDITIONAL_REGISTER_USAGE.  Adjusts fixed_regs,
   call_used_regs and call_really_used_regs to reflect the current
   target flags and ABI.  (Several guard conditions, including the
   !TARGET_DSP and TARGET_MIPS16 tests, are elided from this excerpt.) */
5099 mips_conditional_register_usage (void)
/* Presumably guarded by !TARGET_DSP -- when the DSP ASE is off, its
   accumulators are unusable; TODO confirm against the full file.  */
5105 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5106 fixed_regs[regno] = call_used_regs[regno] = 1;
5108 if (!TARGET_HARD_FLOAT)
/* Soft float: hide all FP and FP-status registers.  */
5112 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5113 fixed_regs[regno] = call_used_regs[regno] = 1;
5114 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5115 fixed_regs[regno] = call_used_regs[regno] = 1;
5117 else if (! ISA_HAS_8CC)
5121 /* We only have a single condition code register. We
5122 implement this by hiding all the condition code registers,
5123 and generating RTL that refers directly to ST_REG_FIRST. */
5124 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5125 fixed_regs[regno] = call_used_regs[regno] = 1;
5127 /* In mips16 mode, we permit the $t temporary registers to be used
5128 for reload. We prohibit the unused $s registers, since they
5129 are caller saved, and saving them via a mips16 register would
5130 probably waste more time than just reloading the value. */
5133 fixed_regs[18] = call_used_regs[18] = 1;
5134 fixed_regs[19] = call_used_regs[19] = 1;
5135 fixed_regs[20] = call_used_regs[20] = 1;
5136 fixed_regs[21] = call_used_regs[21] = 1;
5137 fixed_regs[22] = call_used_regs[22] = 1;
5138 fixed_regs[23] = call_used_regs[23] = 1;
5139 fixed_regs[26] = call_used_regs[26] = 1;
5140 fixed_regs[27] = call_used_regs[27] = 1;
5141 fixed_regs[30] = call_used_regs[30] = 1;
5143 /* fp20-23 are now caller saved. */
5144 if (mips_abi == ABI_64)
5147 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5148 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5150 /* Odd registers from fp21 to fp31 are now caller saved. */
5151 if (mips_abi == ABI_N32)
5154 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5155 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5159 /* Allocate a chunk of memory for per-function machine-dependent data. */
5160 static struct machine_function *
/* Installed as init_machine_status by override_options; returns a
   zero-initialized, GC-allocated machine_function.  */
5161 mips_init_machine_status (void)
5163 return ((struct machine_function *)
5164 ggc_alloc_cleared (sizeof (struct machine_function)));
5167 /* On the mips16, we want to allocate $24 (T_REG) before other
5168 registers for instructions for which it is possible. This helps
5169 avoid shuffling registers around in order to set up for an xor,
5170 encouraging the compiler to use a cmp instead. */
5173 mips_order_regs_for_local_alloc (void)
/* Start from the identity allocation order...  */
5177 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5178 reg_alloc_order[i] = i;
/* ...then, presumably under TARGET_MIPS16 (the guard is elided from
   this excerpt), swap $24 to the front.  */
5182 /* It really doesn't matter where we put register 0, since it is
5183 a fixed register anyhow. */
5184 reg_alloc_order[0] = 24;
5185 reg_alloc_order[24] = 0;
5190 /* The MIPS debug format wants all automatic variables and arguments
5191 to be in terms of the virtual frame pointer (stack pointer before
5192 any adjustment in the function), while the MIPS 3.0 linker wants
5193 the frame pointer to be the stack pointer after the initial
5194 adjustment. So, we do the adjustment here. The arg pointer (which
5195 is eliminated) points to the virtual frame pointer, while the frame
5196 pointer (which may be eliminated) points to the stack pointer after
5197 the initial adjustments. */
5200 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
/* Split ADDR into a base register plus constant; when OFFSET is zero
   the constant from ADDR is used instead (the guard line is elided
   from this excerpt).  */
5202 rtx offset2 = const0_rtx;
5203 rtx reg = eliminate_constant_term (addr, &offset2);
5206 offset = INTVAL (offset2);
5208 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5209 || reg == hard_frame_pointer_rtx)
/* Use the cached frame size if the frame layout has been computed,
   otherwise compute it now.  */
5211 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5212 ? compute_frame_size (get_frame_size ())
5213 : cfun->machine->frame.total_size;
5215 /* MIPS16 frame is smaller */
5216 if (frame_pointer_needed && TARGET_MIPS16)
5217 frame_size -= cfun->machine->frame.args_size;
5219 offset = offset - frame_size;
5222 /* sdbout_parms does not want this to crash for unrecognized cases. */
5224 else if (reg != arg_pointer_rtx)
5225 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5232 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5234 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5235 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5236 'h' OP is HIGH, prints %hi(X),
5237 'd' output integer constant in decimal,
5238 'z' if the operand is 0, use $0 instead of normal operand.
5239 'D' print second part of double-word register or memory operand.
5240 'L' print low-order register of double-word register operand.
5241 'M' print high-order register of double-word register operand.
5242 'C' print part of opcode for a branch condition.
5243 'F' print part of opcode for a floating-point branch condition.
5244 'N' print part of opcode for a branch condition, inverted.
5245 'W' print part of opcode for a floating-point branch condition, inverted.
5246 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5247 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5248 't' like 'T', but with the EQ/NE cases reversed
5249 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5250 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5251 'R' print the reloc associated with LO_SUM
5252 'q' print DSP accumulator registers
5254 The punctuation characters are:
5256 '(' Turn on .set noreorder
5257 ')' Turn on .set reorder
5258 '[' Turn on .set noat
5260 '<' Turn on .set nomacro
5261 '>' Turn on .set macro
5262 '{' Turn on .set volatile (not GAS)
5263 '}' Turn on .set novolatile (not GAS)
5264 '&' Turn on .set noreorder if filling delay slots
5265 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5266 '!' Turn on .set nomacro if filling delay slots
5267 '#' Print nop if in a .set noreorder section.
5268 '/' Like '#', but does nothing within a delayed branch sequence
5269 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5270 '@' Print the name of the assembler temporary register (at or $1).
5271 '.' Print the name of the register with a hard-wired zero (zero or $0).
5272 '^' Print the name of the pic call-through register (t9 or $25).
5273 '$' Print the name of the stack pointer register (sp or $29).
5274 '+' Print the name of the gp register (usually gp or $28).
5275 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
5278 print_operand (FILE *file, rtx op, int letter)
5280 register enum rtx_code code;
/* Punctuation codes are handled first; they take no operand.  The
   dispatching switch header and several case labels are elided from
   this excerpt -- the fputs/fputc lines below correspond to the
   punctuation characters documented above, in order.  */
5282 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5287 if (mips_branch_likely)
5292 fputs (reg_names [GP_REG_FIRST + 1], file);
5296 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5300 fputs (reg_names [GP_REG_FIRST + 0], file);
5304 fputs (reg_names[STACK_POINTER_REGNUM], file);
5308 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
/* set_noreorder/set_nomacro/set_noat/set_volatile are nesting
   counters: emit the ".set" directive only on the 0 -> 1 transition
   and its inverse only on the 1 -> 0 transition.  */
5312 if (final_sequence != 0 && set_noreorder++ == 0)
5313 fputs (".set\tnoreorder\n\t", file);
5317 if (final_sequence != 0)
5319 if (set_noreorder++ == 0)
5320 fputs (".set\tnoreorder\n\t", file);
5322 if (set_nomacro++ == 0)
5323 fputs (".set\tnomacro\n\t", file);
5328 if (final_sequence != 0 && set_nomacro++ == 0)
5329 fputs ("\n\t.set\tnomacro", file);
5333 if (set_noreorder != 0)
5334 fputs ("\n\tnop", file);
5338 /* Print an extra newline so that the delayed insn is separated
5339 from the following ones. This looks neater and is consistent
5340 with non-nop delayed sequences. */
5341 if (set_noreorder != 0 && final_sequence == 0)
5342 fputs ("\n\tnop\n", file);
5346 if (set_noreorder++ == 0)
5347 fputs (".set\tnoreorder\n\t", file);
5351 if (set_noreorder == 0)
5352 error ("internal error: %%) found without a %%( in assembler pattern");
5354 else if (--set_noreorder == 0)
5355 fputs ("\n\t.set\treorder", file);
5360 if (set_noat++ == 0)
5361 fputs (".set\tnoat\n\t", file);
5366 error ("internal error: %%] found without a %%[ in assembler pattern");
5367 else if (--set_noat == 0)
5368 fputs ("\n\t.set\tat", file);
5373 if (set_nomacro++ == 0)
5374 fputs (".set\tnomacro\n\t", file);
5378 if (set_nomacro == 0)
5379 error ("internal error: %%> found without a %%< in assembler pattern");
5380 else if (--set_nomacro == 0)
5381 fputs ("\n\t.set\tmacro", file);
5386 if (set_volatile++ == 0)
5387 fputs ("#.set\tvolatile\n\t", file);
5391 if (set_volatile == 0)
5392 error ("internal error: %%} found without a %%{ in assembler pattern");
5393 else if (--set_volatile == 0)
5394 fputs ("\n\t#.set\tnovolatile", file);
5400 if (align_labels_log > 0)
5401 ASM_OUTPUT_ALIGN (file, align_labels_log);
5406 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
/* Non-punctuation path: OP must be a real rtx.  */
5415 error ("PRINT_OPERAND null pointer");
5419 code = GET_CODE (op);
/* 'C': the comparison mnemonic for a branch condition.  */
5424 case EQ: fputs ("eq", file); break;
5425 case NE: fputs ("ne", file); break;
5426 case GT: fputs ("gt", file); break;
5427 case GE: fputs ("ge", file); break;
5428 case LT: fputs ("lt", file); break;
5429 case LE: fputs ("le", file); break;
5430 case GTU: fputs ("gtu", file); break;
5431 case GEU: fputs ("geu", file); break;
5432 case LTU: fputs ("ltu", file); break;
5433 case LEU: fputs ("leu", file); break;
5435 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
/* 'N': like 'C' but with the condition logically inverted.  */
5438 else if (letter == 'N')
5441 case EQ: fputs ("ne", file); break;
5442 case NE: fputs ("eq", file); break;
5443 case GT: fputs ("le", file); break;
5444 case GE: fputs ("lt", file); break;
5445 case LT: fputs ("ge", file); break;
5446 case LE: fputs ("gt", file); break;
5447 case GTU: fputs ("leu", file); break;
5448 case GEU: fputs ("ltu", file); break;
5449 case LTU: fputs ("geu", file); break;
5450 case LEU: fputs ("gtu", file); break;
5452 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
/* 'F' / 'W': FP branch-condition opcode fragment, direct / inverted.  */
5455 else if (letter == 'F')
5458 case EQ: fputs ("c1f", file); break;
5459 case NE: fputs ("c1t", file); break;
5461 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5464 else if (letter == 'W')
5467 case EQ: fputs ("c1t", file); break;
5468 case NE: fputs ("c1f", file); break;
5470 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5473 else if (letter == 'h')
5475 if (GET_CODE (op) == HIGH)
5478 print_operand_reloc (file, op, mips_hi_relocs);
5481 else if (letter == 'R')
5482 print_operand_reloc (file, op, mips_lo_relocs);
/* 'Y': index into the mips_fp_conditions table, range-checked.  */
5484 else if (letter == 'Y')
5486 if (GET_CODE (op) == CONST_INT
5487 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5488 < ARRAY_SIZE (mips_fp_conditions)))
5489 fputs (mips_fp_conditions[INTVAL (op)], file);
5491 output_operand_lossage ("invalid %%Y value");
5494 else if (letter == 'Z')
5498 print_operand (file, op, 0);
/* 'q': DSP accumulator name ($ac0 for hi/lo, $acN for DSP regs).  */
5503 else if (letter == 'q')
5508 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5510 regnum = REGNO (op);
5511 if (MD_REG_P (regnum))
5512 fprintf (file, "$ac0");
5513 else if (DSP_ACC_REG_P (regnum))
5514 fprintf (file, "$ac%c", reg_names[regnum][3]);
5516 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5519 else if (code == REG || code == SUBREG)
5521 register int regnum;
5524 regnum = REGNO (op);
5526 regnum = true_regnum (op);
/* 'M'/'L'/'D' select a word of a double-word register depending on
   endianness (part of the condition is elided from this excerpt).  */
5528 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5529 || (letter == 'L' && WORDS_BIG_ENDIAN)
5533 fprintf (file, "%s", reg_names[regnum]);
5536 else if (code == MEM)
/* 'D' on a memory operand: address of the second word (offset 4).  */
5539 output_address (plus_constant (XEXP (op, 0), 4));
5541 output_address (XEXP (op, 0));
5544 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5545 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5547 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5548 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5550 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5551 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5553 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5554 fputs (reg_names[GP_REG_FIRST], file);
5556 else if (letter == 'd' || letter == 'x' || letter == 'X')
5557 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5559 else if (letter == 'T' || letter == 't')
/* Index "zfnt": truth selects n/t vs z/f, CCmode selects f/t forms.  */
5561 int truth = (code == NE) == (letter == 'T');
5562 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5565 else if (CONST_GP_P (op))
5566 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
/* Fallback: print the operand as a plain address constant.  */
5569 output_addr_const (file, op);
5573 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5574 RELOCS is the array of relocations to use. */
5577 print_operand_reloc (FILE *file, rtx op, const char **relocs)
5579 enum mips_symbol_type symbol_type;
5582 HOST_WIDE_INT offset;
/* Classify OP; there must be a relocation operator registered for its
   symbol type (see the tables set up in override_options).  */
5584 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
5585 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5587 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
5588 mips_split_const (op, &base, &offset);
5589 if (UNSPEC_ADDRESS_P (base))
5590 op = plus_constant (UNSPEC_ADDRESS (base), offset);
/* Emit "reloc(symbol" then one closing ')' per '(' in the operator
   string (operators like "%hi(%neg(%gp_rel(" need several).  The
   fputc that emits each ')' is elided from this excerpt.  */
5592 fputs (relocs[symbol_type], file);
5593 output_addr_const (file, op);
5594 for (p = relocs[symbol_type]; *p != 0; p++)
5599 /* Output address operand X to FILE. */
5602 print_operand_address (FILE *file, rtx x)
5604 struct mips_address_info addr;
/* Classify X and print it in the assembler's "offset(base)" or bare
   symbolic form according to its address type.  (The switch header
   and the ADDRESS_REG case label are elided from this excerpt.)  */
5606 if (mips_classify_address (&addr, x, word_mode, true))
5610 print_operand (file, addr.offset, 0);
5611 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5614 case ADDRESS_LO_SUM:
/* The offset carries a %lo()-style relocation; 'R' prints it.  */
5615 print_operand (file, addr.offset, 'R');
5616 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5619 case ADDRESS_CONST_INT:
/* Absolute small constant: offset from $0.  */
5620 output_addr_const (file, x);
5621 fprintf (file, "(%s)", reg_names[0]);
5624 case ADDRESS_SYMBOLIC:
5625 output_addr_const (file, x);
5631 /* When using assembler macros, keep track of all of small-data externs
5632 so that mips_file_end can emit the appropriate declarations for them.
5634 In most cases it would be safe (though pointless) to emit .externs
5635 for other symbols too. One exception is when an object is within
5636 the -G limit but declared by the user to be in a section other
5637 than .sbss or .sdata. */
5640 mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5642 register struct extern_list *p;
/* Queue a small-data extern on extern_head for mips_file_end.  */
5644 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5646 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5647 p->next = extern_head;
5649 p->size = int_size_in_bytes (TREE_TYPE (decl));
/* IRIX o32 also needs every external function recorded (see the
   .globl handling in mips_file_end).  */
5653 if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5655 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5656 p->next = extern_head;
/* Record libcall symbol FUN on the extern_head list under IRIX o32, so
   that mips_file_end can emit a declaration for it.  */
5667 irix_output_external_libcall (rtx fun)
5669 register struct extern_list *p;
5671 if (mips_abi == ABI_32)
5673 p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5674 p->next = extern_head;
5675 p->name = XSTR (fun, 0);
5682 /* Emit a new filename to a stream. If we are smuggling stabs, try to
5683 put out a MIPS ECOFF file and a stab. */
5686 mips_output_filename (FILE *stream, const char *name)
5689 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5691 if (write_symbols == DWARF2_DEBUG)
/* First filename seen in the compilation: emit ".file <n> \"name\"".  */
5693 else if (mips_output_filename_first_time)
5695 mips_output_filename_first_time = 0;
5696 num_source_filenames += 1;
5697 current_function_file = name;
5698 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5699 output_quoted_string (stream, name);
5700 putc ('\n', stream);
5703 /* If we are emitting stabs, let dbxout.c handle this (except for
5704 the mips_output_filename_first_time case). */
5705 else if (write_symbols == DBX_DEBUG)
/* Filename changed since the last directive: emit a fresh ".file".  */
5708 else if (name != current_function_file
5709 && strcmp (name, current_function_file) != 0)
5711 num_source_filenames += 1;
5712 current_function_file = name;
5713 fprintf (stream, "\t.file\t%d ", num_source_filenames);
5714 output_quoted_string (stream, name);
5715 putc ('\n', stream);
5719 /* Output an ASCII string, in a space-saving way. PREFIX is the string
5720 that should be written before the opening quote, such as "\t.ascii\t"
5721 for real string data or "\t# " for a comment. */
5724 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5729 register const unsigned char *string =
5730 (const unsigned char *)string_param;
5732 fprintf (stream, "%s\"", prefix);
5733 for (i = 0; i < len; i++)
5735 register int c = string[i];
/* Backslash-escape '\' and '"'; non-printable bytes are emitted as
   octal escapes (the printable-character test is elided from this
   excerpt).  */
5739 if (c == '\\' || c == '\"')
5741 putc ('\\', stream);
5749 fprintf (stream, "\\%03o", c);
/* Break overlong output lines, restarting with PREFIX and a quote.  */
5753 if (cur_pos > 72 && i+1 < len)
5756 fprintf (stream, "\"\n%s\"", prefix);
5759 fprintf (stream, "\"\n");
5762 /* Implement TARGET_ASM_FILE_START.  Emits the .mdebug ABI marker
   section, the EABI long-size marker, the SVR4 .abicalls directive,
   the .set mips16 directive, and an optional verbose-asm banner. */
5765 mips_file_start (void)
5767 default_file_start ();
5771 /* Generate a special section to describe the ABI switches used to
5772 produce the resultant binary. This used to be done by the assembler
5773 setting bits in the ELF header's flags field, but we have run out of
5774 bits. GDB needs this information in order to be able to correctly
5775 debug these binaries. See the function mips_gdbarch_init() in
5776 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
5777 causes unnecessary IRIX 6 ld warnings. */
5778 const char * abi_string = NULL;
5782 case ABI_32: abi_string = "abi32"; break;
5783 case ABI_N32: abi_string = "abiN32"; break;
5784 case ABI_64: abi_string = "abi64"; break;
5785 case ABI_O64: abi_string = "abiO64"; break;
5786 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5790 /* Note - we use fprintf directly rather than calling switch_to_section
5791 because in this way we can avoid creating an allocated section. We
5792 do not want this section to take up any space in the running
5794 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
5796 /* There is no ELF header flag to distinguish long32 forms of the
5797 EABI from long64 forms. Emit a special section to help tools
5799 if (mips_abi == ABI_EABI)
5800 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5801 TARGET_LONG64 ? 64 : 32);
5803 /* Restore the default section. */
5804 fprintf (asm_out_file, "\t.previous\n");
5807 /* Generate the pseudo ops that System V.4 wants. */
5808 if (TARGET_ABICALLS)
5809 fprintf (asm_out_file, "\t.abicalls\n");
/* Presumably guarded by TARGET_MIPS16 -- the condition line is elided
   from this excerpt.  */
5812 fprintf (asm_out_file, "\t.set\tmips16\n");
5814 if (flag_verbose_asm)
5815 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5817 mips_section_threshold, mips_arch_info->name, mips_isa);
5820 #ifdef BSS_SECTION_ASM_OP
5821 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
5822 in the use of sbss. */
5825 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5826 unsigned HOST_WIDE_INT size, int align)
5828 extern tree last_assemble_variable_decl;
/* Small-data objects go in .sbss so they are addressable from $gp;
   everything else uses the ordinary bss section.  (The `else` line for
   the second branch is not visible in this elided listing.)  */
5830 if (mips_in_small_data_p (decl))
5831 switch_to_section (get_named_section (NULL, ".sbss", 0));
5833 switch_to_section (bss_section);
5834 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5835 last_assemble_variable_decl = decl;
5836 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* Reserve at least one byte so the label refers to real storage.  */
5837 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5841 /* Implement TARGET_ASM_FILE_END. When using assembler macros, emit
5842 .externs for any small-data variables that turned out to be external. */
5845 mips_file_end (void)
5848 struct extern_list *p;
5852 fputs ("\n", asm_out_file);
/* Walk the list of symbols recorded as potential externs.
   (NOTE(review): the guard around this loop -- presumably a test that
   extern_head is non-null and/or that assembler macros are in use --
   is not visible in this elided listing.)  */
5854 for (p = extern_head; p != 0; p = p->next)
5856 name_tree = get_identifier (p->name);
5858 /* Positively ensure only one .extern for any given symbol. */
5859 if (!TREE_ASM_WRITTEN (name_tree)
5860 && TREE_SYMBOL_REFERENCED (name_tree))
5862 TREE_ASM_WRITTEN (name_tree) = 1;
5863 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
5864 `.global name .text' directive for every used but
5865 undefined function. If we don't, the linker may perform
5866 an optimization (skipping over the insns that set $gp)
5867 when it is unsafe. */
5868 if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5870 fputs ("\t.globl ", asm_out_file);
5871 assemble_name (asm_out_file, p->name);
5872 fputs (" .text\n", asm_out_file);
/* Otherwise emit an ordinary .extern with the recorded size, which
   lets the assembler decide whether $gp-relative addressing applies.  */
5876 fputs ("\t.extern\t", asm_out_file);
5877 assemble_name (asm_out_file, p->name);
5878 fprintf (asm_out_file, ", %d\n", p->size);
5885 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
5886 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
5889 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
5890 unsigned HOST_WIDE_INT size,
/* NOTE(review): the final parameter (presumably `unsigned int align`,
   used below) is on a line elided from this listing.  */
5893 /* If the target wants uninitialized const declarations in
5894 .rdata then don't put them in .comm. */
5895 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
5896 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
5897 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
/* Public symbols still need a .globl before the label.  */
5899 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
5900 targetm.asm_out.globalize_label (stream, name);
5902 switch_to_section (readonly_data_section);
5903 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
/* Emit "name:\n\t.space SIZE" -- a label plus reserved space in
   .rodata instead of a common symbol.  */
5904 mips_declare_object (stream, name, "",
5905 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Normal path: emit a .comm directive via the shared helper.  */
5909 mips_declare_common_object (stream, name, "\n\t.comm\t",
5913 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
5914 NAME is the name of the object and ALIGN is the required alignment
5915 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
5916 alignment argument. */
5919 mips_declare_common_object (FILE *stream, const char *name,
5920 const char *init_string,
5921 unsigned HOST_WIDE_INT size,
5922 unsigned int align, bool takes_alignment_p)
5924 if (!takes_alignment_p)
/* The directive cannot express the alignment, so round SIZE up to a
   multiple of it instead.
   NOTE(review): ALIGN is divided by BITS_PER_UNIT here, which suggests
   it is actually in bits despite the header comment saying bytes --
   confirm against callers.  */
5926 size += (align / BITS_PER_UNIT) - 1;
5927 size -= size % (align / BITS_PER_UNIT);
5928 mips_declare_object (stream, name, init_string,
5929 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
/* Directive takes an explicit alignment operand; pass it through.  */
5932 mips_declare_object (stream, name, init_string,
5933 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
5934 size, align / BITS_PER_UNIT);
5937 /* Emit either a label, .comm, or .lcomm directive. When using assembler
5938 macros, mark the symbol as written so that mips_file_end won't emit an
5939 .extern for it. STREAM is the output file, NAME is the name of the
5940 symbol, INIT_STRING is the string that should be written before the
5941 symbol and FINAL_STRING is the string that should be written after it.
5942 FINAL_STRING is a printf() format that consumes the remaining arguments. */
5945 mips_declare_object (FILE *stream, const char *name, const char *init_string,
5946 const char *final_string, ...)
5950 fputs (init_string, stream);
5951 assemble_name (stream, name);
5952 va_start (ap, final_string);
5953 vfprintf (stream, final_string, ap);
/* NOTE(review): the matching va_end call is not visible in this
   elided listing -- confirm it exists in the full source.  */
/* With assembler macros (no explicit relocs), record that the symbol
   has been defined so mips_file_end skips its .extern.  */
5956 if (!TARGET_EXPLICIT_RELOCS)
5958 tree name_tree = get_identifier (name);
5959 TREE_ASM_WRITTEN (name_tree) = 1;
5963 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
5964 extern int size_directive_output;
5966 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
5967 definitions except that it uses mips_declare_object() to emit the label. */
5970 mips_declare_object_name (FILE *stream, const char *name,
5971 tree decl ATTRIBUTE_UNUSED)
5973 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
5974 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
/* Emit a .size directive when the size is known at this point.
   (NOTE(review): DECL is referenced below despite the ATTRIBUTE_UNUSED
   marking above; the marking presumably covers configurations where the
   directives are absent.)  */
5977 size_directive_output = 0;
5978 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
5982 size_directive_output = 1;
5983 size = int_size_in_bytes (TREE_TYPE (decl));
5984 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
/* Finally emit the "name:" label itself.  */
5987 mips_declare_object (stream, name, "", ":\n");
5990 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
5993 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
5997 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
/* Emit a late .size directive for top-level objects whose size was not
   known when the name was declared (tentative definitions whose
   DECL_INITIAL is error_mark_node), provided none was emitted yet.  */
5998 if (!flag_inhibit_size_directive
5999 && DECL_SIZE (decl) != 0
6000 && !at_end && top_level
6001 && DECL_INITIAL (decl) == error_mark_node
6002 && !size_directive_output)
6006 size_directive_output = 1;
6007 size = int_size_in_bytes (TREE_TYPE (decl));
6008 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6013 /* Return true if X is a small data address that can be rewritten
/* (The tail of this comment is elided; X is rewritable when explicit
   relocations are in use and X is a small-data symbolic constant.)  */
6017 mips_rewrite_small_data_p (rtx x)
6019 enum mips_symbol_type symbol_type;
6021 return (TARGET_EXPLICIT_RELOCS
6022 && mips_symbolic_constant_p (x, &symbol_type)
6023 && symbol_type == SYMBOL_SMALL_DATA);
6027 /* A for_each_rtx callback for mips_small_data_pattern_p. */
6030 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
/* Skip LO_SUM subexpressions: their operand is already an explicit
   small-data reference, not a bare one.  (NOTE(review): the statement
   inside this `if` -- presumably `return -1;` to prune the walk -- is
   elided from the listing.)  */
6032 if (GET_CODE (*loc) == LO_SUM)
6035 return mips_rewrite_small_data_p (*loc);
6038 /* Return true if OP refers to small data symbols directly, not through
/* (Comment tail elided; i.e. not through a LO_SUM.)  for_each_rtx
   returns nonzero iff the callback flagged some subexpression.  */
6042 mips_small_data_pattern_p (rtx op)
6044 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6047 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
6050 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
/* Replace a bare small-data reference with a $gp-relative LO_SUM.  */
6052 if (mips_rewrite_small_data_p (*loc))
6053 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
/* Don't descend into LO_SUMs we (or earlier passes) already created.
   (NOTE(review): the body of this `if` and the final return are elided
   from the listing.)  */
6055 if (GET_CODE (*loc) == LO_SUM)
6061 /* If possible, rewrite OP so that it refers to small data using
6062 explicit relocations. */
6065 mips_rewrite_small_data (rtx op)
/* Work on a copy so the caller's pattern is not modified in place.  */
6067 op = copy_insn (op);
6068 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6072 /* Return true if the current function has an insn that implicitly
/* (Comment tail elided; i.e. implicitly uses the global pointer.)  */
6076 mips_function_has_gp_insn (void)
6078 /* Don't bother rechecking if we found one last time. */
6079 if (!cfun->machine->has_gp_insn_p)
/* Scan every real insn for either a GOT reference or a bare
   small-data pattern; both implicitly need $gp.  (The INSN_P test at
   the head of this condition is elided from the listing.)  */
6083 push_topmost_sequence ();
6084 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6086 && GET_CODE (PATTERN (insn)) != USE
6087 && GET_CODE (PATTERN (insn)) != CLOBBER
6088 && (get_attr_got (insn) != GOT_UNSET
6089 || small_data_pattern (PATTERN (insn), VOIDmode)))
6091 pop_topmost_sequence ();
/* Cache the result for subsequent queries.  */
6093 cfun->machine->has_gp_insn_p = (insn != 0);
6095 return cfun->machine->has_gp_insn_p;
6099 /* Return the register that should be used as the global pointer
6100 within this function. Return 0 if the function doesn't need
6101 a global pointer. */
6104 mips_global_pointer (void)
6108 /* $gp is always available in non-abicalls code. */
6109 if (!TARGET_ABICALLS)
6110 return GLOBAL_POINTER_REGNUM;
6112 /* We must always provide $gp when it is used implicitly. */
6113 if (!TARGET_EXPLICIT_RELOCS)
6114 return GLOBAL_POINTER_REGNUM;
6116 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
/* (Comment tail elided; i.e. give it a valid $gp for the call.)  */
6118 if (current_function_profile)
6119 return GLOBAL_POINTER_REGNUM;
6121 /* If the function has a nonlocal goto, $gp must hold the correct
6122 global pointer for the target function. */
6123 if (current_function_has_nonlocal_goto)
6124 return GLOBAL_POINTER_REGNUM;
6126 /* If the gp is never referenced, there's no need to initialize it.
6127 Note that reload can sometimes introduce constant pool references
6128 into a function that otherwise didn't need them. For example,
6129 suppose we have an instruction like:
6131 (set (reg:DF R1) (float:DF (reg:SI R2)))
6133 If R2 turns out to be constant such as 1, the instruction may have a
6134 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6135 using this constant if R2 doesn't get allocated to a register.
6137 In cases like these, reload will have added the constant to the pool
6138 but no instruction will yet refer to it. */
/* NOTE(review): the `return 0;` for this "no $gp needed" case is on an
   elided line.  */
6139 if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
6140 && !current_function_uses_const_pool
6141 && !mips_function_has_gp_insn ())
6144 /* We need a global pointer, but perhaps we can use a call-clobbered
6145 register instead of $gp. */
/* NOTE(review): the `return regno;` inside this search loop is on an
   elided line.  */
6146 if (TARGET_NEWABI && current_function_is_leaf)
6147 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6148 if (!regs_ever_live[regno]
6149 && call_used_regs[regno]
6150 && !fixed_regs[regno]
6151 && regno != PIC_FUNCTION_ADDR_REGNUM)
/* Fall back to the dedicated global pointer register.  */
6154 return GLOBAL_POINTER_REGNUM;
6158 /* Return true if the current function must save REGNO. */
6161 mips_save_reg_p (unsigned int regno)
6163 /* We only need to save $gp for NewABI PIC. */
6164 if (regno == GLOBAL_POINTER_REGNUM)
6165 return (TARGET_ABICALLS && TARGET_NEWABI
6166 && cfun->machine->global_pointer == regno);
6168 /* Check call-saved registers. */
6169 if (regs_ever_live[regno] && !call_used_regs[regno])
6172 /* We need to save the old frame pointer before setting up a new one. */
6173 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6176 /* We need to save the incoming return address if it is ever clobbered
6177 within the function. */
6178 if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
/* The remaining checks apply to mips16 code; the enclosing
   TARGET_MIPS16 guard is on a line elided from this listing.  */
6185 return_type = DECL_RESULT (current_function_decl);
6187 /* $18 is a special case in mips16 code. It may be used to call
6188 a function which returns a floating point value, but it is
6189 marked in call_used_regs. */
6190 if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
6193 /* $31 is also a special case. It will be used to copy a return
6194 value into the floating point registers if the return value is
/* (Comment tail elided; i.e. if the return value is a float.)  */
6196 if (regno == GP_REG_FIRST + 31
6197 && mips16_hard_float
6198 && !aggregate_value_p (return_type, current_function_decl)
6199 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6200 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE
6208 /* Return the bytes needed to compute the frame pointer from the current
6209 stack pointer. SIZE is the size (in bytes) of the local variables.
6211 MIPS stack frames look like:
6213 Before call After call
6214 +-----------------------+ +-----------------------+
6217 | caller's temps. | | caller's temps. |
6219 +-----------------------+ +-----------------------+
6221 | arguments on stack. | | arguments on stack. |
6223 +-----------------------+ +-----------------------+
6224 | 4 words to save | | 4 words to save |
6225 | arguments passed | | arguments passed |
6226 | in registers, even | | in registers, even |
6227 SP->| if not passed. | VFP->| if not passed. |
6228 +-----------------------+ +-----------------------+
6230 | fp register save |
6232 +-----------------------+
6234 | gp register save |
6236 +-----------------------+
6240 +-----------------------+
6242 | alloca allocations |
6244 +-----------------------+
6246 | GP save for V.4 abi |
6248 +-----------------------+
6250 | arguments on stack |
6252 +-----------------------+
6254 | arguments passed |
6255 | in registers, even |
6256 low SP->| if not passed. |
6257 memory +-----------------------+
6262 compute_frame_size (HOST_WIDE_INT size)
6265 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6266 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6267 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6268 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6269 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6270 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6271 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6272 unsigned int mask; /* mask of saved gp registers */
6273 unsigned int fmask; /* mask of saved fp registers */
6275 cfun->machine->global_pointer = mips_global_pointer ();
/* NOTE(review): the zero-initializations of gp_reg_size, fp_reg_size,
   mask and fmask appear to be on lines elided from this listing.  */
6281 var_size = MIPS_STACK_ALIGN (size);
6282 args_size = current_function_outgoing_args_size;
6283 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6285 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6286 functions. If the function has local variables, we're committed
6287 to allocating it anyway. Otherwise reclaim it here. */
6288 if (var_size == 0 && current_function_is_leaf)
6289 cprestore_size = args_size = 0;
6291 /* The MIPS 3.0 linker does not like functions that dynamically
6292 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6293 looks like we are trying to create a second frame pointer to the
6294 function, so allocate some stack space to make it happy. */
6296 if (args_size == 0 && current_function_calls_alloca)
6297 args_size = 4 * UNITS_PER_WORD;
6299 total_size = var_size + args_size + cprestore_size;
6301 /* Calculate space needed for gp registers. */
6302 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6303 if (mips_save_reg_p (regno))
6305 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6306 mask |= 1 << (regno - GP_REG_FIRST);
6309 /* We need to restore these for the handler. */
6310 if (current_function_calls_eh_return)
/* Add each EH data register to the GP save set; the list is
   terminated by INVALID_REGNUM.  */
6315 regno = EH_RETURN_DATA_REGNO (i);
6316 if (regno == INVALID_REGNUM)
6318 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6319 mask |= 1 << (regno - GP_REG_FIRST);
6323 /* This loop must iterate over the same space as its companion in
6324 save_restore_insns. */
/* FP registers are counted in FP_INC-sized groups, walking from the
   top of the FP bank downwards.  */
6325 for (regno = (FP_REG_LAST - FP_INC + 1);
6326 regno >= FP_REG_FIRST;
6329 if (mips_save_reg_p (regno))
6331 fp_reg_size += FP_INC * UNITS_PER_FPREG;
6332 fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
6336 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6337 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6339 /* Add in the space required for saving incoming register arguments. */
6340 total_size += current_function_pretend_args_size;
6341 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6343 /* Save other computed information. */
6344 cfun->machine->frame.total_size = total_size;
6345 cfun->machine->frame.var_size = var_size;
6346 cfun->machine->frame.args_size = args_size;
6347 cfun->machine->frame.cprestore_size = cprestore_size;
6348 cfun->machine->frame.gp_reg_size = gp_reg_size;
6349 cfun->machine->frame.fp_reg_size = fp_reg_size;
6350 cfun->machine->frame.mask = mask;
6351 cfun->machine->frame.fmask = fmask;
6352 cfun->machine->frame.initialized = reload_completed;
6353 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6354 cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
/* Record where the GP save area sits, both relative to the incoming
   $sp and to the frame base.  (The guard for this branch -- presumably
   `if (mask)` -- is on an elided line.)  */
6358 HOST_WIDE_INT offset;
6360 offset = (args_size + cprestore_size + var_size
6361 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6362 cfun->machine->frame.gp_sp_offset = offset;
6363 cfun->machine->frame.gp_save_offset = offset - total_size;
6367 cfun->machine->frame.gp_sp_offset = 0;
6368 cfun->machine->frame.gp_save_offset = 0;
/* Likewise for the FP save area (guard elided, presumably
   `if (fmask)`).  */
6373 HOST_WIDE_INT offset;
6375 offset = (args_size + cprestore_size + var_size
6376 + gp_reg_rounded + fp_reg_size
6377 - FP_INC * UNITS_PER_FPREG);
6378 cfun->machine->frame.fp_sp_offset = offset;
6379 cfun->machine->frame.fp_save_offset = offset - total_size;
6383 cfun->machine->frame.fp_sp_offset = 0;
6384 cfun->machine->frame.fp_save_offset = 0;
6387 /* Ok, we're done. */
6391 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6392 pointer or argument pointer. TO is either the stack pointer or
6393 hard frame pointer. */
6396 mips_initial_elimination_offset (int from, int to)
6398 HOST_WIDE_INT offset;
/* Make sure the frame layout is up to date before reading it.  */
6400 compute_frame_size (get_frame_size ());
6402 /* Set OFFSET to the offset from the stack pointer. */
/* (The `switch (from)` head is on a line elided from this listing.)  */
6405 case FRAME_POINTER_REGNUM:
6409 case ARG_POINTER_REGNUM:
6410 offset = (cfun->machine->frame.total_size
6411 - current_function_pretend_args_size);
/* In mips16 code the hard frame pointer sits above the outgoing
   argument area, so adjust when eliminating to it.  */
6418 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6419 offset -= cfun->machine->frame.args_size;
6424 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6425 back to a previous frame. */
6427 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
/* NOTE(review): the handling of COUNT != 0 (presumably returning a
   null/constant rtx) is on lines elided from this listing; only the
   COUNT == 0 path -- the incoming $31 value -- is visible.  */
6432 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6435 /* Use FN to save or restore register REGNO. MODE is the register's
6436 mode and OFFSET is the offset of its save slot from the current
/* (Comment tail elided; i.e. from the current stack pointer.)  */
6440 mips_save_restore_reg (enum machine_mode mode, int regno,
6441 HOST_WIDE_INT offset, mips_save_restore_fn fn)
/* Form the stack slot address and let FN do the actual move in
   whichever direction it implements.  */
6445 mem = gen_rtx_MEM (mode, plus_constant (stack_pointer_rtx, offset));
6447 fn (gen_rtx_REG (mode, regno), mem);
6451 /* Call FN for each register that is saved by the current function.
6452 SP_OFFSET is the offset of the current stack pointer from the start
/* (Comment tail elided; i.e. from the start of the frame.)  */
6456 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6458 #define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6460 enum machine_mode fpr_mode;
6461 HOST_WIDE_INT offset;
6464 /* Save registers starting from high to low. The debuggers prefer at least
6465 the return register be stored at func+4, and also it allows us not to
6466 need a nop in the epilog if at least one register is reloaded in
6467 addition to return address. */
6468 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6469 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6470 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6472 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6473 offset -= GET_MODE_SIZE (gpr_mode);
6476 /* This loop must iterate over the same space as its companion in
6477 compute_frame_size. */
6478 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
/* FP saves use SFmode on single-float targets, DFmode otherwise.  */
6479 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6480 for (regno = (FP_REG_LAST - FP_INC + 1);
6481 regno >= FP_REG_FIRST;
6483 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6485 mips_save_restore_reg (fpr_mode, regno, offset, fn);
6486 offset -= GET_MODE_SIZE (fpr_mode);
6491 /* If we're generating n32 or n64 abicalls, and the current function
6492 does not use $28 as its global pointer, emit a cplocal directive.
6493 Use pic_offset_table_rtx as the argument to the directive. */
6496 mips_output_cplocal (void)
/* Only needed when assembler macros are in use (no explicit relocs)
   and a non-default register was chosen for the global pointer.  */
6498 if (!TARGET_EXPLICIT_RELOCS
6499 && cfun->machine->global_pointer > 0
6500 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6501 output_asm_insn (".cplocal %+", 0);
6504 /* Return the style of GP load sequence that is being used for the
6505 current function. */
6507 enum mips_loadgp_style
6508 mips_current_loadgp_style (void)
/* No abicalls, or no $gp needed: no loadgp sequence at all.
   (NOTE(review): the `return LOADGP_NONE;` for this branch is on a
   line elided from this listing.)  */
6510 if (!TARGET_ABICALLS || cfun->machine->global_pointer == 0)
6513 if (TARGET_ABSOLUTE_ABICALLS)
6514 return LOADGP_ABSOLUTE;
6516 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
6519 /* The __gnu_local_gp symbol. */
6521 static GTY(()) rtx mips_gnu_local_gp;
6523 /* If we're generating n32 or n64 abicalls, emit instructions
6524 to set up the global pointer. */
6527 mips_emit_loadgp (void)
6529 rtx addr, offset, incoming_address;
6531 switch (mips_current_loadgp_style ())
6533 case LOADGP_ABSOLUTE:
/* Lazily create the __gnu_local_gp SYMBOL_REF and mark it local so
   it is not treated as preemptible.  */
6534 if (mips_gnu_local_gp == NULL)
6536 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
6537 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
6539 emit_insn (gen_loadgp_noshared (mips_gnu_local_gp));
/* NOTE(review): the case label for this arm (presumably
   LOADGP_NEWABI) is on a line elided from this listing.  */
6543 addr = XEXP (DECL_RTL (current_function_decl), 0);
6544 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6545 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6546 emit_insn (gen_loadgp (offset, incoming_address));
/* Without explicit relocs, stop the scheduler from moving
   $gp-dependent insns above the loadgp sequence.  */
6547 if (!TARGET_EXPLICIT_RELOCS)
6548 emit_insn (gen_loadgp_blockage ());
6556 /* Set up the stack and frame (if desired) for the function. */
6559 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6562 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6564 #ifdef SDB_DEBUGGING_INFO
6565 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6566 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6569 /* In mips16 mode, we may need to generate a 32 bit to handle
6570 floating point arguments. The linker will arrange for any 32 bit
6571 functions to call this stub, which will then jump to the 16 bit
/* (Comment tail elided; i.e. jump to the 16 bit function proper.)  */
6573 if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6574 && current_function_args_info.fp_code != 0)
6575 build_mips16_function_stub (file);
6577 if (!FUNCTION_NAME_ALREADY_DECLARED)
6579 /* Get the function name the same way that toplev.c does before calling
6580 assemble_start_function. This is needed so that the name used here
6581 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6582 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6584 if (!flag_inhibit_size_directive)
6586 fputs ("\t.ent\t", file);
6587 assemble_name (file, fnname);
/* Emit the function's entry label.  */
6591 assemble_name (file, fnname);
6592 fputs (":\n", file);
6595 /* Stop mips_file_end from treating this function as external. */
6596 if (TARGET_IRIX && mips_abi == ABI_32)
6597 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6599 if (!flag_inhibit_size_directive)
6601 /* .frame FRAMEREG, FRAMESIZE, RETREG */
6603 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6604 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6605 ", args= " HOST_WIDE_INT_PRINT_DEC
6606 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6607 (reg_names[(frame_pointer_needed)
6608 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
/* mips16 frame pointers exclude the outgoing argument area, so the
   reported frame size is reduced accordingly.  */
6609 ((frame_pointer_needed && TARGET_MIPS16)
6610 ? tsize - cfun->machine->frame.args_size
6612 reg_names[GP_REG_FIRST + 31],
6613 cfun->machine->frame.var_size,
6614 cfun->machine->frame.num_gp,
6615 cfun->machine->frame.num_fp,
6616 cfun->machine->frame.args_size,
6617 cfun->machine->frame.cprestore_size);
6619 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6620 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6621 cfun->machine->frame.mask,
6622 cfun->machine->frame.gp_save_offset);
6623 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6624 cfun->machine->frame.fmask,
6625 cfun->machine->frame.fp_save_offset);
/* (Comment head elided; requirements are:)  */
6628 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6629 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
6632 if (mips_current_loadgp_style () == LOADGP_OLDABI)
6634 /* Handle the initialization of $gp for SVR4 PIC. */
6635 if (!cfun->machine->all_noreorder_p)
6636 output_asm_insn ("%(.cpload\t%^%)", 0)
6638 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6640 else if (cfun->machine->all_noreorder_p)
6641 output_asm_insn ("%(%<", 0);
6643 /* Tell the assembler which register we're using as the global
6644 pointer. This is needed for thunks, since they can use either
6645 explicit relocs or assembler macros. */
6646 mips_output_cplocal ();
6649 /* Make the last instruction frame related and note that it performs
6650 the operation described by FRAME_PATTERN. */
6653 mips_set_frame_expr (rtx frame_pattern)
/* Attach a REG_FRAME_RELATED_EXPR note so dwarf2out describes the
   CFA effect using FRAME_PATTERN rather than the raw insn.  */
6657 insn = get_last_insn ();
6658 RTX_FRAME_RELATED_P (insn) = 1;
6659 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6665 /* Return a frame-related rtx that stores REG at MEM.
6666 REG must be a single register. */
6669 mips_frame_set (rtx mem, rtx reg)
6673 /* If we're saving the return address register and the dwarf return
6674 address column differs from the hard register number, adjust the
6675 note reg to refer to the former. */
6676 if (REGNO (reg) == GP_REG_FIRST + 31
6677 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6678 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
/* Mark the SET itself as frame-related so dwarf2out records the
   register save.  */
6680 set = gen_rtx_SET (VOIDmode, mem, reg);
6681 RTX_FRAME_RELATED_P (set) = 1;
6687 /* Save register REG to MEM. Make the instruction frame-related. */
6690 mips_save_reg (rtx reg, rtx mem)
/* A 64-bit FP value on a 32-bit FPU must be stored as two 32-bit
   halves; describe both halves to the unwinder via a PARALLEL.  */
6692 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6696 if (mips_split_64bit_move_p (mem, reg))
6697 mips_split_64bit_move (mem, reg);
6699 emit_move_insn (mem, reg);
6701 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6702 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6703 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
/* (The `else if` head of this condition -- presumably a TARGET_MIPS16
   test -- is on a line elided from this listing.)  */
6708 && REGNO (reg) != GP_REG_FIRST + 31
6709 && !M16_REG_P (REGNO (reg)))
6711 /* Save a non-mips16 register by moving it through a temporary.
6712 We don't need to do this for $31 since there's a special
6713 instruction for it. */
6714 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6715 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
/* Plain case: one store, one frame note.  */
6718 emit_move_insn (mem, reg);
6720 mips_set_frame_expr (mips_frame_set (mem, reg));
6725 /* Expand the prologue into a bunch of separate insns. */
6728 mips_expand_prologue (void)
/* Switch pic_offset_table_rtx over to whichever register was chosen
   to hold the global pointer for this function.  */
6732 if (cfun->machine->global_pointer > 0)
6733 REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6735 size = compute_frame_size (get_frame_size ());
6737 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
6738 bytes beforehand; this is enough to cover the register save area
6739 without going out of range. */
6740 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6742 HOST_WIDE_INT step1;
6744 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6745 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6747 GEN_INT (-step1)))) = 1;
/* NOTE(review): the `size -= step1;` between the adjustment and the
   register saves appears to be on a line elided from this listing.  */
6749 mips_for_each_saved_reg (size, mips_save_reg);
6752 /* Allocate the rest of the frame. */
/* (The `if (size > 0)` guard for this section is on an elided line.)  */
6755 if (SMALL_OPERAND (-size))
6756 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6758 GEN_INT (-size)))) = 1;
/* Size doesn't fit in an immediate: load it into a temporary first.  */
6761 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
/* (The TARGET_MIPS16 test guarding the frame-pointer workaround below
   is on an elided line.)  */
6764 /* There are no instructions to add or subtract registers
6765 from the stack pointer, so use the frame pointer as a
6766 temporary. We should always be using a frame pointer
6767 in this case anyway. */
6768 gcc_assert (frame_pointer_needed);
6769 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6770 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6771 hard_frame_pointer_rtx,
6772 MIPS_PROLOGUE_TEMP (Pmode)));
6773 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6776 emit_insn (gen_sub3_insn (stack_pointer_rtx,
6778 MIPS_PROLOGUE_TEMP (Pmode)));
6780 /* Describe the combined effect of the previous instructions. */
6782 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6783 plus_constant (stack_pointer_rtx, -size)));
6787 /* Set up the frame pointer, if we're using one. In mips16 code,
6788 we point the frame pointer ahead of the outgoing argument area.
6789 This should allow more variables & incoming arguments to be
6790 accessed with unextended instructions. */
6791 if (frame_pointer_needed)
6793 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6795 rtx offset = GEN_INT (cfun->machine->frame.args_size);
6796 if (SMALL_OPERAND (cfun->machine->frame.args_size))
6798 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6803 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
6804 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6805 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6806 hard_frame_pointer_rtx,
6807 MIPS_PROLOGUE_TEMP (Pmode)));
/* Record $fp = $sp + args_size for the unwinder.  */
6809 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
6810 plus_constant (stack_pointer_rtx,
6811 cfun->machine->frame.args_size)));
6815 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6816 stack_pointer_rtx)) = 1;
6819 mips_emit_loadgp ();
6821 /* If generating o32/o64 abicalls, save $gp on the stack. */
6822 if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6823 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6825 /* If we are profiling, make sure no instructions are scheduled before
6826 the call to mcount. */
6828 if (current_function_profile)
6829 emit_insn (gen_blockage ());
6832 /* Do any necessary cleanup after a function to restore stack, frame,
/* (Comment tail elided; i.e. and regs.)  */
6835 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
6838 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6839 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6841 /* Reinstate the normal $gp. */
6842 REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6843 mips_output_cplocal ();
/* Undo the .set noreorder/nomacro emitted by the prologue for
   all-noreorder functions.  */
6845 if (cfun->machine->all_noreorder_p)
6847 /* Avoid using %>%) since it adds excess whitespace. */
6848 output_asm_insn (".set\tmacro", 0);
6849 output_asm_insn (".set\treorder", 0);
6850 set_noreorder = set_nomacro = 0;
6853 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6857 /* Get the function name the same way that toplev.c does before calling
6858 assemble_start_function. This is needed so that the name used here
6859 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
6860 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6861 fputs ("\t.end\t", file);
6862 assemble_name (file, fnname);
6867 /* Emit instructions to restore register REG from slot MEM. */
6870 mips_restore_reg (rtx reg, rtx mem)
6872 /* There's no mips16 instruction to load $31 directly. Load into
6873 $7 instead and adjust the return insn appropriately. */
6874 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6875 reg = gen_rtx_REG (GET_MODE (reg), 7);
6877 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6879 /* Can't restore directly; move through a temporary. */
6880 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6881 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
/* Normal case: a single load from the save slot.  */
6884 emit_move_insn (reg, mem);
6888 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
6889 if this epilogue precedes a sibling call, false if it is for a normal
6890 "epilogue" pattern. */
6893 mips_expand_epilogue (int sibcall_p)
6895 HOST_WIDE_INT step1, step2;
/* Fast path: nothing to restore, just emit a bare return.  */
6898 if (!sibcall_p && mips_can_use_return_insn ())
6900 emit_jump_insn (gen_return ());
6904 /* Split the frame into two. STEP1 is the amount of stack we should
6905 deallocate before restoring the registers. STEP2 is the amount we
6906 should deallocate afterwards.
6908 Start off by assuming that no registers need to be restored. */
6909 step1 = cfun->machine->frame.total_size;
/* NOTE(review): the `step2 = 0;` initialization appears to be on a
   line elided from this listing.  */
6912 /* Work out which register holds the frame address. Account for the
6913 frame pointer offset used by mips16 code. */
6914 if (!frame_pointer_needed)
6915 base = stack_pointer_rtx;
6918 base = hard_frame_pointer_rtx;
/* (The TARGET_MIPS16 guard for this adjustment is on an elided line.)  */
6920 step1 -= cfun->machine->frame.args_size;
6923 /* If we need to restore registers, deallocate as much stack as
6924 possible in the second step without going out of range. */
6925 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6927 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
/* NOTE(review): the matching `step1 -= step2;` is on an elided line.  */
6931 /* Set TARGET to BASE + STEP1. */
/* (The guard and TARGET initialization around this section are on
   elided lines.)  */
6937 /* Get an rtx for STEP1 that we can add to BASE. */
6938 adjust = GEN_INT (step1);
6939 if (!SMALL_OPERAND (step1))
6941 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
6942 adjust = MIPS_EPILOGUE_TEMP (Pmode);
6945 /* Normal mode code can copy the result straight into $sp. */
6947 target = stack_pointer_rtx;
6949 emit_insn (gen_add3_insn (target, base, adjust));
6952 /* Copy TARGET into the stack pointer. */
6953 if (target != stack_pointer_rtx)
6954 emit_move_insn (stack_pointer_rtx, target);
6956 /* If we're using addressing macros for n32/n64 abicalls, $gp is
6957 implicitly used by all SYMBOL_REFs. We must emit a blockage
6958 insn before restoring it. */
6959 if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
6960 emit_insn (gen_blockage ());
6962 /* Restore the registers. */
6963 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
6966 /* Deallocate the final bit of the frame. */
/* (The `if (step2 > 0)` guard is on an elided line.)  */
6968 emit_insn (gen_add3_insn (stack_pointer_rtx,
6972 /* Add in the __builtin_eh_return stack adjustment. We need to
6973 use a temporary in mips16 code. */
6974 if (current_function_calls_eh_return)
6978 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
6979 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
6980 MIPS_EPILOGUE_TEMP (Pmode),
6981 EH_RETURN_STACKADJ_RTX));
6982 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
6985 emit_insn (gen_add3_insn (stack_pointer_rtx,
6987 EH_RETURN_STACKADJ_RTX));
/* Emit the return itself unless a sibcall follows (the `if
   (!sibcall_p)` guard is on an elided line).  */
6992 /* The mips16 loads the return address into $7, not $31. */
6993 if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
6994 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6995 GP_REG_FIRST + 7)));
6997 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
6998 GP_REG_FIRST + 31)));
7002 /* Return nonzero if this function is known to have a null epilogue.
7003 This allows the optimizer to omit jumps to jumps if no stack
7007 mips_can_use_return_insn (void)
/* Frame layout is only final after reload; be conservative before then.  */
7011 if (! reload_completed)
/* $31 in use (return address saved) or profiling both force a real
   epilogue.  */
7014 if (regs_ever_live[31] || current_function_profile)
7017 return_type = DECL_RESULT (current_function_decl);
7019 /* In mips16 mode, a function which returns a floating point value
7020 needs to arrange to copy the return value into the floating point
7023 && mips16_hard_float
7024 && ! aggregate_value_p (return_type, current_function_decl)
7025 && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
7026 && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
/* Use the cached frame size when available; otherwise compute it.  */
7029 if (cfun->machine->frame.initialized)
7030 return cfun->machine->frame.total_size == 0;
7032 return compute_frame_size (get_frame_size ()) == 0;
7035 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
7036 in order to avoid duplicating too much logic from elsewhere. */
7039 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7040 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
/* FILE receives the generated assembly.  DELTA and VCALL_OFFSET adjust
   the incoming "this" pointer before control is passed to FUNCTION.  */
7043 rtx this, temp1, temp2, insn, fnaddr;
7045 /* Pretend to be a post-reload pass while generating rtl. */
7047 reload_completed = 1;
7048 reset_block_changes ();
7050 /* Pick a global pointer for -mabicalls. Use $15 rather than $28
7051 for TARGET_NEWABI since the latter is a call-saved register. */
7052 if (TARGET_ABICALLS)
7053 cfun->machine->global_pointer
7054 = REGNO (pic_offset_table_rtx)
7055 = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
7057 /* Set up the global pointer for n32 or n64 abicalls. */
7058 mips_emit_loadgp ();
7060 /* We need two temporary registers in some cases. */
7061 temp1 = gen_rtx_REG (Pmode, 2);
7062 temp2 = gen_rtx_REG (Pmode, 3);
7064 /* Find out which register contains the "this" pointer. */
/* When the result is returned in memory, the hidden return-slot pointer
   occupies the first argument register and "this" moves to the second.  */
7065 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7066 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
7068 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
7070 /* Add DELTA to THIS. */
7073 rtx offset = GEN_INT (delta);
7074 if (!SMALL_OPERAND (delta))
7076 emit_move_insn (temp1, offset);
7079 emit_insn (gen_add3_insn (this, this, offset));
7082 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
7083 if (vcall_offset != 0)
7087 /* Set TEMP1 to *THIS. */
7088 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
7090 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
7091 addr = mips_add_offset (temp2, temp1, vcall_offset);
7093 /* Load the offset and add it to THIS. */
7094 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
7095 emit_insn (gen_add3_insn (this, this, temp1));
7098 /* Jump to the target function. Use a sibcall if direct jumps are
7099 allowed, otherwise load the address into a register first. */
7100 fnaddr = XEXP (DECL_RTL (function), 0);
7101 if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
7103 /* This is messy. gas treats "la $25,foo" as part of a call
7104 sequence and may allow a global "foo" to be lazily bound.
7105 The general move patterns therefore reject this combination.
7107 In this context, lazy binding would actually be OK for o32 and o64,
7108 but it's still wrong for n32 and n64; see mips_load_call_address.
7109 We must therefore load the address via a temporary register if
7110 mips_dangerous_for_la25_p.
7112 If we jump to the temporary register rather than $25, the assembler
7113 can use the move insn to fill the jump's delay slot. */
7114 if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
7115 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7116 mips_load_call_address (temp1, fnaddr, true);
7118 if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
7119 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
7120 emit_jump_insn (gen_indirect_jump (temp1));
/* Direct jumps are allowed: emit a plain sibcall to FNADDR.  */
7124 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
7125 SIBLING_CALL_P (insn) = 1;
7128 /* Run just enough of rest_of_compilation. This sequence was
7129 "borrowed" from alpha.c. */
7130 insn = get_insns ();
7131 insn_locators_initialize ();
7132 split_all_insns_noflow ();
7134 mips16_lay_out_constants ();
7135 shorten_branches (insn);
7136 final_start_function (insn, file, 1);
7137 final (insn, file, 1);
7138 final_end_function ();
7140 /* Clean up the vars set above. Note that final_end_function resets
7141 the global pointer for us. */
7142 reload_completed = 0;
7146 /* Returns nonzero if X contains a SYMBOL_REF. */
7149 symbolic_expression_p (rtx x)
7151 if (GET_CODE (x) == SYMBOL_REF)
/* CONST wraps an expression; look inside it.  */
7154 if (GET_CODE (x) == CONST)
7155 return symbolic_expression_p (XEXP (x, 0));
/* NOTE(review): this recursion is presumably guarded by a unary-operator
   check on the preceding line — confirm before relying on it.  */
7158 return symbolic_expression_p (XEXP (x, 0));
/* Binary arithmetic: a symbol in either operand makes X symbolic.  */
7160 if (ARITHMETIC_P (x))
7161 return (symbolic_expression_p (XEXP (x, 0))
7162 || symbolic_expression_p (XEXP (x, 1)));
7167 /* Choose the section to use for the constant rtx expression X that has
7171 mips_select_rtx_section (enum machine_mode mode, rtx x,
7172 unsigned HOST_WIDE_INT align)
7176 /* In mips16 mode, the constant table always goes in the same section
7177 as the function, so that constants can be loaded using PC relative
7179 return function_section (current_function_decl);
7181 else if (TARGET_EMBEDDED_DATA)
7183 /* For embedded applications, always put constants in read-only data,
7184 in order to reduce RAM usage. */
7185 return mergeable_constant_section (mode, align, 0);
7189 /* For hosted applications, always put constants in small data if
7190 possible, as this gives the best performance. */
7191 /* ??? Consider using mergeable small data sections. */
7193 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
7194 && mips_section_threshold > 0)
7195 return get_named_section (NULL, ".sdata", 0);
/* Symbolic constants under -fpic must be relocated at load time, so they
   go in .data.rel.ro rather than a pure read-only section.  */
7196 else if (flag_pic && symbolic_expression_p (x))
7197 return get_named_section (NULL, ".data.rel.ro", 3);
7199 return mergeable_constant_section (mode, align, 0);
7203 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7205 The complication here is that, with the combination TARGET_ABICALLS
7206 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
7207 therefore not be included in the read-only part of a DSO. Handle such
7208 cases by selecting a normal data section instead of a read-only one.
7209 The logic apes that in default_function_rodata_section. */
7212 mips_function_rodata_section (tree decl)
7214 if (!TARGET_ABICALLS || TARGET_GPWORD)
7215 return default_function_rodata_section (decl);
7217 if (decl && DECL_SECTION_NAME (decl))
7219 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
/* Mirror a ".gnu.linkonce.t.FOO" function section with a writable
   link-once data section.  */
7220 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7222 char *rname = ASTRDUP (name);
7224 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7226 else if (flag_function_sections && flag_data_sections
7227 && strncmp (name, ".text.", 6) == 0)
7229 char *rname = ASTRDUP (name);
/* Rewrite the ".text.FOO" prefix in place to ".data.FOO".  */
7230 memcpy (rname + 1, "data", 4);
7231 return get_section (rname, SECTION_WRITE, decl);
7234 return data_section;
7237 /* Implement TARGET_IN_SMALL_DATA_P. Return true if it would be safe to
7238 access DECL using %gp_rel(...)($gp). */
7241 mips_in_small_data_p (tree decl)
/* String constants and functions never live in small data.  */
7245 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7248 /* We don't yet generate small-data references for -mabicalls. See related
7249 -G handling in override_options. */
7250 if (TARGET_ABICALLS)
/* A variable with an explicit section attribute is honored only if the
   section is one of the known small-data sections.  */
7253 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7257 /* Reject anything that isn't in a known small-data section. */
7258 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7259 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7262 /* If a symbol is defined externally, the assembler will use the
7263 usual -G rules when deciding how to implement macros. */
7264 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
7267 else if (TARGET_EMBEDDED_DATA)
7269 /* Don't put constants into the small data section: we want them
7270 to be in ROM rather than RAM. */
7271 if (TREE_CODE (decl) != VAR_DECL)
7274 if (TREE_READONLY (decl)
7275 && !TREE_SIDE_EFFECTS (decl)
7276 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
/* Finally, apply the -G size threshold to everything that remains.  */
7280 size = int_size_in_bytes (TREE_TYPE (decl));
7281 return (size > 0 && size <= mips_section_threshold);
7284 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
7285 anchors for small data: the GP register acts as an anchor in that
7286 case. We also don't want to use them for PC-relative accesses,
7287 where the PC acts as an anchor. */
7290 mips_use_anchors_for_symbol_p (rtx symbol)
/* Dispatch on the symbol's classification; the cases listed here are
   the ones where an existing anchor register already applies.  */
7292 switch (mips_classify_symbol (symbol))
7294 case SYMBOL_CONSTANT_POOL:
7295 case SYMBOL_SMALL_DATA:
7303 /* See whether VALTYPE is a record whose fields should be returned in
7304 floating-point registers. If so, return the number of fields and
7305 list them in FIELDS (which should have two elements). Return 0
7308 For n32 & n64, a structure with one or two fields is returned in
7309 floating-point registers as long as every field has a floating-point
7313 mips_fpr_return_fields (tree valtype, tree *fields)
/* Only RECORD_TYPEs (structs) qualify; anything else returns 0.  */
7321 if (TREE_CODE (valtype) != RECORD_TYPE)
/* Walk the member chain, skipping non-FIELD_DECL entries (e.g. types
   and methods); every data field must have REAL_TYPE.  */
7325 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
7327 if (TREE_CODE (field) != FIELD_DECL)
7330 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
7336 fields[i++] = field;
7342 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
7343 a value in the most significant part of $2/$3 if:
7345 - the target is big-endian;
7347 - the value has a structure or union type (we generalize this to
7348 cover aggregates from other languages too); and
7350 - the structure is not returned in floating-point registers. */
7353 mips_return_in_msb (tree valtype)
/* All four conditions from the comment above, tested in order; FIELDS
   is only needed as scratch for mips_fpr_return_fields.  */
7357 return (TARGET_NEWABI
7358 && TARGET_BIG_ENDIAN
7359 && AGGREGATE_TYPE_P (valtype)
7360 && mips_fpr_return_fields (valtype, fields) == 0);
7364 /* Return a composite value in a pair of floating-point registers.
7365 MODE1 and OFFSET1 are the mode and byte offset for the first value,
7366 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
7369 For n32 & n64, $f0 always holds the first value and $f2 the second.
7370 Otherwise the values are packed together as closely as possible. */
7373 mips_return_fpr_pair (enum machine_mode mode,
7374 enum machine_mode mode1, HOST_WIDE_INT offset1,
7375 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* INC selects the register stride: 2 registers apart for the new ABIs,
   otherwise the target's natural FP register increment.  */
7379 inc = (TARGET_NEWABI ? 2 : FP_INC);
/* Build a PARALLEL describing the two (register, byte-offset) pieces.  */
7380 return gen_rtx_PARALLEL
7383 gen_rtx_EXPR_LIST (VOIDmode,
7384 gen_rtx_REG (mode1, FP_RETURN),
7386 gen_rtx_EXPR_LIST (VOIDmode,
7387 gen_rtx_REG (mode2, FP_RETURN + inc),
7388 GEN_INT (offset2))));
7393 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
7394 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
7395 VALTYPE is null and MODE is the mode of the return value. */
7398 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
7399 enum machine_mode mode)
7406 mode = TYPE_MODE (valtype);
7407 unsignedp = TYPE_UNSIGNED (valtype);
7409 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
7410 true, we must promote the mode just as PROMOTE_MODE does. */
7411 mode = promote_mode (valtype, mode, &unsignedp, 1);
7413 /* Handle structures whose fields are returned in $f0/$f2. */
/* One FP field -> single FPR; two FP fields -> an FPR pair placed at
   each field's byte offset.  */
7414 switch (mips_fpr_return_fields (valtype, fields))
7417 return gen_rtx_REG (mode, FP_RETURN);
7420 return mips_return_fpr_pair (mode,
7421 TYPE_MODE (TREE_TYPE (fields[0])),
7422 int_byte_position (fields[0]),
7423 TYPE_MODE (TREE_TYPE (fields[1])),
7424 int_byte_position (fields[1]));
7427 /* If a value is passed in the most significant part of a register, see
7428 whether we have to round the mode up to a whole number of words. */
7429 if (mips_return_in_msb (valtype))
7431 HOST_WIDE_INT size = int_size_in_bytes (valtype);
7432 if (size % UNITS_PER_WORD != 0)
7434 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
7435 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7439 /* For EABI, the class of return register depends entirely on MODE.
7440 For example, "struct { some_type x; }" and "union { some_type x; }"
7441 are returned in the same way as a bare "some_type" would be.
7442 Other ABIs only use FPRs for scalar, complex or vector types. */
7443 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7444 return gen_rtx_REG (mode, GP_RETURN);
/* Scalar or vector float values that fit in one hardware FP value go
   in $f0.  */
7447 if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7448 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7449 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7450 return gen_rtx_REG (mode, FP_RETURN);
7452 /* Handle long doubles for n32 & n64. */
7454 return mips_return_fpr_pair (mode,
7456 DImode, GET_MODE_SIZE (mode) / 2);
/* Complex floats split across an FPR pair: real part first, imaginary
   part at half the total size.  */
7458 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7459 && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7460 return mips_return_fpr_pair (mode,
7461 GET_MODE_INNER (mode), 0,
7462 GET_MODE_INNER (mode),
7463 GET_MODE_SIZE (mode) / 2);
/* Everything else comes back in the general-purpose return register.  */
7465 return gen_rtx_REG (mode, GP_RETURN);
7468 /* Return nonzero when an argument must be passed by reference. */
7471 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7472 enum machine_mode mode, tree type,
7473 bool named ATTRIBUTE_UNUSED)
7475 if (mips_abi == ABI_EABI)
7479 /* ??? How should SCmode be handled? */
/* EABI: pass by reference when the type is unknown or is a full
   64-bit scalar.  */
7480 if (type == NULL_TREE || mode == DImode || mode == DFmode)
/* Variable-sized (-1) or larger-than-a-word types also go by
   reference under EABI.  */
7483 size = int_size_in_bytes (type);
7484 return size == -1 || size > UNITS_PER_WORD;
7488 /* If we have a variable-sized parameter, we have no choice. */
7489 return targetm.calls.must_pass_in_stack (mode, type);
/* Implement TARGET_CALLEE_COPIES: under EABI the callee is responsible
   for copying named by-reference arguments.  */
7494 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7495 enum machine_mode mode ATTRIBUTE_UNUSED,
7496 tree type ATTRIBUTE_UNUSED, bool named)
7498 return mips_abi == ABI_EABI && named;
7501 /* Return true if registers of class CLASS cannot change from mode FROM
7505 mips_cannot_change_mode_class (enum machine_mode from,
7506 enum machine_mode to, enum reg_class class)
/* The restrictions below only matter when one mode fits in a single
   word and the other spans multiple words.  */
7508 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7509 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7511 if (TARGET_BIG_ENDIAN)
7513 /* When a multi-word value is stored in paired floating-point
7514 registers, the first register always holds the low word.
7515 We therefore can't allow FPRs to change between single-word
7516 and multi-word modes. */
7517 if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7522 /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7523 in LO and HI, the high word always comes first. We therefore
7524 can't allow values stored in HI to change between single-word
7525 and multi-word modes.
7526 This rule applies to both the original HI/LO pair and the new
7527 DSP accumulators. */
7528 if (reg_classes_intersect_p (ACC_REGS, class))
7532 /* Loading a 32-bit value into a 64-bit floating-point register
7533 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
7534 We can't allow 64-bit float registers to change from SImode to
7538 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7539 && reg_classes_intersect_p (FP_REGS, class))
7544 /* Return true if X should not be moved directly into register $25.
7545 We need this because many versions of GAS will treat "la $25,foo" as
7546 part of a call sequence and so allow a global "foo" to be lazily bound. */
7549 mips_dangerous_for_la25_p (rtx x)
7551 HOST_WIDE_INT offset;
/* With explicit relocation operators the assembler macro problem does
   not arise, so nothing is dangerous.  */
7553 if (TARGET_EXPLICIT_RELOCS)
/* Strip any constant offset, then test whether the base symbol is a
   global GOT operand.  */
7556 mips_split_const (x, &x, &offset);
7557 return global_got_operand (x, VOIDmode);
7560 /* Implement PREFERRED_RELOAD_CLASS. */
7563 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Values unsafe for $25 must be loaded through the LEA_REGS subset.  */
7565 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
/* Hard-float values prefer FP registers when the class allows them.  */
7568 if (TARGET_HARD_FLOAT
7569 && FLOAT_MODE_P (GET_MODE (x))
7570 && reg_class_subset_p (FP_REGS, class))
7573 if (reg_class_subset_p (GR_REGS, class))
7576 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7582 /* This function returns the register class required for a secondary
7583 register when copying between one of the registers in CLASS, and X,
7584 using MODE. If IN_P is nonzero, the copy is going from X to the
7585 register, otherwise the register is the source. A return value of
7586 NO_REGS means that no secondary register is required. */
7589 mips_secondary_reload_class (enum reg_class class,
7590 enum machine_mode mode, rtx x, int in_p)
/* GR_REGS shrinks to the mips16-accessible subset in mips16 mode.  */
7592 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7596 if (REG_P (x)|| GET_CODE (x) == SUBREG)
7597 regno = true_regnum (x);
7599 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
/* Values dangerous for $25 need an intermediate when CLASS contains
   register 25.  */
7601 if (mips_dangerous_for_la25_p (x))
7604 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7608 /* Copying from HI or LO to anywhere other than a general register
7609 requires a general register.
7610 This rule applies to both the original HI/LO pair and the new
7611 DSP accumulators. */
7612 if (reg_class_subset_p (class, ACC_REGS))
7614 if (TARGET_MIPS16 && in_p)
7616 /* We can't really copy to HI or LO at all in mips16 mode. */
7619 return gp_reg_p ? NO_REGS : gr_regs;
7621 if (ACC_REG_P (regno))
7623 if (TARGET_MIPS16 && ! in_p)
7625 /* We can't really copy to HI or LO at all in mips16 mode. */
7628 return class == gr_regs ? NO_REGS : gr_regs;
7631 /* We can only copy a value to a condition code register from a
7632 floating point register, and even then we require a scratch
7633 floating point register. We can only copy a value out of a
7634 condition code register into a general register. */
7635 if (class == ST_REGS)
7639 return gp_reg_p ? NO_REGS : gr_regs;
7641 if (ST_REG_P (regno))
7645 return class == gr_regs ? NO_REGS : gr_regs;
7648 if (class == FP_REGS)
7652 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
7655 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7657 /* We can use the l.s and l.d macros to load floating-point
7658 constants. ??? For l.s, we could probably get better
7659 code by returning GR_REGS here. */
7662 else if (gp_reg_p || x == CONST0_RTX (mode))
7664 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
7667 else if (FP_REG_P (regno))
7669 /* In this case we can use mov.s or mov.d. */
7674 /* Otherwise, we need to reload through an integer register. */
7679 /* In mips16 mode, going between memory and anything but M16_REGS
7680 requires an M16_REG. */
7683 if (class != M16_REGS && class != M16_NA_REGS)
7691 if (class == M16_REGS || class == M16_NA_REGS)
7700 /* Implement CLASS_MAX_NREGS.
7702 Usually all registers are word-sized. The only supported exception
7703 is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7704 registers. A word-based calculation is correct even in that case,
7705 since -msingle-float disallows multi-FPR values.
7707 The FP status registers are an exception to this rule. They are always
7708 4 bytes wide as they only hold condition code modes, and CCmode is always
7709 considered to be 4 bytes wide. */
7712 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7713 enum machine_mode mode)
/* Condition-code registers: round up to 4-byte units.  */
7715 if (class == ST_REGS)
7716 return (GET_MODE_SIZE (mode) + 3) / 4;
/* Everything else: round up to whole words.  */
7718 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Implement TARGET_VALID_POINTER_MODE: SImode always works as a pointer
   mode; DImode works only on 64-bit targets.  */
7722 mips_valid_pointer_mode (enum machine_mode mode)
7724 return (mode == SImode || (TARGET_64BIT && mode == DImode));
7727 /* Target hook for vector_mode_supported_p. */
7730 mips_vector_mode_supported_p (enum machine_mode mode)
/* NOTE(review): this return handles one of the switch cases (a paired
   single-float vector mode, from the shape of the test) — the case labels
   are on adjacent lines.  */
7735 return TARGET_PAIRED_SINGLE_FLOAT;
7746 /* If we can access small data directly (using gp-relative relocation
7747 operators) return the small data pointer, otherwise return null.
7749 For each mips16 function which refers to GP relative symbols, we
7750 use a pseudo register, initialized at the start of the function, to
7751 hold the $gp value. */
7754 mips16_gp_pseudo_reg (void)
/* Create and initialize the pseudo lazily, on first request.  */
7756 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7761 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7763 /* We want to initialize this to a value which gcc will believe
/* The UNSPEC_GP wrapper makes the $gp value opaque to the optimizers.  */
7766 unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7767 emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7768 gen_rtx_CONST (Pmode, unspec));
7769 insn = get_insns ();
7772 push_topmost_sequence ();
7773 /* We need to emit the initialization after the FUNCTION_BEG
7774 note, so that it will be integrated. */
7775 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7777 && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
/* If no FUNCTION_BEG note was found, fall back to the first insn.  */
7779 if (scan == NULL_RTX)
7780 scan = get_insns ();
7781 insn = emit_insn_after (insn, scan);
7782 pop_topmost_sequence ();
7785 return cfun->machine->mips16_gp_pseudo_rtx;
7788 /* Write out code to move floating point arguments in or out of
7789 general registers. Output the instructions to FILE. FP_CODE is
7790 the code describing which arguments are present (see the comment at
7791 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
7792 we are copying from the floating point registers. */
7795 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7801 /* This code only works for the original 32 bit ABI and the O64 ABI. */
7802 gcc_assert (TARGET_OLDABI);
7808 gparg = GP_ARG_FIRST;
7809 fparg = FP_ARG_FIRST;
/* FP_CODE holds two bits per argument: (f & 3) == 1 means float,
   (f & 3) == 2 means double.  */
7810 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7814 if ((fparg & 1) != 0)
7816 fprintf (file, "\t%s\t%s,%s\n", s,
7817 reg_names[gparg], reg_names[fparg]);
7819 else if ((f & 3) == 2)
/* Double in an even FPR: one doubleword move handles it.  */
7822 fprintf (file, "\td%s\t%s,%s\n", s,
7823 reg_names[gparg], reg_names[fparg]);
/* Double in an odd FPR: move the two 32-bit halves separately,
   ordering them by endianness.  */
7826 if ((fparg & 1) != 0)
7828 if (TARGET_BIG_ENDIAN)
7829 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7830 reg_names[gparg], reg_names[fparg + 1], s,
7831 reg_names[gparg + 1], reg_names[fparg]);
7833 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7834 reg_names[gparg], reg_names[fparg], s,
7835 reg_names[gparg + 1], reg_names[fparg + 1]);
7848 /* Build a mips16 function stub. This is used for functions which
7849 take arguments in the floating point registers. It is 32 bit code
7850 that moves the floating point args into the general registers, and
7851 then jumps to the 16 bit code. */
7854 build_mips16_function_stub (FILE *file)
7857 char *secname, *stubname;
7858 tree stubid, stubdecl;
/* Name the stub and its section after the current function:
   ".mips16.fn.FOO" / "__fn_stub_FOO".  */
7862 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7863 secname = (char *) alloca (strlen (fnname) + 20);
7864 sprintf (secname, ".mips16.fn.%s", fnname);
7865 stubname = (char *) alloca (strlen (fnname) + 20);
7866 sprintf (stubname, "__fn_stub_%s", fnname);
7867 stubid = get_identifier (stubname);
7868 stubdecl = build_decl (FUNCTION_DECL, stubid,
7869 build_function_type (void_type_node, NULL_TREE));
7870 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
/* Emit a human-readable comment listing the FP argument types.  */
7872 fprintf (file, "\t# Stub function for %s (", current_function_name ());
7874 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7876 fprintf (file, "%s%s",
7877 need_comma ? ", " : "",
7878 (f & 3) == 1 ? "float" : "double");
7881 fprintf (file, ")\n");
/* The stub itself is 32-bit code.  */
7883 fprintf (file, "\t.set\tnomips16\n");
7884 switch_to_section (function_section (stubdecl));
7885 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
7887 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
7888 within a .ent, and we cannot emit another .ent. */
7889 if (!FUNCTION_NAME_ALREADY_DECLARED)
7891 fputs ("\t.ent\t", file);
7892 assemble_name (file, stubname);
7896 assemble_name (file, stubname);
7897 fputs (":\n", file);
7899 /* We don't want the assembler to insert any nops here. */
7900 fprintf (file, "\t.set\tnoreorder\n");
/* Move the FP arguments into the GP registers, then tail-jump to the
   real (mips16) function body through $1.  */
7902 mips16_fp_args (file, current_function_args_info.fp_code, 1);
7904 fprintf (asm_out_file, "\t.set\tnoat\n");
7905 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
7906 assemble_name (file, fnname);
7907 fprintf (file, "\n");
7908 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
7909 fprintf (asm_out_file, "\t.set\tat\n");
7911 /* Unfortunately, we can't fill the jump delay slot. We can't fill
7912 with one of the mfc1 instructions, because the result is not
7913 available for one instruction, so if the very first instruction
7914 in the function refers to the register, it will see the wrong
7916 fprintf (file, "\tnop\n");
7918 fprintf (file, "\t.set\treorder\n");
7920 if (!FUNCTION_NAME_ALREADY_DECLARED)
7922 fputs ("\t.end\t", file);
7923 assemble_name (file, stubname);
/* Restore mips16 mode and return to the function's own section.  */
7927 fprintf (file, "\t.set\tmips16\n");
7929 switch_to_section (function_section (current_function_decl));
7932 /* We keep a list of functions for which we have already built stubs
7933 in build_mips16_call_stub. */
/* Singly-linked list node; one entry per stubbed function.  */
7937 struct mips16_stub *next;
/* Head of the list of stubs built so far in this compilation.  */
7942 static struct mips16_stub *mips16_stubs;
7944 /* Build a call stub for a mips16 call. A stub is needed if we are
7945 passing any floating point values which should go into the floating
7946 point registers. If we are, and the call turns out to be to a 32
7947 bit function, the stub will be used to move the values into the
7948 floating point registers before calling the 32 bit function. The
7949 linker will magically adjust the function call to either the 16 bit
7950 function or the 32 bit stub, depending upon where the function call
7951 is actually defined.
7953 Similarly, we need a stub if the return value might come back in a
7954 floating point register.
7956 RETVAL is the location of the return value, or null if this is
7957 a call rather than a call_value. FN is the address of the
7958 function and ARG_SIZE is the size of the arguments. FP_CODE
7959 is the code built by function_arg. This function returns a nonzero
7960 value if it builds the call instruction itself. */
7963 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
7967 char *secname, *stubname;
7968 struct mips16_stub *l;
7969 tree stubid, stubdecl;
7973 /* We don't need to do anything if we aren't in mips16 mode, or if
7974 we were invoked with the -msoft-float option. */
7975 if (! TARGET_MIPS16 || ! mips16_hard_float)
7978 /* Figure out whether the value might come back in a floating point
7980 fpret = (retval != 0
7981 && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
7982 && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
7984 /* We don't need to do anything if there were no floating point
7985 arguments and the value will not be returned in a floating point
7987 if (fp_code == 0 && ! fpret)
7990 /* We don't need to do anything if this is a call to a special
7991 mips16 support function. */
7992 if (GET_CODE (fn) == SYMBOL_REF
7993 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
7996 /* This code will only work for o32 and o64 abis. The other ABI's
7997 require more sophisticated support. */
7998 gcc_assert (TARGET_OLDABI);
8000 /* We can only handle SFmode and DFmode floating point return
8003 gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
8005 /* If we're calling via a function pointer, then we must always call
8006 via a stub. There are magic stubs provided in libgcc.a for each
8007 of the required cases. Each of them expects the function address
8008 to arrive in register $2. */
8010 if (GET_CODE (fn) != SYMBOL_REF)
8016 /* ??? If this code is modified to support other ABI's, we need
8017 to handle PARALLEL return values here. */
8019 sprintf (buf, "__mips16_call_stub_%s%d",
8021 ? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
8024 id = get_identifier (buf);
8025 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8027 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
8029 if (retval == NULL_RTX)
8030 insn = gen_call_internal (stub_fn, arg_size);
8032 insn = gen_call_value_internal (retval, stub_fn, arg_size);
8033 insn = emit_call_insn (insn);
8035 /* Put the register usage information on the CALL. */
8036 CALL_INSN_FUNCTION_USAGE (insn) =
8037 gen_rtx_EXPR_LIST (VOIDmode,
8038 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
8039 CALL_INSN_FUNCTION_USAGE (insn));
8041 /* If we are handling a floating point return value, we need to
8042 save $18 in the function prologue. Putting a note on the
8043 call will mean that regs_ever_live[$18] will be true if the
8044 call is not eliminated, and we can check that in the prologue
8047 CALL_INSN_FUNCTION_USAGE (insn) =
8048 gen_rtx_EXPR_LIST (VOIDmode,
8049 gen_rtx_USE (VOIDmode,
8050 gen_rtx_REG (word_mode, 18)),
8051 CALL_INSN_FUNCTION_USAGE (insn));
8053 /* Return 1 to tell the caller that we've generated the call
8058 /* We know the function we are going to call. If we have already
8059 built a stub, we don't need to do anything further. */
8061 fnname = XSTR (fn, 0);
8062 for (l = mips16_stubs; l != NULL; l = l->next)
8063 if (strcmp (l->name, fnname) == 0)
8068 /* Build a special purpose stub. When the linker sees a
8069 function call in mips16 code, it will check where the target
8070 is defined. If the target is a 32 bit call, the linker will
8071 search for the section defined here. It can tell which
8072 symbol this section is associated with by looking at the
8073 relocation information (the name is unreliable, since this
8074 might be a static function). If such a section is found, the
8075 linker will redirect the call to the start of the magic
8078 If the function does not return a floating point value, the
8079 special stub section is named
8082 If the function does return a floating point value, the stub
8084 .mips16.call.fp.FNNAME
8087 secname = (char *) alloca (strlen (fnname) + 40);
8088 sprintf (secname, ".mips16.call.%s%s",
8091 stubname = (char *) alloca (strlen (fnname) + 20);
8092 sprintf (stubname, "__call_stub_%s%s",
8095 stubid = get_identifier (stubname);
8096 stubdecl = build_decl (FUNCTION_DECL, stubid,
8097 build_function_type (void_type_node, NULL_TREE));
8098 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8100 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
8102 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
8106 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8108 fprintf (asm_out_file, "%s%s",
8109 need_comma ? ", " : "",
8110 (f & 3) == 1 ? "float" : "double");
8113 fprintf (asm_out_file, ")\n");
8115 fprintf (asm_out_file, "\t.set\tnomips16\n");
8116 assemble_start_function (stubdecl, stubname);
8118 if (!FUNCTION_NAME_ALREADY_DECLARED)
8120 fputs ("\t.ent\t", asm_out_file);
8121 assemble_name (asm_out_file, stubname);
8122 fputs ("\n", asm_out_file);
8124 assemble_name (asm_out_file, stubname);
8125 fputs (":\n", asm_out_file);
8128 /* We build the stub code by hand. That's the only way we can
8129 do it, since we can't generate 32 bit code during a 16 bit
8132 /* We don't want the assembler to insert any nops here. */
8133 fprintf (asm_out_file, "\t.set\tnoreorder\n");
8135 mips16_fp_args (asm_out_file, fp_code, 0);
8139 fprintf (asm_out_file, "\t.set\tnoat\n");
8140 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
8142 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8143 fprintf (asm_out_file, "\t.set\tat\n");
8144 /* Unfortunately, we can't fill the jump delay slot. We
8145 can't fill with one of the mtc1 instructions, because the
8146 result is not available for one instruction, so if the
8147 very first instruction in the function refers to the
8148 register, it will see the wrong value. */
8149 fprintf (asm_out_file, "\tnop\n");
8153 fprintf (asm_out_file, "\tmove\t%s,%s\n",
8154 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
8155 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
8156 /* As above, we can't fill the delay slot. */
8157 fprintf (asm_out_file, "\tnop\n");
8158 if (GET_MODE (retval) == SFmode)
8159 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8160 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
8163 if (TARGET_BIG_ENDIAN)
8165 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8166 reg_names[GP_REG_FIRST + 2],
8167 reg_names[FP_REG_FIRST + 1]);
8168 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8169 reg_names[GP_REG_FIRST + 3],
8170 reg_names[FP_REG_FIRST + 0]);
8174 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8175 reg_names[GP_REG_FIRST + 2],
8176 reg_names[FP_REG_FIRST + 0]);
8177 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8178 reg_names[GP_REG_FIRST + 3],
8179 reg_names[FP_REG_FIRST + 1]);
8182 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
8183 /* As above, we can't fill the delay slot. */
8184 fprintf (asm_out_file, "\tnop\n");
8187 fprintf (asm_out_file, "\t.set\treorder\n");
8189 #ifdef ASM_DECLARE_FUNCTION_SIZE
8190 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
8193 if (!FUNCTION_NAME_ALREADY_DECLARED)
8195 fputs ("\t.end\t", asm_out_file);
8196 assemble_name (asm_out_file, stubname);
8197 fputs ("\n", asm_out_file);
8200 fprintf (asm_out_file, "\t.set\tmips16\n");
8202 /* Record this stub. */
8203 l = (struct mips16_stub *) xmalloc (sizeof *l);
8204 l->name = xstrdup (fnname);
8206 l->next = mips16_stubs;
8210 /* If we expect a floating point return value, but we've built a
8211 stub which does not expect one, then we're in trouble. We can't
8212 use the existing stub, because it won't handle the floating point
8213 value. We can't build a new stub, because the linker won't know
8214 which stub to use for the various calls in this object file.
8215 Fortunately, this case is illegal, since it means that a function
8216 was declared in two different ways in a single compilation. */
8217 if (fpret && ! l->fpret)
8218 error ("cannot handle inconsistent calls to %qs", fnname);
8220 /* If we are calling a stub which handles a floating point return
8221 value, we need to arrange to save $18 in the prologue. We do
8222 this by marking the function call as using the register. The
8223 prologue will later see that it is used, and emit code to save
8230 if (retval == NULL_RTX)
8231 insn = gen_call_internal (fn, arg_size);
8233 insn = gen_call_value_internal (retval, fn, arg_size);
8234 insn = emit_call_insn (insn);
8236 CALL_INSN_FUNCTION_USAGE (insn) =
8237 gen_rtx_EXPR_LIST (VOIDmode,
8238 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
8239 CALL_INSN_FUNCTION_USAGE (insn));
8241 /* Return 1 to tell the caller that we've generated the call
8246 /* Return 0 to let the caller generate the call insn. */
8250 /* An entry in the mips16 constant pool. VALUE is the pool constant,
8251 MODE is its mode, and LABEL is the CODE_LABEL associated with it. */
8253 struct mips16_constant {
/* Next entry in the pool; add_constant keeps the list sorted by
   increasing mode size.  */
8254 struct mips16_constant *next;
8257 enum machine_mode mode;
8260 /* Information about an incomplete mips16 constant pool. FIRST is the
8261 first constant, HIGHEST_ADDRESS is the highest address that the first
8262 byte of the pool can have, and INSN_ADDRESS is the current instruction
8265 struct mips16_constant_pool {
8266 struct mips16_constant *first;
8267 int highest_address;
8271 /* Add constant VALUE to POOL and return its label. MODE is the
8272 value's mode (used for CONST_INTs, etc.). */
8275 add_constant (struct mips16_constant_pool *pool,
8276 rtx value, enum machine_mode mode)
8278 struct mips16_constant **p, *c;
8279 bool first_of_size_p;
8281 /* See whether the constant is already in the pool. If so, return the
8282 existing label, otherwise leave P pointing to the place where the
8283 constant should be added.
8285 Keep the pool sorted in increasing order of mode size so that we can
8286 reduce the number of alignments needed. */
/* FIRST_OF_SIZE_P records whether this entry would be the first of its
   size class, and hence the one that may incur alignment padding.  */
8287 first_of_size_p = true;
8288 for (p = &pool->first; *p != 0; p = &(*p)->next)
8290 if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
8292 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
8294 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
8295 first_of_size_p = false;
8298 /* In the worst case, the constant needed by the earliest instruction
8299 will end up at the end of the pool. The entire pool must then be
8300 accessible from that instruction.
8302 When adding the first constant, set the pool's highest address to
8303 the address of the first out-of-range byte. Adjust this address
8304 downwards each time a new constant is added. */
8305 if (pool->first == 0)
8306 /* For pc-relative lw, addiu and daddiu instructions, the base PC value
8307 is the address of the instruction with the lowest two bits clear.
8308 The base PC value for ld has the lowest three bits clear. Assume
8309 the worst case here. */
8310 pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
8311 pool->highest_address -= GET_MODE_SIZE (mode);
8312 if (first_of_size_p)
8313 /* Take into account the worst possible padding due to alignment. */
8314 pool->highest_address -= GET_MODE_SIZE (mode) - 1;
8316 /* Create a new entry. */
8317 c = (struct mips16_constant *) xmalloc (sizeof *c);
8320 c->label = gen_label_rtx ();
8327 /* Output constant VALUE after instruction INSN and return the last
8328 instruction emitted. MODE is the mode of the constant. */
8331 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
8333 switch (GET_MODE_CLASS (mode))
8337 rtx size = GEN_INT (GET_MODE_SIZE (mode));
8338 return emit_insn_after (gen_consttable_int (value, size), insn);
8342 return emit_insn_after (gen_consttable_float (value), insn);
8344 case MODE_VECTOR_FLOAT:
8345 case MODE_VECTOR_INT:
/* Vector constants are emitted element by element, recursing with
   the vector's inner mode.  */
8348 for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
8349 insn = dump_constants_1 (GET_MODE_INNER (mode),
8350 CONST_VECTOR_ELT (value, i), insn);
8360 /* Dump out the constants in CONSTANTS after INSN. */
8363 dump_constants (struct mips16_constant *constants, rtx insn)
8365 struct mips16_constant *c, *next;
8369 for (c = constants; c != NULL; c = next)
8371 /* If necessary, increase the alignment of PC. */
8372 if (align < GET_MODE_SIZE (c->mode))
8374 int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
8375 insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
8377 align = GET_MODE_SIZE (c->mode);
/* Emit the constant's label followed by the constant itself.  */
8379 insn = emit_label_after (c->label, insn);
8380 insn = dump_constants_1 (c->mode, c->value, insn);
/* A barrier keeps the flow-analysis code from treating the pool
   data as executable instructions.  */
8386 emit_barrier_after (insn);
8389 /* Return the length of instruction INSN. */
8392 mips16_insn_length (rtx insn)
8396 rtx body = PATTERN (insn);
/* Jump tables: total size is the per-entry size implied by the
   table's mode times the number of entries.  */
8397 if (GET_CODE (body) == ADDR_VEC)
8398 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0)
8399 if (GET_CODE (body) == ADDR_DIFF_VEC)
8400 return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
8402 return get_attr_length (insn);
8405 /* Rewrite *X so that constant pool references refer to the constant's
8406 label instead. DATA points to the constant pool structure. */
8409 mips16_rewrite_pool_refs (rtx *x, void *data)
8411 struct mips16_constant_pool *pool = data;
/* Replace a reference to GCC's generic constant pool with a label
   reference into our own mips16 pool, adding the constant if needed.  */
8412 if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
8413 *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
8414 get_pool_constant (*x),
8415 get_pool_mode (*x)));
8419 /* Build MIPS16 constant pools. */
8422 mips16_lay_out_constants (void)
8424 struct mips16_constant_pool pool;
8428 memset (&pool, 0, sizeof (pool));
8429 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8431 /* Rewrite constant pool references in INSN. */
8433 for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
/* Track the running byte address of the instruction stream.  */
8435 pool.insn_address += mips16_insn_length (insn);
8437 if (pool.first != NULL)
8439 /* If there are no natural barriers between the first user of
8440 the pool and the highest acceptable address, we'll need to
8441 create a new instruction to jump around the constant pool.
8442 In the worst case, this instruction will be 4 bytes long.
8444 If it's too late to do this transformation after INSN,
8445 do it immediately before INSN. */
8446 if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8450 label = gen_label_rtx ();
8452 jump = emit_jump_insn_before (gen_jump (label), insn);
8453 JUMP_LABEL (jump) = label;
8454 LABEL_NUSES (label) = 1;
8455 barrier = emit_barrier_after (jump);
8457 emit_label_after (label, barrier);
8458 pool.insn_address += 4;
8461 /* See whether the constant pool is now out of range of the first
8462 user. If so, output the constants after the previous barrier.
8463 Note that any instructions between BARRIER and INSN (inclusive)
8464 will use negative offsets to refer to the pool. */
8465 if (pool.insn_address > pool.highest_address)
8467 dump_constants (pool.first, barrier)
8471 else if (BARRIER_P (insn))
/* Flush any remaining constants at the very end of the function.  */
8475 dump_constants (pool.first, get_last_insn ());
8478 /* A temporary variable used by for_each_rtx callbacks, etc. */
8479 static rtx mips_sim_insn;
8481 /* A structure representing the state of the processor pipeline.
8482 Used by the mips_sim_* family of functions. */
8484 /* The maximum number of instructions that can be issued in a cycle.
8485 (Caches mips_issue_rate.) */
8486 unsigned int issue_rate;
8488 /* The current simulation time. */
8491 /* How many more instructions can be issued in the current cycle. */
8492 unsigned int insns_left;
8494 /* LAST_SET[X].INSN is the last instruction to set register X.
8495 LAST_SET[X].TIME is the time at which that instruction was issued.
8496 INSN is null if no instruction has yet set register X. */
8500 } last_set[FIRST_PSEUDO_REGISTER];
8502 /* The pipeline's current DFA state. */
8506 /* Reset STATE to the initial simulation state. */
8509 mips_sim_reset (struct mips_sim *state)
/* A fresh cycle has the full issue bandwidth available, no known
   register writers, and a reset DFA state.  */
8512 state->insns_left = state->issue_rate;
8513 memset (&state->last_set, 0, sizeof (state->last_set));
8514 state_reset (state->dfa_state);
8517 /* Initialize STATE before its first use. DFA_STATE points to an
8518 allocated but uninitialized DFA state. */
8521 mips_sim_init (struct mips_sim *state, state_t dfa_state)
8523 state->issue_rate = mips_issue_rate ();
8524 state->dfa_state = dfa_state;
8525 mips_sim_reset (state);
8528 /* Advance STATE by one clock cycle. */
8531 mips_sim_next_cycle (struct mips_sim *state)
8534 state->insns_left = state->issue_rate;
/* A null insn tells the DFA to advance to the next cycle.  */
8535 state_transition (state->dfa_state, 0);
8538 /* Advance simulation state STATE until instruction INSN can read
8542 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
/* Check each hard register that REG occupies.  */
8546 for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8547 if (state->last_set[REGNO (reg) + i].insn != 0)
/* T is the earliest time at which the last writer's result is
   available to INSN; stall until then.  */
8551 t = state->last_set[REGNO (reg) + i].time;
8552 t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8553 while (state->time < t)
8554 mips_sim_next_cycle (state);
8558 /* A for_each_rtx callback. If *X is a register, advance simulation state
8559 DATA until mips_sim_insn can read the register's value. */
8562 mips_sim_wait_regs_2 (rtx *x, void *data)
8565 mips_sim_wait_reg (data, mips_sim_insn, *x);
8569 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X. */
8572 mips_sim_wait_regs_1 (rtx *x, void *data)
8574 for_each_rtx (x, mips_sim_wait_regs_2, data);
8577 /* Advance simulation state STATE until all of INSN's register
8578 dependencies are satisfied. */
8581 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
/* Stash INSN in the global so the for_each_rtx callbacks can see it.  */
8583 mips_sim_insn = insn;
8584 note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8587 /* Advance simulation state STATE until the units required by
8588 instruction INSN are available. */
8591 mips_sim_wait_units (struct mips_sim *state, rtx insn)
8595 tmp_state = alloca (state_size ());
/* Probe a scratch copy of the DFA state; a nonnegative transition
   result means INSN cannot issue yet, so advance a cycle and retry.  */
8596 while (state->insns_left == 0
8597 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8598 state_transition (tmp_state, insn) >= 0))
8599 mips_sim_next_cycle (state);
8602 /* Advance simulation state STATE until INSN is ready to issue. */
8605 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8607 mips_sim_wait_regs (state, insn);
8608 mips_sim_wait_units (state, insn);
8611 /* mips_sim_insn has just set X. Update the LAST_SET array
8612 in simulation state DATA. */
8615 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8617 struct mips_sim *state;
/* Record mips_sim_insn as the latest writer of every hard register
   that X occupies, stamped with the current simulation time.  */
8622 for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8624 state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8625 state->last_set[REGNO (x) + i].time = state->time;
8629 /* Issue instruction INSN in scheduler state STATE. Assume that INSN
8630 can issue immediately (i.e., that mips_sim_wait_insn has already
8634 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8636 state_transition (state->dfa_state, insn);
8637 state->insns_left--;
/* Record the registers INSN sets so later readers can stall on them.  */
8639 mips_sim_insn = insn;
8640 note_stores (PATTERN (insn), mips_sim_record_set, state);
8643 /* Simulate issuing a NOP in state STATE. */
8646 mips_sim_issue_nop (struct mips_sim *state)
/* A nop consumes an issue slot but needs no DFA unit, so only the
   slot count changes.  */
8648 if (state->insns_left == 0)
8649 mips_sim_next_cycle (state);
8650 state->insns_left--;
8653 /* Update simulation state STATE so that it's ready to accept the instruction
8654 after INSN. INSN should be part of the main rtl chain, not a member of a
8658 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8660 /* If INSN is a jump with an implicit delay slot, simulate a nop. */
8662 mips_sim_issue_nop (state);
8664 switch (GET_CODE (SEQ_BEGIN (insn)))
8668 /* We can't predict the processor state after a call or label. */
8669 mips_sim_reset (state);
8673 /* The delay slots of branch likely instructions are only executed
8674 when the branch is taken. Therefore, if the caller has simulated
8675 the delay slot instruction, STATE does not really reflect the state
8676 of the pipeline for the instruction after the delay slot. Also,
8677 branch likely instructions tend to incur a penalty when not taken,
8678 so there will probably be an extra delay between the branch and
8679 the instruction after the delay slot. */
8680 if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8681 mips_sim_reset (state);
8689 /* The VR4130 pipeline issues aligned pairs of instructions together,
8690 but it stalls the second instruction if it depends on the first.
8691 In order to cut down the amount of logic required, this dependence
8692 check is not based on a full instruction decode. Instead, any non-SPECIAL
8693 instruction is assumed to modify the register specified by bits 20-16
8694 (which is usually the "rt" field).
8696 In beq, beql, bne and bnel instructions, the rt field is actually an
8697 input, so we can end up with a false dependence between the branch
8698 and its delay slot. If this situation occurs in instruction INSN,
8699 try to avoid it by swapping rs and rt. */
8702 vr4130_avoid_branch_rt_conflict (rtx insn)
8706 first = SEQ_BEGIN (insn);
8707 second = SEQ_END (insn);
/* Only handle a conditional branch (a SET of the PC from an
   IF_THEN_ELSE) whose delay slot holds an ordinary instruction.  */
8709 && NONJUMP_INSN_P (second)
8710 && GET_CODE (PATTERN (first)) == SET
8711 && GET_CODE (SET_DEST (PATTERN (first))) == PC
8712 && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8714 /* Check for the right kind of condition. */
8715 rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8716 if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8717 && REG_P (XEXP (cond, 0))
8718 && REG_P (XEXP (cond, 1))
8719 && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8720 && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8722 /* SECOND mentions the rt register but not the rs register. */
/* EQ and NE are symmetric, so swapping the operands preserves the
   branch condition while removing the false rt dependence.  */
8723 rtx tmp = XEXP (cond, 0);
8724 XEXP (cond, 0) = XEXP (cond, 1);
8725 XEXP (cond, 1) = tmp;
8730 /* Implement -mvr4130-align. Go through each basic block and simulate the
8731 processor pipeline. If we find that a pair of instructions could execute
8732 in parallel, and the first of those instruction is not 8-byte aligned,
8733 insert a nop to make it aligned. */
8736 vr4130_align_insns (void)
8738 struct mips_sim state;
8739 rtx insn, subinsn, last, last2, next;
8744 /* LAST is the last instruction before INSN to have a nonzero length.
8745 LAST2 is the last such instruction before LAST. */
8749 /* ALIGNED_P is true if INSN is known to be at an aligned address. */
8752 mips_sim_init (&state, alloca (state_size ()));
8753 for (insn = get_insns (); insn != 0; insn = next)
8755 unsigned int length;
8757 next = NEXT_INSN (insn);
8759 /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8760 This isn't really related to the alignment pass, but we do it on
8761 the fly to avoid a separate instruction walk. */
8762 vr4130_avoid_branch_rt_conflict (insn);
8764 if (USEFUL_INSN_P (insn))
8765 FOR_EACH_SUBINSN (subinsn, insn)
8767 mips_sim_wait_insn (&state, subinsn);
8769 /* If we want this instruction to issue in parallel with the
8770 previous one, make sure that the previous instruction is
8771 aligned. There are several reasons why this isn't worthwhile
8772 when the second instruction is a call:
8774 - Calls are less likely to be performance critical,
8775 - There's a good chance that the delay slot can execute
8776 in parallel with the call.
8777 - The return address would then be unaligned.
8779 In general, if we're going to insert a nop between instructions
8780 X and Y, it's better to insert it immediately after X. That
8781 way, if the nop makes Y aligned, it will also align any labels
8783 if (state.insns_left != state.issue_rate
8784 && !CALL_P (subinsn))
8786 if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8788 /* SUBINSN is the first instruction in INSN and INSN is
8789 aligned. We want to align the previous instruction
8790 instead, so insert a nop between LAST2 and LAST.
8792 Note that LAST could be either a single instruction
8793 or a branch with a delay slot. In the latter case,
8794 LAST, like INSN, is already aligned, but the delay
8795 slot must have some extra delay that stops it from
8796 issuing at the same time as the branch. We therefore
8797 insert a nop before the branch in order to align its
8799 emit_insn_after (gen_nop (), last2);
8802 else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8804 /* SUBINSN is the delay slot of INSN, but INSN is
8805 currently unaligned. Insert a nop between
8806 LAST and INSN to align it. */
8807 emit_insn_after (gen_nop (), last);
8811 mips_sim_issue_insn (&state, subinsn);
8813 mips_sim_finish_insn (&state, insn);
8815 /* Update LAST, LAST2 and ALIGNED_P for the next instruction. */
8816 length = get_attr_length (insn);
8819 /* If the instruction is an asm statement or multi-instruction
8820 mips.md pattern, the length is only an estimate. Insert an
8821 8 byte alignment after it so that the following instructions
8822 can be handled correctly. */
8823 if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8824 && (recog_memoized (insn) < 0 || length >= 8))
8826 next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8827 next = NEXT_INSN (next);
8828 mips_sim_next_cycle (&state);
8831 else if (length & 4)
/* A 4-byte (single-word) instruction flips the 8-byte phase.  */
8832 aligned_p = !aligned_p;
8837 /* See whether INSN is an aligned label. */
8838 if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8844 /* Subroutine of mips_reorg. If there is a hazard between INSN
8845 and a previous instruction, avoid it by inserting nops after
8848 *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8849 this point. If *DELAYED_REG is non-null, INSN must wait a cycle
8850 before using the value of that register. *HILO_DELAY counts the
8851 number of instructions since the last hilo hazard (that is,
8852 the number of instructions since the last mflo or mfhi).
8854 After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8855 for the next instruction.
8857 LO_REG is an rtx for the LO register, used in dependence checking. */
8860 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8861 rtx *delayed_reg, rtx lo_reg)
8869 pattern = PATTERN (insn);
8871 /* Do not put the whole function in .set noreorder if it contains
8872 an asm statement. We don't know whether there will be hazards
8873 between the asm statement and the gcc-generated code. */
8874 if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8875 cfun->machine->all_noreorder_p = false;
8877 /* Ignore zero-length instructions (barriers and the like). */
8878 ninsns = get_attr_length (insn) / 4;
8882 /* Work out how many nops are needed. Note that we only care about
8883 registers that are explicitly mentioned in the instruction's pattern.
8884 It doesn't matter that calls use the argument registers or that they
8885 clobber hi and lo. */
8886 if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
8887 nops = 2 - *hilo_delay;
8888 else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
8893 /* Insert the nops between this instruction and the previous one.
8894 Each new nop takes us further from the last hilo hazard. */
8895 *hilo_delay += nops;
8897 emit_insn_after (gen_hazard_nop (), after);
8899 /* Set up the state for the next instruction. */
8900 *hilo_delay += ninsns;
8902 if (INSN_CODE (insn) >= 0)
8903 switch (get_attr_hazard (insn))
/* A HAZARD_DELAY instruction's destination must not be read by
   the immediately following instruction; record it.  */
8913 set = single_set (insn);
8914 gcc_assert (set != 0);
8915 *delayed_reg = SET_DEST (set);
8921 /* Go through the instruction stream and insert nops where necessary.
8922 See if the whole function can then be put into .set noreorder &
8926 mips_avoid_hazards (void)
8928 rtx insn, last_insn, lo_reg, delayed_reg;
8931 /* Force all instructions to be split into their final form. */
8932 split_all_insns_noflow ();
8934 /* Recalculate instruction lengths without taking nops into account. */
8935 cfun->machine->ignore_hazard_length_p = true;
8936 shorten_branches (get_insns ());
8938 cfun->machine->all_noreorder_p = true;
8940 /* Profiled functions can't be all noreorder because the profiler
8941 support uses assembler macros. */
8942 if (current_function_profile)
8943 cfun->machine->all_noreorder_p = false;
8945 /* Code compiled with -mfix-vr4120 can't be all noreorder because
8946 we rely on the assembler to work around some errata. */
8947 if (TARGET_FIX_VR4120)
8948 cfun->machine->all_noreorder_p = false;
8950 /* The same is true for -mfix-vr4130 if we might generate mflo or
8951 mfhi instructions. Note that we avoid using mflo and mfhi if
8952 the VR4130 macc and dmacc instructions are available instead;
8953 see the *mfhilo_{si,di}_macc patterns. */
8954 if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
8955 cfun->machine->all_noreorder_p = false;
8960 lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
8962 for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
/* For a SEQUENCE (a filled delay slot), check each element
   individually; otherwise check the insn itself.  */
8965 if (GET_CODE (PATTERN (insn)) == SEQUENCE)
8966 for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
8967 mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
8968 &hilo_delay, &delayed_reg, lo_reg);
8970 mips_avoid_hazard (last_insn, insn, &hilo_delay,
8971 &delayed_reg, lo_reg);
8978 /* Implement TARGET_MACHINE_DEPENDENT_REORG. */
/* mips16 functions get constant pools; other code gets the hazard /
   delayed-branch / VR4130 alignment treatment.  */
8984 mips16_lay_out_constants ();
8985 else if (TARGET_EXPLICIT_RELOCS)
8987 if (mips_flag_delayed_branch)
8988 dbr_schedule (get_insns ());
8989 mips_avoid_hazards ();
8990 if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
8991 vr4130_align_insns ();
8995 /* This function does three things:
8997 - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
8998 - Register the mips16 hardware floating point stubs.
8999 - Register the gofast functions if selected using --enable-gofast. */
9001 #include "config/gofast.h"
9004 mips_init_libfuncs (void)
9006 if (TARGET_FIX_VR4120)
9008 set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9009 set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9012 if (TARGET_MIPS16 && mips16_hard_float)
/* Single-float stubs are registered whenever mips16 hard float is
   in use; double-float stubs only when TARGET_DOUBLE_FLOAT.  */
9014 set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9015 set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9016 set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9017 set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9019 set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9020 set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9021 set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9022 set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9023 set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9024 set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9026 set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9027 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9029 if (TARGET_DOUBLE_FLOAT)
9031 set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9032 set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9033 set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9034 set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9036 set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9037 set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9038 set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9039 set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9040 set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9041 set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9043 set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9044 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9046 set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9047 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9051 gofast_maybe_init_libfuncs ();
9054 /* Return a number assessing the cost of moving a register in class
9055 FROM to class TO. The classes are expressed using the enumeration
9056 values such as `GENERAL_REGS'. A value of 2 is the default; other
9057 values are interpreted relative to that.
9059 It is not required that the cost always equal 2 when FROM is the
9060 same as TO; on some machines it is expensive to move between
9061 registers if they are not general registers.
9063 If reload sees an insn consisting of a single `set' between two
9064 hard registers, and if `REGISTER_MOVE_COST' applied to their
9065 classes returns a value of 2, reload does not check to ensure that
9066 the constraints of the insn are met. Setting a cost of other than
9067 2 will allow reload to verify that the constraints are met. You
9068 should do this if the `movM' pattern's constraints do not allow
9071 ??? We make the cost of moving from HI/LO into general
9072 registers the same as for one of moving general registers to
9073 HI/LO for TARGET_MIPS16 in order to prevent allocating a
9074 pseudo to HI/LO. This might hurt optimizations though, it
9075 isn't clear if it is wise. And it might not work in all cases. We
9076 could solve the DImode LO reg problem by using a multiply, just
9077 like reload_{in,out}si. We could solve the SImode/HImode HI reg
9078 problem by using divide instructions. divu puts the remainder in
9079 the HI reg, so doing a divide by -1 will move the value in the HI
9080 reg for all values except -1. We could handle that case by using a
9081 signed divide, e.g. -1 / 2 (or maybe 1 / -2?). We'd have to emit
9082 a compare/branch to test the input value to see which instruction
9083 we need to use. This gets pretty messy, but it is feasible. */
9086 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9087 enum reg_class to, enum reg_class from)
/* NOTE(review): the M16 cases handle the registers reachable from
   mips16 code; the following chains distinguish GPR, FPR, accumulator
   (HI/LO), status and coprocessor classes.  */
9089 if (from == M16_REGS && GR_REG_CLASS_P (to))
9091 else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
9093 else if (GR_REG_CLASS_P (from))
9097 else if (to == M16_NA_REGS)
9099 else if (GR_REG_CLASS_P (to))
9106 else if (to == FP_REGS)
9108 else if (reg_class_subset_p (to, ACC_REGS))
9115 else if (COP_REG_CLASS_P (to))
9120 else if (from == FP_REGS)
9122 if (GR_REG_CLASS_P (to))
9124 else if (to == FP_REGS)
9126 else if (to == ST_REGS)
9129 else if (reg_class_subset_p (from, ACC_REGS))
9131 if (GR_REG_CLASS_P (to))
9139 else if (from == ST_REGS && GR_REG_CLASS_P (to))
9141 else if (COP_REG_CLASS_P (from))
9147 ??? What cases are these? Shouldn't we return 2 here? */
9152 /* Return the length of INSN. LENGTH is the initial length computed by
9153 attributes in the machine-description file. */
9156 mips_adjust_insn_length (rtx insn, int length)
9158 /* An unconditional jump has an unfilled delay slot if it is not part
9159 of a sequence. A conditional jump normally has a delay slot, but
9160 does not on MIPS16. */
9161 if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9164 /* See how many nops might be needed to avoid hardware hazards. */
9165 if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9166 switch (get_attr_hazard (insn))
9180 /* All MIPS16 instructions are a measly two bytes. */
9188 /* Return an asm sequence to start a noat block and load the address
9189 of a label into $1. */
9192 mips_output_load_label (void)
9194 if (TARGET_EXPLICIT_RELOCS)
/* GOT-page plus GOT-offset addressing, 32- or 64-bit variants.  */
9198 return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9201 return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9204 if (ISA_HAS_LOAD_DELAY)
9205 return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9206 return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Non-GOT case: let the assembler expand (d)la into whatever the
   target needs.  */
9210 if (Pmode == DImode)
9211 return "%[dla\t%@,%0";
9213 return "%[la\t%@,%0";
9217 /* Return the assembly code for INSN, which has the operands given by
9218 OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9219 BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9220 is in range of a direct branch. BRANCH_IF_FALSE is an inverted
9221 version of BRANCH_IF_TRUE. */
9224 mips_output_conditional_branch (rtx insn, rtx *operands,
9225 const char *branch_if_true,
9226 const char *branch_if_false)
9228 unsigned int length;
9229 rtx taken, not_taken;
9231 length = get_attr_length (insn);
9234 /* Just a simple conditional branch. */
9235 mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9236 return branch_if_true;
9239 /* Generate a reversed branch around a direct jump. This fallback does
9240 not use branch-likely instructions. */
9241 mips_branch_likely = false;
9242 not_taken = gen_label_rtx ();
9243 taken = operands[1];
9245 /* Generate the reversed branch to NOT_TAKEN. */
9246 operands[1] = not_taken;
9247 output_asm_insn (branch_if_false, operands);
9249 /* If INSN has a delay slot, we must provide delay slots for both the
9250 branch to NOT_TAKEN and the conditional jump. We must also ensure
9251 that INSN's delay slot is executed in the appropriate cases. */
9254 /* This first delay slot will always be executed, so use INSN's
9255 delay slot if is not annulled. */
9256 if (!INSN_ANNULLED_BRANCH_P (insn))
9258 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9259 asm_out_file, optimize, 1, NULL);
/* Mark the delay-slot insn as emitted so final does not output
   it a second time.  */
9260 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9263 output_asm_insn ("nop", 0);
9264 fprintf (asm_out_file, "\n");
9267 /* Output the unconditional branch to TAKEN. */
9269 output_asm_insn ("j\t%0%/", &taken);
/* Target out of "j" range: load its address into $1 and jump via
   register.  */
9272 output_asm_insn (mips_output_load_label (), &taken);
9273 output_asm_insn ("jr\t%@%]%/", 0);
9276 /* Now deal with its delay slot; see above. */
9279 /* This delay slot will only be executed if the branch is taken.
9280 Use INSN's delay slot if is annulled. */
9281 if (INSN_ANNULLED_BRANCH_P (insn))
9283 final_scan_insn (XVECEXP (final_sequence, 0, 1),
9284 asm_out_file, optimize, 1, NULL);
9285 INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9288 output_asm_insn ("nop", 0);
9289 fprintf (asm_out_file, "\n");
9292 /* Output NOT_TAKEN. */
9293 (*targetm.asm_out.internal_label) (asm_out_file, "L",
9294 CODE_LABEL_NUMBER (not_taken));
9298 /* Return the assembly code for INSN, which branches to OPERANDS[1]
9299 if some ordered condition is true. The condition is given by
9300 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9301 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
9302 its second is always zero. */
9305 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9307 const char *branch[2];
9309 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9310 Make BRANCH[0] branch on the inverse condition. */
9311 switch (GET_CODE (operands[0]))
9313 /* These cases are equivalent to comparisons against zero. */
/* NOTE(review): case labels are elided in this excerpt.  Against a
   second operand of zero, these conditions reduce to (in)equality
   with register $0, hence the bne/beq pair below.  */
9315 inverted_p = !inverted_p;
9318 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9319 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9322 /* These cases are always true or always false. */
9324 inverted_p = !inverted_p;
/* "beq $0,$0" always branches; "bne $0,$0" never does.  */
9327 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9328 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
/* Default: use the b<cond>z instruction selected by the %C0/%N0
   condition-code operand modifiers.  */
9332 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9333 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9336 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9339 /* Used to output div or ddiv instruction DIVISION, which has the operands
9340 given by OPERANDS. Add in a divide-by-zero check if needed.
9342 When working around R4000 and R4400 errata, we need to make sure that
9343 the division is not immediately followed by a shift[1][2]. We also
9344 need to stop the division from being put into a branch delay slot[3].
9345 The easiest way to avoid both problems is to add a nop after the
9346 division. When a divide-by-zero check is needed, this nop can be
9347 used to fill the branch delay slot.
9349 [1] If a double-word or a variable shift executes immediately
9350 after starting an integer division, the shift may give an
9351 incorrect result. See quotations of errata #16 and #28 from
9352 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9353 in mips.md for details.
9355 [2] A similar bug to [1] exists for all revisions of the
9356 R4000 and the R4400 when run in an MC configuration.
9357 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9359 "19. In this following sequence:
9361 ddiv (or ddivu or div or divu)
9362 dsll32 (or dsrl32, dsra32)
9364 if an MPT stall occurs, while the divide is slipping the cpu
9365 pipeline, then the following double shift would end up with an
9368 Workaround: The compiler needs to avoid generating any
9369 sequence with divide followed by extended double shift."
9371 This erratum is also present in "MIPS R4400MC Errata, Processor
9372 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9373 & 3.0" as errata #10 and #4, respectively.
9375 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9376 (also valid for MIPS R4000MC processors):
9378 "52. R4000SC: This bug does not apply for the R4000PC.
9380 There are two flavors of this bug:
9382 1) If the instruction just after divide takes an RF exception
9383 (tlb-refill, tlb-invalid) and gets an instruction cache
9384 miss (both primary and secondary) and the line which is
9385 currently in secondary cache at this index had the first
9386 data word, where the bits 5..2 are set, then R4000 would
9387 get a wrong result for the div.
9392 ------------------- # end-of page. -tlb-refill
9397 ------------------- # end-of page. -tlb-invalid
9400 2) If the divide is in the taken branch delay slot, where the
9401 target takes RF exception and gets an I-cache miss for the
9402 exception vector or where I-cache miss occurs for the
9403 target address, under the above mentioned scenarios, the
9404 div would get wrong results.
9407 j r2 # to next page mapped or unmapped
9408 div r8,r9 # this bug would be there as long
9409 # as there is an ICache miss and
9410 nop # the "data pattern" is present
9413 beq r0, r0, NextPage # to Next page
9417 This bug is present for div, divu, ddiv, and ddivu
9420 Workaround: For item 1), OS could make sure that the next page
9421 after the divide instruction is also mapped. For item 2), the
9422 compiler could make sure that the divide instruction is not in
9423 the branch delay slot."
9425 These processors have PRId values of 0x00004220 and 0x00004300 for
9426 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
9429 mips_output_division (const char *division, rtx *operands)
/* NOTE(review): the declaration and initialization of `s` (presumably
   s = division) are elided from this excerpt — confirm in full source.
   The pattern throughout is: emit the current template in `s`, then
   leave the final template in `s` as the return value.  */
9434 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
/* Errata workaround: emit the division now so a trailing nop can
   separate it from any following shift / delay slot (see above).  */
9436 output_asm_insn (s, operands);
9439 if (TARGET_CHECK_ZERO_DIV)
/* Software divide-by-zero check: branch over a "break 7" trap when
   the divisor (%2) is non-zero.  */
9443 output_asm_insn (s, operands);
9444 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9446 else if (GENERATE_DIVIDE_TRAPS)
/* The ISA-level teq-style divide trap is emitted after the divide.  */
9448 output_asm_insn (s, operands);
/* Fallback: bne around the break, with the divide filling the
   branch's delay slot (%( / %) bracket the noreorder region).  */
9453 output_asm_insn ("%(bne\t%2,%.,1f", operands);
9454 output_asm_insn (s, operands);
9455 s = "break\t7%)\n1:";
9461 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9462 with a final "000" replaced by "k". Ignore case.
9464 Note: this function is shared between GCC and GAS. */
9467 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
/* Advance both pointers over the common case-insensitive prefix.  */
9469 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9470 given++, canonical++;
/* Either both strings are exhausted, or the remainders are exactly
   "000" (canonical) vs. "k"/"K" (given), e.g. "vr4100" vs "vr41k".  */
9472 return ((*given == 0 && *canonical == 0)
9473 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9477 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9478 CPU name. We've traditionally allowed a lot of variation here.
9480 Note: this function is shared between GCC and GAS. */
9483 mips_matching_cpu_name_p (const char *canonical, const char *given)
9485 /* First see if the name matches exactly, or with a final "000"
9487 if (mips_strict_matching_cpu_name_p (canonical, given))
9490 /* If not, try comparing based on numerical designation alone.
9491 See if GIVEN is an unadorned number, or 'r' followed by a number. */
/* Skip a leading 'r'/'R' in GIVEN (the increment is elided here).  */
9492 if (TOLOWER (*given) == 'r')
9494 if (!ISDIGIT (*given))
9497 /* Skip over some well-known prefixes in the canonical name,
9498 hoping to find a number there too. */
/* Prefixes handled: "vr", "rm", and plain "r" (e.g. vr4100, rm7000,
   r4000); the pointer adjustments are elided in this excerpt.  */
9499 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9501 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9503 else if (TOLOWER (canonical[0]) == 'r')
/* Compare the remaining numeric designations strictly.  */
9506 return mips_strict_matching_cpu_name_p (canonical, given);
9510 /* Return the mips_cpu_info entry for the processor or ISA given
9511 by CPU_STRING. Return null if the string isn't recognized.
9513 A similar function exists in GAS. */
9515 static const struct mips_cpu_info *
9516 mips_parse_cpu (const char *cpu_string)
9518 const struct mips_cpu_info *p;
9521 /* In the past, we allowed upper-case CPU names, but it doesn't
9522 work well with the multilib machinery. */
/* Warn (once per string) if any character is upper case; the
   ISUPPER test itself is elided in this excerpt.  */
9523 for (s = cpu_string; *s != 0; s++)
9526 warning (0, "the cpu name must be lower case");
9530 /* 'from-abi' selects the most compatible architecture for the given
9531 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
9532 EABIs, we have to decide whether we're using the 32-bit or 64-bit
9533 version. Look first at the -mgp options, if given, otherwise base
9534 the choice on MASK_64BIT in TARGET_DEFAULT. */
9535 if (strcasecmp (cpu_string, "from-abi") == 0)
9536 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9537 : ABI_NEEDS_64BIT_REGS ? 3
9538 : (TARGET_64BIT ? 3 : 1));
9540 /* 'default' has traditionally been a no-op. Probably not very useful. */
9541 if (strcasecmp (cpu_string, "default") == 0)
/* Fall back to a linear search of the canonical CPU table.  */
9544 for (p = mips_cpu_info_table; p->name != 0; p++)
9545 if (mips_matching_cpu_name_p (p->name, cpu_string))
9552 /* Return the processor associated with the given ISA level, or null
9553 if the ISA isn't valid. */
9555 static const struct mips_cpu_info *
9556 mips_cpu_info_from_isa (int isa)
9558 const struct mips_cpu_info *p;
/* Linear scan of the CPU table; the ISA comparison and the return
   statements are elided in this excerpt.  */
9560 for (p = mips_cpu_info_table; p->name != 0; p++)
9567 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
9568 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
9569 they only hold condition code modes, and CCmode is always considered to
9570 be 4 bytes wide. All other registers are word sized. */
9573 mips_hard_regno_nregs (int regno, enum machine_mode mode)
/* FP status (condition-code) registers: 4 bytes each.  */
9575 if (ST_REG_P (regno))
9576 return ((GET_MODE_SIZE (mode) + 3) / 4);
/* General registers: one word each.  */
9577 else if (! FP_REG_P (regno))
9578 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
/* FP registers: UNITS_PER_FPREG bytes each.  */
9580 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9583 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
9584 all BLKmode objects are returned in memory. Under the new (N32 and
9585 64-bit MIPS ABIs) small structures are returned in a register.
9586 Objects with varying size must still be returned in memory, of
9590 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
/* Old ABIs: every BLKmode aggregate goes in memory (the TARGET_OLDABI
   test is elided in this excerpt).  */
9593 return (TYPE_MODE (type) == BLKmode);
/* New ABIs: in memory only if larger than two words or of variable
   size (int_size_in_bytes returns -1 for variable-sized types).  */
9595 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9596 || (int_size_in_bytes (type) == -1));
/* Implement TARGET_STRICT_ARGUMENT_NAMING: the new ABIs require strict
   naming of varargs; the old 32/O64 ABIs do not.  */
9600 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9602 return !TARGET_OLDABI;
9605 /* Return true if INSN is a multiply-add or multiply-subtract
9606 instruction and PREV assigns to the accumulator operand. */
9609 mips_linked_madd_p (rtx prev, rtx insn)
9613 x = single_set (insn);
/* Match (plus (mult a b) acc) where PREV sets ACC...  */
9619 if (GET_CODE (x) == PLUS
9620 && GET_CODE (XEXP (x, 0)) == MULT
9621 && reg_set_p (XEXP (x, 1), prev))
/* ...or (minus acc (mult a b)) where PREV sets ACC.  */
9624 if (GET_CODE (x) == MINUS
9625 && GET_CODE (XEXP (x, 1)) == MULT
9626 && reg_set_p (XEXP (x, 0), prev))
9632 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9633 that may clobber hi or lo. */
9635 static rtx mips_macc_chains_last_hilo;
9637 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
9638 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
9641 mips_macc_chains_record (rtx insn)
/* The may_clobber_hilo insn attribute flags anything that writes the
   HI/LO accumulator pair.  */
9643 if (get_attr_may_clobber_hilo (insn))
9644 mips_macc_chains_last_hilo = insn;
9647 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
9648 has NREADY elements, looking for a multiply-add or multiply-subtract
9649 instruction that is cumulative with mips_macc_chains_last_hilo.
9650 If there is one, promote it ahead of anything else that might
9651 clobber hi or lo. */
9654 mips_macc_chains_reorder (rtx *ready, int nready)
/* READY is ordered with the highest-priority insn last, so scan from
   the back.  */
9658 if (mips_macc_chains_last_hilo != 0)
9659 for (i = nready - 1; i >= 0; i--)
9660 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
/* Promote the madd/msub past the highest-priority HI/LO
   clobberer that currently outranks it.  */
9662 for (j = nready - 1; j > i; j--)
9663 if (recog_memoized (ready[j]) >= 0
9664 && get_attr_may_clobber_hilo (ready[j]))
9666 mips_promote_ready (ready, i, j);
9673 /* The last instruction to be scheduled. */
9675 static rtx vr4130_last_insn;
9677 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
9678 points to an rtx that is initially an instruction. Nullify the rtx
9679 if the instruction uses the value of register X. */
9682 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9684 rtx *insn_ptr = data;
/* Clear *INSN_PTR once a true (read-after-write) dependence on X is
   found; the REG_P test on X is elided in this excerpt.  */
9687 && reg_referenced_p (x, PATTERN (*insn_ptr)))
9691 /* Return true if there is true register dependence between vr4130_last_insn
9695 vr4130_true_reg_dependence_p (rtx insn)
/* Walk the stores of the last scheduled insn; the callback nullifies
   INSN if a store feeds it, so the result is (insn == 0).  */
9697 note_stores (PATTERN (vr4130_last_insn),
9698 vr4130_true_reg_dependence_p_1, &insn);
9702 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
9703 the ready queue and that INSN2 is the instruction after it, return
9704 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
9705 in which INSN1 and INSN2 can probably issue in parallel, but for
9706 which (INSN2, INSN1) should be less sensitive to instruction
9707 alignment than (INSN1, INSN2). See 4130.md for more details. */
9710 vr4130_swap_insns_p (rtx insn1, rtx insn2)
9714 /* Check for the following case:
9716 1) there is some other instruction X with an anti dependence on INSN1;
9717 2) X has a higher priority than INSN2; and
9718 3) X is an arithmetic instruction (and thus has no unit restrictions).
9720 If INSN1 is the last instruction blocking X, it would be better to
9721 choose (INSN1, X) over (INSN2, INSN1). */
9722 for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9723 if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9724 && INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9725 && recog_memoized (XEXP (dep, 0)) >= 0
9726 && get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
/* Only consider swapping when both insns are recognizable and we have
   a previously scheduled insn to compare against.  */
9729 if (vr4130_last_insn != 0
9730 && recog_memoized (insn1) >= 0
9731 && recog_memoized (insn2) >= 0)
9733 /* See whether INSN1 and INSN2 use different execution units,
9734 or if they are both ALU-type instructions. If so, they can
9735 probably execute in parallel. */
9736 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9737 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9738 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9740 /* If only one of the instructions has a dependence on
9741 vr4130_last_insn, prefer to schedule the other one first. */
9742 bool dep1 = vr4130_true_reg_dependence_p (insn1);
9743 bool dep2 = vr4130_true_reg_dependence_p (insn2);
9747 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9748 is not an ALU-type instruction and if INSN1 uses the same
9749 execution unit. (Note that if this condition holds, we already
9750 know that INSN2 uses a different execution unit.) */
9751 if (class1 != VR4130_CLASS_ALU
9752 && recog_memoized (vr4130_last_insn) >= 0
9753 && class1 == get_attr_vr4130_class (vr4130_last_insn))
9760 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
9761 queue with at least two instructions. Swap the first two if
9762 vr4130_swap_insns_p says that it could be worthwhile. */
9765 vr4130_reorder (rtx *ready, int nready)
/* READY is ordered with the next-to-issue insn last.  */
9767 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9768 mips_promote_ready (ready, nready - 2, nready - 1);
9771 /* Remove the instruction at index LOWER from ready queue READY and
9772 reinsert it in front of the instruction at index HIGHER. LOWER must
9776 mips_promote_ready (rtx *ready, int lower, int higher)
/* Rotate READY[LOWER..HIGHER] left by one, moving the promoted insn
   to index HIGHER (the front of the queue is the highest index).  */
9781 new_head = ready[lower];
9782 for (i = lower; i < higher; i++)
9783 ready[i] = ready[i + 1];
9784 ready[i] = new_head;
9787 /* Implement TARGET_SCHED_REORDER. */
9790 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9791 rtx *ready, int *nreadyp, int cycle)
/* Macc-chain promotion runs only before reload; reset the tracked
   HI/LO writer at the start of each block (cycle 0, elided here).  */
9793 if (!reload_completed && TUNE_MACC_CHAINS)
9796 mips_macc_chains_last_hilo = 0;
9798 mips_macc_chains_reorder (ready, *nreadyp);
/* VR4130 pair swapping runs after reload, unless explicit alignment
   (-mvr4130-align) is handled in the .md file instead.  */
9800 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9803 vr4130_last_insn = 0;
9805 vr4130_reorder (ready, *nreadyp);
9807 return mips_issue_rate ();
9810 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
9813 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9816 switch (GET_CODE (PATTERN (insn)))
9820 /* Don't count USEs and CLOBBERs against the issue rate. */
/* For real instructions, update the per-tune bookkeeping used by
   the reorder hooks above.  */
9825 if (!reload_completed && TUNE_MACC_CHAINS)
9826 mips_macc_chains_record (insn);
9827 vr4130_last_insn = insn;
9833 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
9834 dependencies have no cost. */
9837 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9838 rtx dep ATTRIBUTE_UNUSED, int cost)
/* REG_NOTE_KIND is zero for true dependencies; any other kind
   (anti/output) costs nothing.  */
9840 if (REG_NOTE_KIND (link) != 0)
9845 /* Return the number of instructions that can be issued per cycle. */
9848 mips_issue_rate (void)
/* NOTE(review): the switch over mips_tune is elided in this excerpt;
   the grouped cases below share one dual-issue return.  */
9852 case PROCESSOR_R4130:
9853 case PROCESSOR_R5400:
9854 case PROCESSOR_R5500:
9855 case PROCESSOR_R7000:
9856 case PROCESSOR_R9000:
9860 /* This is actually 4, but we get better performance if we claim 3.
9861 This is partly because of unwanted speculative code motion with the
9862 larger number, and partly because in most common cases we can't
9863 reach the theoretical max of 4. */
9871 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
9872 be as wide as the scheduling freedom in the DFA. */
9875 mips_multipass_dfa_lookahead (void)
9877 /* Can schedule up to 4 of the 6 function units in any one cycle. */
/* Only the SB-1 gets a non-default lookahead here.  */
9878 if (mips_tune == PROCESSOR_SB1)
9884 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
9885 return the first operand of the associated "pref" or "prefx" insn. */
9888 mips_prefetch_cookie (rtx write, rtx locality)
9890 /* store_streamed / load_streamed. */
/* WRITE is 0 for a load, 1 for a store; the added constant selects
   the hint encoding band for the given locality.  */
9891 if (INTVAL (locality) <= 0)
9892 return GEN_INT (INTVAL (write) + 4);
/* Normal store / load hints (the return is elided in this excerpt).  */
9895 if (INTVAL (locality) <= 2)
9898 /* store_retained / load_retained. */
9899 return GEN_INT (INTVAL (write) + 6);
9902 /* MIPS builtin function support. */
9904 struct builtin_description
9906 /* The code of the main .md file instruction. See mips_builtin_type
9907 for more information. */
9908 enum insn_code icode;
9910 /* The floating-point comparison code to use with ICODE, if any. */
9911 enum mips_fp_condition cond;
9913 /* The name of the builtin function. */
/* NOTE(review): the name field's declaration is elided from this
   excerpt (initializers below supply a string here).  */
9916 /* Specifies how the function should be expanded. */
9917 enum mips_builtin_type builtin_type;
9919 /* The function's prototype. */
9920 enum mips_function_type function_type;
9922 /* The target flags required for this function. */
9926 /* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
9927 FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields. */
9928 #define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
9929 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
9930 MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
9932 /* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
9934 #define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS) \
9935 { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND, \
9936 "__builtin_mips_" #INSN "_" #COND "_s", \
9937 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS }, \
9938 { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND, \
9939 "__builtin_mips_" #INSN "_" #COND "_d", \
9940 MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
9942 /* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
9943 The lower and upper forms require TARGET_FLAGS while the any and all
9944 forms require MASK_MIPS3D. */
9945 #define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS) \
9946 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9947 "__builtin_mips_any_" #INSN "_" #COND "_ps", \
9948 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9949 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9950 "__builtin_mips_all_" #INSN "_" #COND "_ps", \
9951 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D }, \
9952 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9953 "__builtin_mips_lower_" #INSN "_" #COND "_ps", \
9954 MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }, \
9955 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9956 "__builtin_mips_upper_" #INSN "_" #COND "_ps", \
9957 MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
9959 /* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s. The functions
9960 require MASK_MIPS3D. */
9961 #define CMP_4S_BUILTINS(INSN, COND) \
9962 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9963 "__builtin_mips_any_" #INSN "_" #COND "_4s", \
9964 MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9966 { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND, \
9967 "__builtin_mips_all_" #INSN "_" #COND "_4s", \
9968 MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9971 /* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps. The comparison
9972 instruction requires TARGET_FLAGS. */
9973 #define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS) \
9974 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9975 "__builtin_mips_movt_" #INSN "_" #COND "_ps", \
9976 MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9978 { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND, \
9979 "__builtin_mips_movf_" #INSN "_" #COND "_ps", \
9980 MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF, \
9983 /* Define all the builtins related to c.cond.fmt condition COND. */
9984 #define CMP_BUILTINS(COND) \
9985 MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9986 MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D), \
9987 CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D), \
9988 CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT), \
9989 CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D), \
9990 CMP_4S_BUILTINS (c, COND), \
9991 CMP_4S_BUILTINS (cabs, COND)
9993 /* __builtin_mips_abs_ps() maps to the standard absM2 pattern. */
9994 #define CODE_FOR_mips_abs_ps CODE_FOR_absv2sf2
/* Builtin table for paired-single (-mpaired-single) and MIPS-3D
   (-mips3d) instructions; expanded per-condition by CMP_BUILTINS.  */
9996 static const struct builtin_description mips_bdesc[] =
9998 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
9999 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10000 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10001 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10002 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10003 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10004 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10005 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10007 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10008 MASK_PAIRED_SINGLE_FLOAT),
10009 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10010 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10011 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10012 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
/* MIPS-3D reciprocal / reciprocal-square-root step instructions.  */
10014 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10015 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10016 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10017 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10018 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10019 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10021 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10022 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10023 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10024 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10025 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10026 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10028 MIPS_FP_CONDITIONS (CMP_BUILTINS)
10031 /* Builtin functions for the SB-1 processor. */
10033 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10035 static const struct builtin_description sb1_bdesc[] =
10037 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10040 /* Builtin functions for DSP ASE. */
10042 #define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10043 #define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10044 #define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10045 #define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10047 /* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10048 CODE_FOR_mips_<INSN>. FUNCTION_TYPE and TARGET_FLAGS are
10049 builtin_description fields. */
10050 #define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS) \
10051 { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN, \
10052 MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10054 /* Define __builtin_mips_bposge<VALUE>. <VALUE> is 32 for the MIPS32 DSP
10055 branch instruction. TARGET_FLAGS is a builtin_description field. */
10056 #define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS) \
10057 { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE, \
10058 MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
/* One entry per DSP ASE instruction; _ph entries operate on V2HI
   (paired halfwords), _qb on V4QI (quad bytes), _w on SImode words.  */
10060 static const struct builtin_description dsp_bdesc[] =
10062 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10063 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10064 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10065 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10066 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10067 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10068 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10069 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10070 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10071 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10072 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10073 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10074 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10075 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10076 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10077 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10078 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10079 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10080 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10081 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10082 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10083 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10084 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10085 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10086 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10087 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10088 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10089 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10090 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10091 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10092 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10093 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10094 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10095 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10096 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10097 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10098 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10099 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10100 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10101 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10102 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10103 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10104 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
/* Accumulator (DI-typed) multiply/accumulate operations.  */
10105 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10106 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10107 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10108 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10109 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10110 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10111 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10112 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10113 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10114 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10115 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10116 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10117 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10118 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10119 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10120 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10121 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
/* Comparisons writing only DSPControl condition bits have no target.  */
10122 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10123 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10124 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10125 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10126 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10127 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10128 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10129 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10130 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10131 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10132 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10133 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10134 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10135 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10136 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10137 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10138 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10139 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10140 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10141 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10142 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10143 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10144 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10145 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10146 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10147 BPOSGE_BUILTIN (32, MASK_DSP)
10150 /* This helps provide a mapping from builtin function codes to bdesc
10155 /* The builtin function table that this entry describes. */
10156 const struct builtin_description *bdesc;
10158 /* The number of entries in the builtin function table. */
10161 /* The target processor that supports these builtin functions.
10162 PROCESSOR_MAX means we enable them for all processors. */
10163 enum processor_type proc;
/* Function codes are assigned contiguously across these arrays, in
   this order; mips_expand_builtin relies on that layout.  */
10166 static const struct bdesc_map bdesc_arrays[] =
10168 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX },
10169 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 },
10170 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX }
10173 /* Take the head of argument list *ARGLIST and convert it into a form
10174 suitable for input operand OP of instruction ICODE. Return the value
10175 and point *ARGLIST at the next element of the list. */
10178 mips_prepare_builtin_arg (enum insn_code icode,
10179 unsigned int op, tree *arglist)
10182 enum machine_mode mode;
10184 value = expand_normal (TREE_VALUE (*arglist));
10185 mode = insn_data[icode].operand[op].mode;
/* If the expanded rtx does not satisfy the operand's predicate,
   force it into a register of the right mode and re-test.  */
10186 if (!insn_data[icode].operand[op].predicate (value, mode))
10188 value = copy_to_mode_reg (mode, value);
10189 /* Check the predicate again. */
10190 if (!insn_data[icode].operand[op].predicate (value, mode))
10192 error ("invalid argument to builtin function");
/* Advance the caller's cursor to the next argument.  */
10197 *arglist = TREE_CHAIN (*arglist);
10201 /* Return an rtx suitable for output operand OP of instruction ICODE.
10202 If TARGET is non-null, try to use it where possible. */
10205 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10207 enum machine_mode mode;
10209 mode = insn_data[icode].operand[op].mode;
/* Fall back to a fresh pseudo when TARGET is absent or unsuitable.  */
10210 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10211 target = gen_reg_rtx (mode);
10216 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
10219 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10220 enum machine_mode mode ATTRIBUTE_UNUSED,
10221 int ignore ATTRIBUTE_UNUSED)
10223 enum insn_code icode;
10224 enum mips_builtin_type type;
10225 tree fndecl, arglist;
10226 unsigned int fcode;
10227 const struct builtin_description *bdesc;
10228 const struct bdesc_map *m;
10230 fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
10231 arglist = TREE_OPERAND (exp, 1);
10232 fcode = DECL_FUNCTION_CODE (fndecl);
/* Function codes run contiguously across bdesc_arrays; locate the
   table containing FCODE (the per-table decrement of FCODE is elided
   in this excerpt).  */
10235 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10237 if (fcode < m->size)
10240 icode = bdesc[fcode].icode;
10241 type = bdesc[fcode].builtin_type;
/* Dispatch on how the builtin should be expanded.  */
10251 case MIPS_BUILTIN_DIRECT:
10252 return mips_expand_builtin_direct (icode, target, arglist, true);
10254 case MIPS_BUILTIN_DIRECT_NO_TARGET:
10255 return mips_expand_builtin_direct (icode, target, arglist, false);
10257 case MIPS_BUILTIN_MOVT:
10258 case MIPS_BUILTIN_MOVF:
10259 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10262 case MIPS_BUILTIN_CMP_ANY:
10263 case MIPS_BUILTIN_CMP_ALL:
10264 case MIPS_BUILTIN_CMP_UPPER:
10265 case MIPS_BUILTIN_CMP_LOWER:
10266 case MIPS_BUILTIN_CMP_SINGLE:
10267 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10270 case MIPS_BUILTIN_BPOSGE32:
10271 return mips_expand_builtin_bposge (type, target)
10278 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
10278 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
/* Builds the function-type nodes used by the builtin tables, then walks
   bdesc_arrays registering each applicable builtin with the front end.
   NOTE(review): the return-type line, braces, early `return', the
   `if (TARGET_DSP)' guard before the DSP types, trailing NULL_TREE
   arguments on several build_function_type_list calls, and the `offset'
   bookkeeping all appear elided from this view (gaps in the residual
   line numbers).  */
10281 mips_init_builtins (void)
10283 const struct builtin_description *d;
10284 const struct bdesc_map *m;
10285 tree types[(int) MIPS_MAX_FTYPE_MAX];
10286 tree V2SF_type_node;
10287 tree V2HI_type_node;
10288 tree V4QI_type_node;
10289 unsigned int offset;
10291 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
10292 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
/* Paired-single / MIPS-3D builtins operate on V2SF vectors.  */
10295 if (TARGET_PAIRED_SINGLE_FLOAT)
10297 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
10299 types[MIPS_V2SF_FTYPE_V2SF]
10300 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
10302 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
10303 = build_function_type_list (V2SF_type_node,
10304 V2SF_type_node, V2SF_type_node, NULL_TREE);
10306 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
10307 = build_function_type_list (V2SF_type_node,
10308 V2SF_type_node, V2SF_type_node,
10309 integer_type_node, NULL_TREE);
10311 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
10312 = build_function_type_list (V2SF_type_node,
10313 V2SF_type_node, V2SF_type_node,
10314 V2SF_type_node, V2SF_type_node, NULL_TREE);
10316 types[MIPS_V2SF_FTYPE_SF_SF]
10317 = build_function_type_list (V2SF_type_node,
10318 float_type_node, float_type_node, NULL_TREE);
10320 types[MIPS_INT_FTYPE_V2SF_V2SF]
10321 = build_function_type_list (integer_type_node,
10322 V2SF_type_node, V2SF_type_node, NULL_TREE);
10324 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
10325 = build_function_type_list (integer_type_node,
10326 V2SF_type_node, V2SF_type_node,
10327 V2SF_type_node, V2SF_type_node, NULL_TREE);
10329 types[MIPS_INT_FTYPE_SF_SF]
10330 = build_function_type_list (integer_type_node,
10331 float_type_node, float_type_node, NULL_TREE);
10333 types[MIPS_INT_FTYPE_DF_DF]
10334 = build_function_type_list (integer_type_node,
10335 double_type_node, double_type_node, NULL_TREE);
10337 types[MIPS_SF_FTYPE_V2SF]
10338 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
10340 types[MIPS_SF_FTYPE_SF]
10341 = build_function_type_list (float_type_node,
10342 float_type_node, NULL_TREE);
10344 types[MIPS_SF_FTYPE_SF_SF]
10345 = build_function_type_list (float_type_node,
10346 float_type_node, float_type_node, NULL_TREE);
10348 types[MIPS_DF_FTYPE_DF]
10349 = build_function_type_list (double_type_node,
10350 double_type_node, NULL_TREE);
10352 types[MIPS_DF_FTYPE_DF_DF]
10353 = build_function_type_list (double_type_node,
10354 double_type_node, double_type_node, NULL_TREE);
/* DSP builtins operate on V2HI / V4QI vectors and scalar SI/DI values.
   NOTE(review): the guard (presumably `if (TARGET_DSP)') for this
   section appears elided.  */
10359 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
10360 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
10362 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
10363 = build_function_type_list (V2HI_type_node,
10364 V2HI_type_node, V2HI_type_node,
10367 types[MIPS_SI_FTYPE_SI_SI]
10368 = build_function_type_list (intSI_type_node,
10369 intSI_type_node, intSI_type_node,
10372 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
10373 = build_function_type_list (V4QI_type_node,
10374 V4QI_type_node, V4QI_type_node,
10377 types[MIPS_SI_FTYPE_V4QI]
10378 = build_function_type_list (intSI_type_node,
10382 types[MIPS_V2HI_FTYPE_V2HI]
10383 = build_function_type_list (V2HI_type_node,
10387 types[MIPS_SI_FTYPE_SI]
10388 = build_function_type_list (intSI_type_node,
10392 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
10393 = build_function_type_list (V4QI_type_node,
10394 V2HI_type_node, V2HI_type_node,
10397 types[MIPS_V2HI_FTYPE_SI_SI]
10398 = build_function_type_list (V2HI_type_node,
10399 intSI_type_node, intSI_type_node,
10402 types[MIPS_SI_FTYPE_V2HI]
10403 = build_function_type_list (intSI_type_node,
10407 types[MIPS_V2HI_FTYPE_V4QI]
10408 = build_function_type_list (V2HI_type_node,
10412 types[MIPS_V4QI_FTYPE_V4QI_SI]
10413 = build_function_type_list (V4QI_type_node,
10414 V4QI_type_node, intSI_type_node,
10417 types[MIPS_V2HI_FTYPE_V2HI_SI]
10418 = build_function_type_list (V2HI_type_node,
10419 V2HI_type_node, intSI_type_node,
10422 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
10423 = build_function_type_list (V2HI_type_node,
10424 V4QI_type_node, V2HI_type_node,
10427 types[MIPS_SI_FTYPE_V2HI_V2HI]
10428 = build_function_type_list (intSI_type_node,
10429 V2HI_type_node, V2HI_type_node,
10432 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
10433 = build_function_type_list (intDI_type_node,
10434 intDI_type_node, V4QI_type_node, V4QI_type_node,
10437 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
10438 = build_function_type_list (intDI_type_node,
10439 intDI_type_node, V2HI_type_node, V2HI_type_node,
10442 types[MIPS_DI_FTYPE_DI_SI_SI]
10443 = build_function_type_list (intDI_type_node,
10444 intDI_type_node, intSI_type_node, intSI_type_node,
10447 types[MIPS_V4QI_FTYPE_SI]
10448 = build_function_type_list (V4QI_type_node,
10452 types[MIPS_V2HI_FTYPE_SI]
10453 = build_function_type_list (V2HI_type_node,
10457 types[MIPS_VOID_FTYPE_V4QI_V4QI]
10458 = build_function_type_list (void_type_node,
10459 V4QI_type_node, V4QI_type_node,
10462 types[MIPS_SI_FTYPE_V4QI_V4QI]
10463 = build_function_type_list (intSI_type_node,
10464 V4QI_type_node, V4QI_type_node,
10467 types[MIPS_VOID_FTYPE_V2HI_V2HI]
10468 = build_function_type_list (void_type_node,
10469 V2HI_type_node, V2HI_type_node,
10472 types[MIPS_SI_FTYPE_DI_SI]
10473 = build_function_type_list (intSI_type_node,
10474 intDI_type_node, intSI_type_node,
10477 types[MIPS_DI_FTYPE_DI_SI]
10478 = build_function_type_list (intDI_type_node,
10479 intDI_type_node, intSI_type_node,
10482 types[MIPS_VOID_FTYPE_SI_SI]
10483 = build_function_type_list (void_type_node,
10484 intSI_type_node, intSI_type_node,
10487 types[MIPS_SI_FTYPE_PTR_SI]
10488 = build_function_type_list (intSI_type_node,
10489 ptr_type_node, intSI_type_node,
10492 types[MIPS_SI_FTYPE_VOID]
10493 = build_function_type (intSI_type_node, void_list_node);
10496 /* Iterate through all of the bdesc arrays, initializing all of the
10497 builtin functions. */
10500 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
/* Register a table only if it applies to all processors or matches the
   selected architecture; within a table, register an entry only when
   all of its required target flags are enabled.  */
10502 if (m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10503 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10504 if ((d->target_flags & target_flags) == d->target_flags)
/* Function code = index within this table plus the running offset of
   preceding tables, matching the lookup in mips_expand_builtin.  */
10505 lang_hooks.builtin_function (d->name, types[d->function_type],
10506 d - m->bdesc + offset,
10507 BUILT_IN_MD, NULL, NULL);
10512 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
10513 .md pattern and ARGLIST is the list of function arguments. TARGET,
10514 if nonnull, suggests a good place to put the result.
10515 HAS_TARGET indicates the function must return something. */
/* NOTE(review): the return-type line, the `has_target' parameter line,
   braces, the declaration/initialization of `i', the `switch (i)'
   header with its case labels, and the final return appear elided from
   this view.  */
10518 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist,
10521 rtx ops[MAX_RECOG_OPERANDS];
10526 /* We save target to ops[0]. */
10527 ops[0] = mips_prepare_builtin_target (icode, 0, target);
10531 /* We need to test if arglist is not zero. Some instructions have extra
10532 clobber registers. */
10533 for (; i < insn_data[icode].n_operands && arglist != 0; i++)
10534 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
/* Emit the instruction with however many operands were collected.  */
10539 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10543 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10547 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10551 gcc_unreachable ();
10556 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10557 function (TYPE says which). ARGLIST is the list of arguments to the
10558 function, ICODE is the instruction that should be used to compare
10559 the first two arguments, and COND is the condition it should test.
10560 TARGET, if nonnull, suggests a good place to put the result. */
/* NOTE(review): the return-type line, braces, the `else' between the
   two operand orderings, and the `return target;' appear elided from
   this view.  */
10563 mips_expand_builtin_movtf (enum mips_builtin_type type,
10564 enum insn_code icode, enum mips_fp_condition cond,
10565 rtx target, tree arglist)
10567 rtx cmp_result, op0, op1;
/* First emit the comparison that produces CMP_RESULT.  */
10569 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10570 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10571 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10572 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
/* Then emit the conditional move keyed off CMP_RESULT.  */
10574 icode = CODE_FOR_mips_cond_move_tf_ps;
10575 target = mips_prepare_builtin_target (icode, 0, target);
10576 if (type == MIPS_BUILTIN_MOVT)
/* MOVT selects its operands in the opposite order from MOVF, so the
   two remaining arguments are prepared swapped for that case.  */
10578 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10579 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10583 op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10584 op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10586 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10590 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10591 into TARGET otherwise. Return TARGET. */
/* NOTE(review): the return-type line, braces and the trailing
   `return target;' appear elided from this view.  */
10594 mips_builtin_branch_and_move (rtx condition, rtx target,
10595 rtx value_if_true, rtx value_if_false)
10597 rtx true_label, done_label;
10599 true_label = gen_label_rtx ();
10600 done_label = gen_label_rtx ();
10602 /* First assume that CONDITION is false. */
10603 emit_move_insn (target, value_if_false);
10605 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
10606 emit_jump_insn (gen_condjump (condition, true_label));
10607 emit_jump_insn (gen_jump (done_label));
10610 /* Fix TARGET if CONDITION is true. */
10611 emit_label (true_label);
10612 emit_move_insn (target, value_if_true);
10614 emit_label (done_label);
10618 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
10619 of the comparison instruction and COND is the condition it should test.
10620 ARGLIST is the list of function arguments and TARGET, if nonnull,
10621 suggests a good place to put the boolean result. */
/* NOTE(review): the return-type line, braces, the declaration of `i',
   the case labels of the `switch (insn_data[icode].n_operands)', and
   the `default:' label of the second switch appear elided from this
   view.  */
10624 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10625 enum insn_code icode, enum mips_fp_condition cond,
10626 rtx target, tree arglist)
10628 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
/* The boolean result is always a SImode value.  */
10631 if (target == 0 || GET_MODE (target) != SImode)
10632 target = gen_reg_rtx (SImode);
10634 /* Prepare the operands to the comparison. */
10635 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
/* Operand 0 is the result; the last operand is the condition code, so
   only the operands in between come from the argument list.  */
10636 for (i = 1; i < insn_data[icode].n_operands - 1; i++)
10637 ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10639 switch (insn_data[icode].n_operands)
10642 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10646 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10647 ops[3], ops[4], GEN_INT (cond)));
10651 gcc_unreachable ();
10654 /* If the comparison sets more than one register, we define the result
10655 to be 0 if all registers are false and -1 if all registers are true.
10656 The value of the complete result is indeterminate otherwise. */
10657 switch (builtin_type)
10659 case MIPS_BUILTIN_CMP_ALL:
/* ALL: result is 1 only when every condition register is true, i.e.
   when the combined result equals -1.  */
10660 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10661 return mips_builtin_branch_and_move (condition, target,
10662 const0_rtx, const1_rtx);
10664 case MIPS_BUILTIN_CMP_UPPER:
10665 case MIPS_BUILTIN_CMP_LOWER:
/* UPPER/LOWER: test a single condition register, selected by OFFSET
   (1 for the upper half, 0 for the lower).  */
10666 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10667 condition = gen_single_cc (cmp_result, offset);
10668 return mips_builtin_branch_and_move (condition, target,
10669 const1_rtx, const0_rtx);
/* Remaining types (e.g. ANY/SINGLE): result is 1 when any condition
   register is set.  */
10672 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10673 return mips_builtin_branch_and_move (condition, target,
10674 const1_rtx, const0_rtx);
10678 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
10679 suggests a good place to put the boolean result. */
/* NOTE(review): the return-type line, braces, the declaration and
   assignment of `cmp_value' (presumably 32 when BUILTIN_TYPE is
   MIPS_BUILTIN_BPOSGE32), and the gcc_unreachable fallback appear
   elided from this view.  */
10682 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10684 rtx condition, cmp_result;
/* The boolean result is always a SImode value.  */
10687 if (target == 0 || GET_MODE (target) != SImode)
10688 target = gen_reg_rtx (SImode);
/* Read the DSP "pos" field via the dedicated condition register.  */
10690 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10692 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10697 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value))
10698 return mips_builtin_branch_and_move (condition, target,
10699 const1_rtx, const0_rtx);
10702 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
10703 FIRST is true if this is the first time handling this decl. */
/* NOTE(review): the return-type line (presumably `static void') and
   the braces appear elided from this view.  */
10706 mips_encode_section_info (tree decl, rtx rtl, int first)
/* Start from the generic handling, then layer on MIPS-specific flags.  */
10708 default_encode_section_info (decl, rtl, first);
/* Functions carrying the "long_call" attribute must be called through
   a register; record that on the symbol.  */
10710 if (TREE_CODE (decl) == FUNCTION_DECL
10711 && lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
10713 rtx symbol = XEXP (rtl, 0);
10714 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
10718 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. PIC_FUNCTION_ADDR_REGNUM is live
10719 on entry to a function when generating -mshared abicalls code. */
/* NOTE(review): the return-type line (presumably `static void') and
   the braces appear elided from this view.  */
10722 mips_extra_live_on_entry (bitmap regs)
10724 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
10725 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
10729 #include "gt-mips.h"