1 /* Subroutines used for MIPS code generation.
2 Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007
4 Free Software Foundation, Inc.
5 Contributed by A. Lichnewsky, lich@inria.inria.fr.
6 Changes by Michael Meissner, meissner@osf.org.
7 64-bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
8 Brendan Eich, brendan@microunity.com.
10 This file is part of GCC.
12 GCC is free software; you can redistribute it and/or modify
13 it under the terms of the GNU General Public License as published by
14 the Free Software Foundation; either version 2, or (at your option)
17 GCC is distributed in the hope that it will be useful,
18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with GCC; see the file COPYING. If not, write to
24 the Free Software Foundation, 51 Franklin Street, Fifth Floor,
25 Boston, MA 02110-1301, USA. */
29 #include "coretypes.h"
34 #include "hard-reg-set.h"
36 #include "insn-config.h"
37 #include "conditions.h"
38 #include "insn-attr.h"
54 #include "target-def.h"
55 #include "integrate.h"
56 #include "langhooks.h"
57 #include "cfglayout.h"
58 #include "sched-int.h"
59 #include "tree-gimple.h"
/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.
   The wrapped symbol's mips_symbol_type is encoded in the unspec
   number as an offset from UNSPEC_ADDRESS_FIRST, so a valid wrapper's
   XINT (X, 1) lies in [UNSPEC_ADDRESS_FIRST,
   UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES).  */
#define UNSPEC_ADDRESS_P(X)					\
  (GET_CODE (X) == UNSPEC					\
   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
68 /* Extract the symbol or label from UNSPEC wrapper X. */
69 #define UNSPEC_ADDRESS(X) \
/* Extract the symbol type from UNSPEC wrapper X.  The type is stored
   as the unspec number, biased by UNSPEC_ADDRESS_FIRST; see
   UNSPEC_ADDRESS_P for the valid range.  */
#define UNSPEC_ADDRESS_TYPE(X) \
  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
/* The maximum distance between the top of the stack frame and the
   value $sp has when we save and restore registers.

   The value for normal-mode code must be a SMALL_OPERAND and must
   preserve the maximum stack alignment.  We therefore use a value
   of 0x7ff0 in this case.

   MIPS16e SAVE and RESTORE instructions can adjust the stack pointer by
   up to 0x7f8 bytes and can usually save or restore all the registers
   that we need to save or restore.  (Note that we can only use these
   instructions for o32, for which the stack alignment is 8 bytes.)

   We use a maximum gap of 0x100 or 0x400 for MIPS16 code when SAVE and
   RESTORE are not available.  We can then use unextended instructions
   to save and restore registers, and to allocate and deallocate the top
   part of the frame.  */
#define MIPS_MAX_FIRST_STACK_STEP					\
  (!TARGET_MIPS16 ? 0x7ff0						\
   : GENERATE_MIPS16E_SAVE_RESTORE ? 0x7f8				\
   : TARGET_64BIT ? 0x100 : 0x400)
97 /* True if INSN is a mips.md pattern or asm statement. */
98 #define USEFUL_INSN_P(INSN) \
100 && GET_CODE (PATTERN (INSN)) != USE \
101 && GET_CODE (PATTERN (INSN)) != CLOBBER \
102 && GET_CODE (PATTERN (INSN)) != ADDR_VEC \
103 && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
105 /* If INSN is a delayed branch sequence, return the first instruction
106 in the sequence, otherwise return INSN itself. */
107 #define SEQ_BEGIN(INSN) \
108 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
109 ? XVECEXP (PATTERN (INSN), 0, 0) \
112 /* Likewise for the last instruction in a delayed branch sequence. */
113 #define SEQ_END(INSN) \
114 (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE \
115 ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1) \
/* Execute the following loop body with SUBINSN set to each instruction
   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  The walk uses
   NEXT_INSN, so INSN must be part of the insn chain; for a non-SEQUENCE
   insn this visits INSN exactly once.  */
#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
       (SUBINSN) = NEXT_INSN (SUBINSN))
/* True if bit BIT is set in VALUE.  Shift an unsigned long constant:
   the saved-register masks are 32-bit, so BIT can be 31, and
   "1 << 31" would left-shift into the sign bit of a signed int,
   which is undefined behavior (C99 6.5.7).  */
#define BITSET_P(VALUE, BIT) (((VALUE) & (1UL << (BIT))) != 0)
128 /* Classifies an address.
131 A natural register + offset address. The register satisfies
132 mips_valid_base_register_p and the offset is a const_arith_operand.
135 A LO_SUM rtx. The first operand is a valid base register and
136 the second operand is a symbolic address.
139 A signed 16-bit constant address.
142 A constant symbolic address (equivalent to CONSTANT_SYMBOLIC). */
143 enum mips_address_type {
150 /* Classifies the prototype of a builtin function. */
151 enum mips_function_type
153 MIPS_V2SF_FTYPE_V2SF,
154 MIPS_V2SF_FTYPE_V2SF_V2SF,
155 MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
156 MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
157 MIPS_V2SF_FTYPE_SF_SF,
158 MIPS_INT_FTYPE_V2SF_V2SF,
159 MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
160 MIPS_INT_FTYPE_SF_SF,
161 MIPS_INT_FTYPE_DF_DF,
168 /* For MIPS DSP ASE */
170 MIPS_DI_FTYPE_DI_SI_SI,
171 MIPS_DI_FTYPE_DI_V2HI_V2HI,
172 MIPS_DI_FTYPE_DI_V4QI_V4QI,
174 MIPS_SI_FTYPE_PTR_SI,
178 MIPS_SI_FTYPE_V2HI_V2HI,
180 MIPS_SI_FTYPE_V4QI_V4QI,
183 MIPS_V2HI_FTYPE_SI_SI,
184 MIPS_V2HI_FTYPE_V2HI,
185 MIPS_V2HI_FTYPE_V2HI_SI,
186 MIPS_V2HI_FTYPE_V2HI_V2HI,
187 MIPS_V2HI_FTYPE_V4QI,
188 MIPS_V2HI_FTYPE_V4QI_V2HI,
190 MIPS_V4QI_FTYPE_V2HI_V2HI,
191 MIPS_V4QI_FTYPE_V4QI_SI,
192 MIPS_V4QI_FTYPE_V4QI_V4QI,
193 MIPS_VOID_FTYPE_SI_SI,
194 MIPS_VOID_FTYPE_V2HI_V2HI,
195 MIPS_VOID_FTYPE_V4QI_V4QI,
197 /* For MIPS DSP REV 2 ASE. */
198 MIPS_V4QI_FTYPE_V4QI,
199 MIPS_SI_FTYPE_SI_SI_SI,
200 MIPS_DI_FTYPE_DI_USI_USI,
202 MIPS_DI_FTYPE_USI_USI,
203 MIPS_V2HI_FTYPE_SI_SI_SI,
209 /* Specifies how a builtin function should be converted into rtl. */
210 enum mips_builtin_type
212 /* The builtin corresponds directly to an .md pattern. The return
213 value is mapped to operand 0 and the arguments are mapped to
214 operands 1 and above. */
217 /* The builtin corresponds directly to an .md pattern. There is no return
218 value and the arguments are mapped to operands 0 and above. */
219 MIPS_BUILTIN_DIRECT_NO_TARGET,
221 /* The builtin corresponds to a comparison instruction followed by
222 a mips_cond_move_tf_ps pattern. The first two arguments are the
223 values to compare and the second two arguments are the vector
224 operands for the movt.ps or movf.ps instruction (in assembly order). */
228 /* The builtin corresponds to a V2SF comparison instruction. Operand 0
229 of this instruction is the result of the comparison, which has mode
230 CCV2 or CCV4. The function arguments are mapped to operands 1 and
231 above. The function's return value is an SImode boolean that is
232 true under the following conditions:
234 MIPS_BUILTIN_CMP_ANY: one of the registers is true
235 MIPS_BUILTIN_CMP_ALL: all of the registers are true
236 MIPS_BUILTIN_CMP_LOWER: the first register is true
237 MIPS_BUILTIN_CMP_UPPER: the second register is true. */
238 MIPS_BUILTIN_CMP_ANY,
239 MIPS_BUILTIN_CMP_ALL,
240 MIPS_BUILTIN_CMP_UPPER,
241 MIPS_BUILTIN_CMP_LOWER,
243 /* As above, but the instruction only sets a single $fcc register. */
244 MIPS_BUILTIN_CMP_SINGLE,
246 /* For generating bposge32 branch instructions in MIPS32 DSP ASE. */
247 MIPS_BUILTIN_BPOSGE32
250 /* Invokes MACRO (COND) for each c.cond.fmt condition. */
251 #define MIPS_FP_CONDITIONS(MACRO) \
269 /* Enumerates the codes above as MIPS_FP_COND_<X>. */
270 #define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
271 enum mips_fp_condition {
272 MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
275 /* Index X provides the string representation of MIPS_FP_COND_<X>. */
276 #define STRINGIFY(X) #X
277 static const char *const mips_fp_conditions[] = {
278 MIPS_FP_CONDITIONS (STRINGIFY)
/* A function to save or store a register.  The first argument is the
   register and the second is the stack slot.  Used as the callback type
   of mips_for_each_saved_reg; mips_save_reg and mips_restore_reg (declared
   below) match this signature.  */
typedef void (*mips_save_restore_fn) (rtx, rtx);
285 struct mips16_constant;
286 struct mips_arg_info;
287 struct mips_address_info;
288 struct mips_integer_op;
291 static enum mips_symbol_type mips_classify_symbol (rtx);
292 static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
293 static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
294 static bool mips_classify_address (struct mips_address_info *, rtx,
295 enum machine_mode, int);
296 static bool mips_cannot_force_const_mem (rtx);
297 static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
298 static int mips_symbol_insns (enum mips_symbol_type);
299 static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
300 static rtx mips_force_temporary (rtx, rtx);
301 static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
302 static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
303 static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
304 static unsigned int mips_build_lower (struct mips_integer_op *,
305 unsigned HOST_WIDE_INT);
306 static unsigned int mips_build_integer (struct mips_integer_op *,
307 unsigned HOST_WIDE_INT);
308 static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
309 static int m16_check_op (rtx, int, int, int);
310 static bool mips_rtx_costs (rtx, int, int, int *);
311 static int mips_address_cost (rtx);
312 static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
313 static void mips_load_call_address (rtx, rtx, int);
314 static bool mips_function_ok_for_sibcall (tree, tree);
315 static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
316 static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
317 static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
318 static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
319 tree, int, struct mips_arg_info *);
320 static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
321 static void mips_set_architecture (const struct mips_cpu_info *);
322 static void mips_set_tune (const struct mips_cpu_info *);
323 static bool mips_handle_option (size_t, const char *, int);
324 static struct machine_function *mips_init_machine_status (void);
325 static void print_operand_reloc (FILE *, rtx, const char **);
326 static void mips_file_start (void);
327 static bool mips_rewrite_small_data_p (rtx);
328 static int mips_small_data_pattern_1 (rtx *, void *);
329 static int mips_rewrite_small_data_1 (rtx *, void *);
330 static bool mips_function_has_gp_insn (void);
331 static unsigned int mips_global_pointer (void);
332 static bool mips_save_reg_p (unsigned int);
333 static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
334 mips_save_restore_fn);
335 static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
336 static void mips_output_cplocal (void);
337 static void mips_emit_loadgp (void);
338 static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
339 static void mips_set_frame_expr (rtx);
340 static rtx mips_frame_set (rtx, rtx);
341 static void mips_save_reg (rtx, rtx);
342 static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
343 static void mips_restore_reg (rtx, rtx);
344 static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
345 HOST_WIDE_INT, tree);
346 static int symbolic_expression_p (rtx);
347 static section *mips_select_rtx_section (enum machine_mode, rtx,
348 unsigned HOST_WIDE_INT);
349 static section *mips_function_rodata_section (tree);
350 static bool mips_in_small_data_p (tree);
351 static bool mips_use_anchors_for_symbol_p (rtx);
352 static int mips_fpr_return_fields (tree, tree *);
353 static bool mips_return_in_msb (tree);
354 static rtx mips_return_fpr_pair (enum machine_mode mode,
355 enum machine_mode mode1, HOST_WIDE_INT,
356 enum machine_mode mode2, HOST_WIDE_INT);
357 static rtx mips16_gp_pseudo_reg (void);
358 static void mips16_fp_args (FILE *, int, int);
359 static void build_mips16_function_stub (FILE *);
360 static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
361 static void dump_constants (struct mips16_constant *, rtx);
362 static int mips16_insn_length (rtx);
363 static int mips16_rewrite_pool_refs (rtx *, void *);
364 static void mips16_lay_out_constants (void);
365 static void mips_sim_reset (struct mips_sim *);
366 static void mips_sim_init (struct mips_sim *, state_t);
367 static void mips_sim_next_cycle (struct mips_sim *);
368 static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
369 static int mips_sim_wait_regs_2 (rtx *, void *);
370 static void mips_sim_wait_regs_1 (rtx *, void *);
371 static void mips_sim_wait_regs (struct mips_sim *, rtx);
372 static void mips_sim_wait_units (struct mips_sim *, rtx);
373 static void mips_sim_wait_insn (struct mips_sim *, rtx);
374 static void mips_sim_record_set (rtx, rtx, void *);
375 static void mips_sim_issue_insn (struct mips_sim *, rtx);
376 static void mips_sim_issue_nop (struct mips_sim *);
377 static void mips_sim_finish_insn (struct mips_sim *, rtx);
378 static void vr4130_avoid_branch_rt_conflict (rtx);
379 static void vr4130_align_insns (void);
380 static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
381 static void mips_avoid_hazards (void);
382 static void mips_reorg (void);
383 static bool mips_strict_matching_cpu_name_p (const char *, const char *);
384 static bool mips_matching_cpu_name_p (const char *, const char *);
385 static const struct mips_cpu_info *mips_parse_cpu (const char *);
386 static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
387 static bool mips_return_in_memory (tree, tree);
388 static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
389 static void mips_macc_chains_record (rtx);
390 static void mips_macc_chains_reorder (rtx *, int);
391 static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
392 static bool vr4130_true_reg_dependence_p (rtx);
393 static bool vr4130_swap_insns_p (rtx, rtx);
394 static void vr4130_reorder (rtx *, int);
395 static void mips_promote_ready (rtx *, int, int);
396 static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
397 static int mips_variable_issue (FILE *, int, rtx, int);
398 static int mips_adjust_cost (rtx, rtx, rtx, int);
399 static int mips_issue_rate (void);
400 static int mips_multipass_dfa_lookahead (void);
401 static void mips_init_libfuncs (void);
402 static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
404 static tree mips_build_builtin_va_list (void);
405 static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
406 static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
408 static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
410 static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
412 static bool mips_valid_pointer_mode (enum machine_mode);
413 static bool mips_vector_mode_supported_p (enum machine_mode);
414 static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree, unsigned int);
415 static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
416 static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
417 static void mips_init_builtins (void);
418 static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
419 static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
420 enum insn_code, enum mips_fp_condition,
422 static rtx mips_expand_builtin_compare (enum mips_builtin_type,
423 enum insn_code, enum mips_fp_condition,
425 static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
426 static void mips_encode_section_info (tree, rtx, int);
427 static void mips_extra_live_on_entry (bitmap);
428 static int mips_comp_type_attributes (tree, tree);
429 static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
430 static bool mips_offset_within_alignment_p (rtx, HOST_WIDE_INT);
431 static void mips_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
433 /* Structure to be filled in by compute_frame_size with register
434 save masks, and offsets for the current function. */
436 struct mips_frame_info GTY(())
438 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
439 HOST_WIDE_INT var_size; /* # bytes that variables take up */
440 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
441 HOST_WIDE_INT cprestore_size; /* # bytes that the .cprestore slot takes up */
442 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
443 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
444 unsigned int mask; /* mask of saved gp registers */
445 unsigned int fmask; /* mask of saved fp registers */
446 HOST_WIDE_INT gp_save_offset; /* offset from vfp to store gp registers */
447 HOST_WIDE_INT fp_save_offset; /* offset from vfp to store fp registers */
448 HOST_WIDE_INT gp_sp_offset; /* offset from new sp to store gp registers */
449 HOST_WIDE_INT fp_sp_offset; /* offset from new sp to store fp registers */
450 bool initialized; /* true if frame size already calculated */
451 int num_gp; /* number of gp registers saved */
452 int num_fp; /* number of fp registers saved */
455 struct machine_function GTY(()) {
456 /* Pseudo-reg holding the value of $28 in a mips16 function which
457 refers to GP relative global variables. */
458 rtx mips16_gp_pseudo_rtx;
460 /* The number of extra stack bytes taken up by register varargs.
461 This area is allocated by the callee at the very top of the frame. */
464 /* Current frame information, calculated by compute_frame_size. */
465 struct mips_frame_info frame;
467 /* The register to use as the global pointer within this function. */
468 unsigned int global_pointer;
/* True if mips_adjust_insn_length should ignore an instruction's
   hazard length.  */
472 bool ignore_hazard_length_p;
/* True if the whole function is suitable for .set noreorder and
   .set nomacro.  */
476 bool all_noreorder_p;
478 /* True if the function is known to have an instruction that needs $gp. */
482 /* Information about a single argument. */
485 /* True if the argument is passed in a floating-point register, or
486 would have been if we hadn't run out of registers. */
489 /* The number of words passed in registers, rounded up. */
490 unsigned int reg_words;
492 /* For EABI, the offset of the first register from GP_ARG_FIRST or
493 FP_ARG_FIRST. For other ABIs, the offset of the first register from
494 the start of the ABI's argument structure (see the CUMULATIVE_ARGS
495 comment for details).
The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
on the stack.  */
499 unsigned int reg_offset;
501 /* The number of words that must be passed on the stack, rounded up. */
502 unsigned int stack_words;
504 /* The offset from the start of the stack overflow area of the argument's
505 first stack word. Only meaningful when STACK_WORDS is nonzero. */
506 unsigned int stack_offset;
510 /* Information about an address described by mips_address_type.
516 REG is the base register and OFFSET is the constant offset.
519 REG is the register that contains the high part of the address,
520 OFFSET is the symbolic address being referenced and SYMBOL_TYPE
521 is the type of OFFSET's symbol.
524 SYMBOL_TYPE is the type of symbol being referenced. */
526 struct mips_address_info
528 enum mips_address_type type;
531 enum mips_symbol_type symbol_type;
535 /* One stage in a constant building sequence. These sequences have
539 A = A CODE[1] VALUE[1]
540 A = A CODE[2] VALUE[2]
543 where A is an accumulator, each CODE[i] is a binary rtl operation
544 and each VALUE[i] is a constant integer. */
545 struct mips_integer_op {
547 unsigned HOST_WIDE_INT value;
/* The largest number of operations needed to load an integer constant.
   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI
   (six operations).  When the lowest bit is clear, we can try, but reject,
   a sequence with an extra SLL at the end; the seventh slot lets that
   candidate be built before it is rejected.  */
#define MIPS_MAX_INTEGER_OPS 7
557 /* Information about a MIPS16e SAVE or RESTORE instruction. */
558 struct mips16e_save_restore_info {
559 /* The number of argument registers saved by a SAVE instruction.
560 0 for RESTORE instructions. */
563 /* Bit X is set if the instruction saves or restores GPR X. */
566 /* The total number of bytes to allocate. */
570 /* Global variables for machine-dependent things. */
572 /* Threshold for data being put into the small data/bss area, instead
573 of the normal data area. */
574 int mips_section_threshold = -1;
576 /* Count the number of .file directives, so that .loc is up to date. */
577 int num_source_filenames = 0;
579 /* Count the number of sdb related labels are generated (to find block
580 start and end boundaries). */
581 int sdb_label_count = 0;
583 /* Next label # for each statement for Silicon Graphics IRIS systems. */
586 /* Name of the file containing the current function. */
587 const char *current_function_file = "";
589 /* Number of nested .set noreorder, noat, nomacro, and volatile requests. */
595 /* The next branch instruction is a branch likely, not branch normal. */
596 int mips_branch_likely;
598 /* The operands passed to the last cmpMM expander. */
601 /* The target cpu for code generation. */
602 enum processor_type mips_arch;
603 const struct mips_cpu_info *mips_arch_info;
605 /* The target cpu for optimization and scheduling. */
606 enum processor_type mips_tune;
607 const struct mips_cpu_info *mips_tune_info;
609 /* Which instruction set architecture to use. */
612 /* Which ABI to use. */
613 int mips_abi = MIPS_ABI_DEFAULT;
615 /* Cost information to use. */
616 const struct mips_rtx_cost_data *mips_cost;
618 /* Whether we are generating mips16 hard float code. In mips16 mode
619 we always set TARGET_SOFT_FLOAT; this variable is nonzero if
620 -msoft-float was not specified by the user, which means that we
621 should arrange to call mips32 hard floating point code. */
622 int mips16_hard_float;
624 /* The architecture selected by -mipsN. */
625 static const struct mips_cpu_info *mips_isa_info;
627 /* If TRUE, we split addresses into their high and low parts in the RTL. */
628 int mips_split_addresses;
630 /* Mode used for saving/restoring general purpose registers. */
631 static enum machine_mode gpr_mode;
633 /* Array giving truth value on whether or not a given hard register
634 can support a given mode. */
635 char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
637 /* List of all MIPS punctuation characters used by print_operand. */
638 char mips_print_operand_punct[256];
640 /* Map GCC register number to debugger register number. */
641 int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
642 int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
644 /* A copy of the original flag_delayed_branch: see override_options. */
645 static int mips_flag_delayed_branch;
647 static GTY (()) int mips_output_filename_first_time = 1;
649 /* mips_split_p[X] is true if symbols of type X can be split by
650 mips_split_symbol(). */
651 bool mips_split_p[NUM_SYMBOL_TYPES];
653 /* mips_lo_relocs[X] is the relocation to use when a symbol of type X
654 appears in a LO_SUM. It can be null if such LO_SUMs aren't valid or
655 if they are matched by a special .md file pattern. */
656 static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
658 /* Likewise for HIGHs. */
659 static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
661 /* Map hard register number to register class */
662 const enum reg_class mips_regno_to_class[] =
664 LEA_REGS, LEA_REGS, M16_NA_REGS, V1_REG,
665 M16_REGS, M16_REGS, M16_REGS, M16_REGS,
666 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
667 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
668 M16_NA_REGS, M16_NA_REGS, LEA_REGS, LEA_REGS,
669 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
670 T_REG, PIC_FN_ADDR_REG, LEA_REGS, LEA_REGS,
671 LEA_REGS, LEA_REGS, LEA_REGS, LEA_REGS,
672 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
673 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
674 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
675 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
676 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
677 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
678 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
679 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
680 MD0_REG, MD1_REG, NO_REGS, ST_REGS,
681 ST_REGS, ST_REGS, ST_REGS, ST_REGS,
682 ST_REGS, ST_REGS, ST_REGS, NO_REGS,
683 NO_REGS, ALL_REGS, ALL_REGS, NO_REGS,
684 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
685 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
686 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
687 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
688 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
689 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
690 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
691 COP0_REGS, COP0_REGS, COP0_REGS, COP0_REGS,
692 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
693 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
694 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
695 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
696 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
697 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
698 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
699 COP2_REGS, COP2_REGS, COP2_REGS, COP2_REGS,
700 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
701 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
702 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
703 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
704 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
705 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
706 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
707 COP3_REGS, COP3_REGS, COP3_REGS, COP3_REGS,
708 DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS, DSP_ACC_REGS,
709 DSP_ACC_REGS, DSP_ACC_REGS, ALL_REGS, ALL_REGS,
710 ALL_REGS, ALL_REGS, ALL_REGS, ALL_REGS
713 /* Table of machine dependent attributes. */
714 const struct attribute_spec mips_attribute_table[] =
716 { "long_call", 0, 0, false, true, true, NULL },
717 { "far", 0, 0, false, true, true, NULL },
718 { "near", 0, 0, false, true, true, NULL },
719 { NULL, 0, 0, false, false, false, NULL }
722 /* A table describing all the processors gcc knows about. Names are
723 matched in the order listed. The first mention of an ISA level is
724 taken as the canonical name for that ISA.
726 To ease comparison, please keep this table in the same order as
727 gas's mips_cpu_info_table[]. Please also make sure that
728 MIPS_ISA_LEVEL_SPEC handles all -march options correctly. */
729 const struct mips_cpu_info mips_cpu_info_table[] = {
730 /* Entries for generic ISAs */
731 { "mips1", PROCESSOR_R3000, 1 },
732 { "mips2", PROCESSOR_R6000, 2 },
733 { "mips3", PROCESSOR_R4000, 3 },
734 { "mips4", PROCESSOR_R8000, 4 },
735 { "mips32", PROCESSOR_4KC, 32 },
736 { "mips32r2", PROCESSOR_M4K, 33 },
737 { "mips64", PROCESSOR_5KC, 64 },
740 { "r3000", PROCESSOR_R3000, 1 },
741 { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
742 { "r3900", PROCESSOR_R3900, 1 },
745 { "r6000", PROCESSOR_R6000, 2 },
748 { "r4000", PROCESSOR_R4000, 3 },
749 { "vr4100", PROCESSOR_R4100, 3 },
750 { "vr4111", PROCESSOR_R4111, 3 },
751 { "vr4120", PROCESSOR_R4120, 3 },
752 { "vr4130", PROCESSOR_R4130, 3 },
753 { "vr4300", PROCESSOR_R4300, 3 },
754 { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
755 { "r4600", PROCESSOR_R4600, 3 },
756 { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
757 { "r4650", PROCESSOR_R4650, 3 },
760 { "r8000", PROCESSOR_R8000, 4 },
761 { "vr5000", PROCESSOR_R5000, 4 },
762 { "vr5400", PROCESSOR_R5400, 4 },
763 { "vr5500", PROCESSOR_R5500, 4 },
764 { "rm7000", PROCESSOR_R7000, 4 },
765 { "rm9000", PROCESSOR_R9000, 4 },
768 { "4kc", PROCESSOR_4KC, 32 },
769 { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
770 { "4kp", PROCESSOR_4KP, 32 },
771 { "4ksc", PROCESSOR_4KC, 32 },
773 /* MIPS32 Release 2 */
774 { "m4k", PROCESSOR_M4K, 33 },
775 { "4kec", PROCESSOR_4KC, 33 },
776 { "4kem", PROCESSOR_4KC, 33 },
777 { "4kep", PROCESSOR_4KP, 33 },
778 { "4ksd", PROCESSOR_4KC, 33 },
780 { "24kc", PROCESSOR_24KC, 33 },
781 { "24kf2_1", PROCESSOR_24KF2_1, 33 },
782 { "24kf", PROCESSOR_24KF2_1, 33 },
783 { "24kf1_1", PROCESSOR_24KF1_1, 33 },
784 { "24kfx", PROCESSOR_24KF1_1, 33 },
785 { "24kx", PROCESSOR_24KF1_1, 33 },
787 { "24kec", PROCESSOR_24KC, 33 }, /* 24K with DSP */
788 { "24kef2_1", PROCESSOR_24KF2_1, 33 },
789 { "24kef", PROCESSOR_24KF2_1, 33 },
790 { "24kef1_1", PROCESSOR_24KF1_1, 33 },
791 { "24kefx", PROCESSOR_24KF1_1, 33 },
792 { "24kex", PROCESSOR_24KF1_1, 33 },
794 { "34kc", PROCESSOR_24KC, 33 }, /* 34K with MT/DSP */
795 { "34kf2_1", PROCESSOR_24KF2_1, 33 },
796 { "34kf", PROCESSOR_24KF2_1, 33 },
797 { "34kf1_1", PROCESSOR_24KF1_1, 33 },
798 { "34kfx", PROCESSOR_24KF1_1, 33 },
799 { "34kx", PROCESSOR_24KF1_1, 33 },
801 { "74kc", PROCESSOR_74KC, 33 }, /* 74K with DSPr2 */
802 { "74kf2_1", PROCESSOR_74KF2_1, 33 },
803 { "74kf", PROCESSOR_74KF2_1, 33 },
804 { "74kf1_1", PROCESSOR_74KF1_1, 33 },
805 { "74kfx", PROCESSOR_74KF1_1, 33 },
806 { "74kx", PROCESSOR_74KF1_1, 33 },
807 { "74kf3_2", PROCESSOR_74KF3_2, 33 },
810 { "5kc", PROCESSOR_5KC, 64 },
811 { "5kf", PROCESSOR_5KF, 64 },
812 { "20kc", PROCESSOR_20KC, 64 },
813 { "sb1", PROCESSOR_SB1, 64 },
814 { "sb1a", PROCESSOR_SB1A, 64 },
815 { "sr71000", PROCESSOR_SR71000, 64 },
/* Default costs.  If these are used for a processor we should look
   up the actual costs.  Expands to the trailing initializer fields of
   a struct mips_rtx_cost_data entry, so the order here must match the
   struct's field order.  */
#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
                      COSTS_N_INSNS (23), /* fp_div_sf */    \
                      COSTS_N_INSNS (36), /* fp_div_df */    \
                      COSTS_N_INSNS (10), /* int_mult_si */  \
                      COSTS_N_INSNS (10), /* int_mult_di */  \
                      COSTS_N_INSNS (69), /* int_div_si */   \
                      COSTS_N_INSNS (69), /* int_div_di */   \
                                       2, /* branch_cost */  \
                                       4  /* memory_latency */
/* Need to replace these with the costs of calling the appropriate
   library function.  The large values discourage the optimizers from
   synthesizing FP operations when hardware FP is unavailable.  */
#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */      \
                      COSTS_N_INSNS (256), /* fp_mult_sf */  \
                      COSTS_N_INSNS (256), /* fp_mult_df */  \
                      COSTS_N_INSNS (256), /* fp_div_sf */   \
                      COSTS_N_INSNS (256)  /* fp_div_df */
843 static struct mips_rtx_cost_data const mips_rtx_cost_optimize_size =
845 COSTS_N_INSNS (1), /* fp_add */
846 COSTS_N_INSNS (1), /* fp_mult_sf */
847 COSTS_N_INSNS (1), /* fp_mult_df */
848 COSTS_N_INSNS (1), /* fp_div_sf */
849 COSTS_N_INSNS (1), /* fp_div_df */
850 COSTS_N_INSNS (1), /* int_mult_si */
851 COSTS_N_INSNS (1), /* int_mult_di */
852 COSTS_N_INSNS (1), /* int_div_si */
853 COSTS_N_INSNS (1), /* int_div_di */
855 4 /* memory_latency */
858 static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
861 COSTS_N_INSNS (2), /* fp_add */
862 COSTS_N_INSNS (4), /* fp_mult_sf */
863 COSTS_N_INSNS (5), /* fp_mult_df */
864 COSTS_N_INSNS (12), /* fp_div_sf */
865 COSTS_N_INSNS (19), /* fp_div_df */
866 COSTS_N_INSNS (12), /* int_mult_si */
867 COSTS_N_INSNS (12), /* int_mult_di */
868 COSTS_N_INSNS (35), /* int_div_si */
869 COSTS_N_INSNS (35), /* int_div_di */
871 4 /* memory_latency */
876 COSTS_N_INSNS (6), /* int_mult_si */
877 COSTS_N_INSNS (6), /* int_mult_di */
878 COSTS_N_INSNS (36), /* int_div_si */
879 COSTS_N_INSNS (36), /* int_div_di */
881 4 /* memory_latency */
885 COSTS_N_INSNS (36), /* int_mult_si */
886 COSTS_N_INSNS (36), /* int_mult_di */
887 COSTS_N_INSNS (37), /* int_div_si */
888 COSTS_N_INSNS (37), /* int_div_di */
890 4 /* memory_latency */
894 COSTS_N_INSNS (4), /* int_mult_si */
895 COSTS_N_INSNS (11), /* int_mult_di */
896 COSTS_N_INSNS (36), /* int_div_si */
897 COSTS_N_INSNS (68), /* int_div_di */
899 4 /* memory_latency */
902 COSTS_N_INSNS (4), /* fp_add */
903 COSTS_N_INSNS (4), /* fp_mult_sf */
904 COSTS_N_INSNS (5), /* fp_mult_df */
905 COSTS_N_INSNS (17), /* fp_div_sf */
906 COSTS_N_INSNS (32), /* fp_div_df */
907 COSTS_N_INSNS (4), /* int_mult_si */
908 COSTS_N_INSNS (11), /* int_mult_di */
909 COSTS_N_INSNS (36), /* int_div_si */
910 COSTS_N_INSNS (68), /* int_div_di */
912 4 /* memory_latency */
915 COSTS_N_INSNS (4), /* fp_add */
916 COSTS_N_INSNS (4), /* fp_mult_sf */
917 COSTS_N_INSNS (5), /* fp_mult_df */
918 COSTS_N_INSNS (17), /* fp_div_sf */
919 COSTS_N_INSNS (32), /* fp_div_df */
920 COSTS_N_INSNS (4), /* int_mult_si */
921 COSTS_N_INSNS (7), /* int_mult_di */
922 COSTS_N_INSNS (42), /* int_div_si */
923 COSTS_N_INSNS (72), /* int_div_di */
925 4 /* memory_latency */
929 COSTS_N_INSNS (5), /* int_mult_si */
930 COSTS_N_INSNS (5), /* int_mult_di */
931 COSTS_N_INSNS (41), /* int_div_si */
932 COSTS_N_INSNS (41), /* int_div_di */
934 4 /* memory_latency */
937 COSTS_N_INSNS (8), /* fp_add */
938 COSTS_N_INSNS (8), /* fp_mult_sf */
939 COSTS_N_INSNS (10), /* fp_mult_df */
940 COSTS_N_INSNS (34), /* fp_div_sf */
941 COSTS_N_INSNS (64), /* fp_div_df */
942 COSTS_N_INSNS (5), /* int_mult_si */
943 COSTS_N_INSNS (5), /* int_mult_di */
944 COSTS_N_INSNS (41), /* int_div_si */
945 COSTS_N_INSNS (41), /* int_div_di */
947 4 /* memory_latency */
950 COSTS_N_INSNS (4), /* fp_add */
951 COSTS_N_INSNS (4), /* fp_mult_sf */
952 COSTS_N_INSNS (5), /* fp_mult_df */
953 COSTS_N_INSNS (17), /* fp_div_sf */
954 COSTS_N_INSNS (32), /* fp_div_df */
955 COSTS_N_INSNS (5), /* int_mult_si */
956 COSTS_N_INSNS (5), /* int_mult_di */
957 COSTS_N_INSNS (41), /* int_div_si */
958 COSTS_N_INSNS (41), /* int_div_di */
960 4 /* memory_latency */
964 COSTS_N_INSNS (5), /* int_mult_si */
965 COSTS_N_INSNS (5), /* int_mult_di */
966 COSTS_N_INSNS (41), /* int_div_si */
967 COSTS_N_INSNS (41), /* int_div_di */
969 4 /* memory_latency */
972 COSTS_N_INSNS (8), /* fp_add */
973 COSTS_N_INSNS (8), /* fp_mult_sf */
974 COSTS_N_INSNS (10), /* fp_mult_df */
975 COSTS_N_INSNS (34), /* fp_div_sf */
976 COSTS_N_INSNS (64), /* fp_div_df */
977 COSTS_N_INSNS (5), /* int_mult_si */
978 COSTS_N_INSNS (5), /* int_mult_di */
979 COSTS_N_INSNS (41), /* int_div_si */
980 COSTS_N_INSNS (41), /* int_div_di */
982 4 /* memory_latency */
985 COSTS_N_INSNS (4), /* fp_add */
986 COSTS_N_INSNS (4), /* fp_mult_sf */
987 COSTS_N_INSNS (5), /* fp_mult_df */
988 COSTS_N_INSNS (17), /* fp_div_sf */
989 COSTS_N_INSNS (32), /* fp_div_df */
990 COSTS_N_INSNS (5), /* int_mult_si */
991 COSTS_N_INSNS (5), /* int_mult_di */
992 COSTS_N_INSNS (41), /* int_div_si */
993 COSTS_N_INSNS (41), /* int_div_di */
995 4 /* memory_latency */
998 COSTS_N_INSNS (6), /* fp_add */
999 COSTS_N_INSNS (6), /* fp_mult_sf */
1000 COSTS_N_INSNS (7), /* fp_mult_df */
1001 COSTS_N_INSNS (25), /* fp_div_sf */
1002 COSTS_N_INSNS (48), /* fp_div_df */
1003 COSTS_N_INSNS (5), /* int_mult_si */
1004 COSTS_N_INSNS (5), /* int_mult_di */
1005 COSTS_N_INSNS (41), /* int_div_si */
1006 COSTS_N_INSNS (41), /* int_div_di */
1007 1, /* branch_cost */
1008 4 /* memory_latency */
1014 COSTS_N_INSNS (2), /* fp_add */
1015 COSTS_N_INSNS (4), /* fp_mult_sf */
1016 COSTS_N_INSNS (5), /* fp_mult_df */
1017 COSTS_N_INSNS (12), /* fp_div_sf */
1018 COSTS_N_INSNS (19), /* fp_div_df */
1019 COSTS_N_INSNS (2), /* int_mult_si */
1020 COSTS_N_INSNS (2), /* int_mult_di */
1021 COSTS_N_INSNS (35), /* int_div_si */
1022 COSTS_N_INSNS (35), /* int_div_di */
1023 1, /* branch_cost */
1024 4 /* memory_latency */
1027 COSTS_N_INSNS (3), /* fp_add */
1028 COSTS_N_INSNS (5), /* fp_mult_sf */
1029 COSTS_N_INSNS (6), /* fp_mult_df */
1030 COSTS_N_INSNS (15), /* fp_div_sf */
1031 COSTS_N_INSNS (16), /* fp_div_df */
1032 COSTS_N_INSNS (17), /* int_mult_si */
1033 COSTS_N_INSNS (17), /* int_mult_di */
1034 COSTS_N_INSNS (38), /* int_div_si */
1035 COSTS_N_INSNS (38), /* int_div_di */
1036 2, /* branch_cost */
1037 6 /* memory_latency */
1040 COSTS_N_INSNS (6), /* fp_add */
1041 COSTS_N_INSNS (7), /* fp_mult_sf */
1042 COSTS_N_INSNS (8), /* fp_mult_df */
1043 COSTS_N_INSNS (23), /* fp_div_sf */
1044 COSTS_N_INSNS (36), /* fp_div_df */
1045 COSTS_N_INSNS (10), /* int_mult_si */
1046 COSTS_N_INSNS (10), /* int_mult_di */
1047 COSTS_N_INSNS (69), /* int_div_si */
1048 COSTS_N_INSNS (69), /* int_div_di */
1049 2, /* branch_cost */
1050 6 /* memory_latency */
1062 /* The only costs that appear to be updated here are
1063 integer multiplication. */
1065 COSTS_N_INSNS (4), /* int_mult_si */
1066 COSTS_N_INSNS (6), /* int_mult_di */
1067 COSTS_N_INSNS (69), /* int_div_si */
1068 COSTS_N_INSNS (69), /* int_div_di */
1069 1, /* branch_cost */
1070 4 /* memory_latency */
1082 COSTS_N_INSNS (6), /* fp_add */
1083 COSTS_N_INSNS (4), /* fp_mult_sf */
1084 COSTS_N_INSNS (5), /* fp_mult_df */
1085 COSTS_N_INSNS (23), /* fp_div_sf */
1086 COSTS_N_INSNS (36), /* fp_div_df */
1087 COSTS_N_INSNS (5), /* int_mult_si */
1088 COSTS_N_INSNS (5), /* int_mult_di */
1089 COSTS_N_INSNS (36), /* int_div_si */
1090 COSTS_N_INSNS (36), /* int_div_di */
1091 1, /* branch_cost */
1092 4 /* memory_latency */
1095 COSTS_N_INSNS (6), /* fp_add */
1096 COSTS_N_INSNS (5), /* fp_mult_sf */
1097 COSTS_N_INSNS (6), /* fp_mult_df */
1098 COSTS_N_INSNS (30), /* fp_div_sf */
1099 COSTS_N_INSNS (59), /* fp_div_df */
1100 COSTS_N_INSNS (3), /* int_mult_si */
1101 COSTS_N_INSNS (4), /* int_mult_di */
1102 COSTS_N_INSNS (42), /* int_div_si */
1103 COSTS_N_INSNS (74), /* int_div_di */
1104 1, /* branch_cost */
1105 4 /* memory_latency */
1108 COSTS_N_INSNS (6), /* fp_add */
1109 COSTS_N_INSNS (5), /* fp_mult_sf */
1110 COSTS_N_INSNS (6), /* fp_mult_df */
1111 COSTS_N_INSNS (30), /* fp_div_sf */
1112 COSTS_N_INSNS (59), /* fp_div_df */
1113 COSTS_N_INSNS (5), /* int_mult_si */
1114 COSTS_N_INSNS (9), /* int_mult_di */
1115 COSTS_N_INSNS (42), /* int_div_si */
1116 COSTS_N_INSNS (74), /* int_div_di */
1117 1, /* branch_cost */
1118 4 /* memory_latency */
1121 /* The only costs that are changed here are
1122 integer multiplication. */
1123 COSTS_N_INSNS (6), /* fp_add */
1124 COSTS_N_INSNS (7), /* fp_mult_sf */
1125 COSTS_N_INSNS (8), /* fp_mult_df */
1126 COSTS_N_INSNS (23), /* fp_div_sf */
1127 COSTS_N_INSNS (36), /* fp_div_df */
1128 COSTS_N_INSNS (5), /* int_mult_si */
1129 COSTS_N_INSNS (9), /* int_mult_di */
1130 COSTS_N_INSNS (69), /* int_div_si */
1131 COSTS_N_INSNS (69), /* int_div_di */
1132 1, /* branch_cost */
1133 4 /* memory_latency */
1139 /* The only costs that are changed here are
1140 integer multiplication. */
1141 COSTS_N_INSNS (6), /* fp_add */
1142 COSTS_N_INSNS (7), /* fp_mult_sf */
1143 COSTS_N_INSNS (8), /* fp_mult_df */
1144 COSTS_N_INSNS (23), /* fp_div_sf */
1145 COSTS_N_INSNS (36), /* fp_div_df */
1146 COSTS_N_INSNS (3), /* int_mult_si */
1147 COSTS_N_INSNS (8), /* int_mult_di */
1148 COSTS_N_INSNS (69), /* int_div_si */
1149 COSTS_N_INSNS (69), /* int_div_di */
1150 1, /* branch_cost */
1151 4 /* memory_latency */
1154 /* These costs are the same as the SB-1A below. */
1155 COSTS_N_INSNS (4), /* fp_add */
1156 COSTS_N_INSNS (4), /* fp_mult_sf */
1157 COSTS_N_INSNS (4), /* fp_mult_df */
1158 COSTS_N_INSNS (24), /* fp_div_sf */
1159 COSTS_N_INSNS (32), /* fp_div_df */
1160 COSTS_N_INSNS (3), /* int_mult_si */
1161 COSTS_N_INSNS (4), /* int_mult_di */
1162 COSTS_N_INSNS (36), /* int_div_si */
1163 COSTS_N_INSNS (68), /* int_div_di */
1164 1, /* branch_cost */
1165 4 /* memory_latency */
1168 /* These costs are the same as the SB-1 above. */
1169 COSTS_N_INSNS (4), /* fp_add */
1170 COSTS_N_INSNS (4), /* fp_mult_sf */
1171 COSTS_N_INSNS (4), /* fp_mult_df */
1172 COSTS_N_INSNS (24), /* fp_div_sf */
1173 COSTS_N_INSNS (32), /* fp_div_df */
1174 COSTS_N_INSNS (3), /* int_mult_si */
1175 COSTS_N_INSNS (4), /* int_mult_di */
1176 COSTS_N_INSNS (36), /* int_div_si */
1177 COSTS_N_INSNS (68), /* int_div_di */
1178 1, /* branch_cost */
1179 4 /* memory_latency */
1186 /* If a MIPS16e SAVE or RESTORE instruction saves or restores register
1187 mips16e_s2_s8_regs[X], it must also save the registers in indexes
1188 X + 1 onwards. Likewise mips16e_a0_a3_regs. */
1189 static const unsigned char mips16e_s2_s8_regs[] = {
1190 30, 23, 22, 21, 20, 19, 18
/* NOTE(review): the closing "};" of the array above and the initializer
   of mips16e_a0_a3_regs below (presumably the $a0-$a3 hard register
   numbers, descending like the other tables -- verify against the full
   source) were lost in extraction.  */
1192 static const unsigned char mips16e_a0_a3_regs[] = {
1196 /* A list of the registers that can be saved by the MIPS16e SAVE instruction,
1197 ordered from the uppermost in memory to the lowest in memory. */
1198 static const unsigned char mips16e_save_restore_regs[] = {
1199 31, 30, 23, 22, 21, 20, 19, 18, 17, 16, 7, 6, 5, 4
1202 /* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT. */
1203 #ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1204 #define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1207 /* Initialize the GCC target structure. */
1208 #undef TARGET_ASM_ALIGNED_HI_OP
1209 #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1210 #undef TARGET_ASM_ALIGNED_SI_OP
1211 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1212 #undef TARGET_ASM_ALIGNED_DI_OP
1213 #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1215 #undef TARGET_ASM_FUNCTION_PROLOGUE
1216 #define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1217 #undef TARGET_ASM_FUNCTION_EPILOGUE
1218 #define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1219 #undef TARGET_ASM_SELECT_RTX_SECTION
1220 #define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1221 #undef TARGET_ASM_FUNCTION_RODATA_SECTION
1222 #define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1224 #undef TARGET_SCHED_REORDER
1225 #define TARGET_SCHED_REORDER mips_sched_reorder
1226 #undef TARGET_SCHED_VARIABLE_ISSUE
1227 #define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1228 #undef TARGET_SCHED_ADJUST_COST
1229 #define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1230 #undef TARGET_SCHED_ISSUE_RATE
1231 #define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1232 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1233 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1234 mips_multipass_dfa_lookahead
1236 #undef TARGET_DEFAULT_TARGET_FLAGS
1237 #define TARGET_DEFAULT_TARGET_FLAGS \
1239 | TARGET_CPU_DEFAULT \
1240 | TARGET_ENDIAN_DEFAULT \
1241 | TARGET_FP_EXCEPTIONS_DEFAULT \
1242 | MASK_CHECK_ZERO_DIV \
1244 #undef TARGET_HANDLE_OPTION
1245 #define TARGET_HANDLE_OPTION mips_handle_option
1247 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1248 #define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1250 #undef TARGET_VALID_POINTER_MODE
1251 #define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1252 #undef TARGET_RTX_COSTS
1253 #define TARGET_RTX_COSTS mips_rtx_costs
1254 #undef TARGET_ADDRESS_COST
1255 #define TARGET_ADDRESS_COST mips_address_cost
1257 #undef TARGET_IN_SMALL_DATA_P
1258 #define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1260 #undef TARGET_MACHINE_DEPENDENT_REORG
1261 #define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1263 #undef TARGET_ASM_FILE_START
1264 #define TARGET_ASM_FILE_START mips_file_start
1265 #undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1266 #define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1268 #undef TARGET_INIT_LIBFUNCS
1269 #define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1271 #undef TARGET_BUILD_BUILTIN_VA_LIST
1272 #define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1273 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1274 #define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1276 #undef TARGET_PROMOTE_FUNCTION_ARGS
1277 #define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1278 #undef TARGET_PROMOTE_FUNCTION_RETURN
1279 #define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1280 #undef TARGET_PROMOTE_PROTOTYPES
1281 #define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1283 #undef TARGET_RETURN_IN_MEMORY
1284 #define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1285 #undef TARGET_RETURN_IN_MSB
1286 #define TARGET_RETURN_IN_MSB mips_return_in_msb
1288 #undef TARGET_ASM_OUTPUT_MI_THUNK
1289 #define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1290 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1291 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1293 #undef TARGET_SETUP_INCOMING_VARARGS
1294 #define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1295 #undef TARGET_STRICT_ARGUMENT_NAMING
1296 #define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1297 #undef TARGET_MUST_PASS_IN_STACK
1298 #define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1299 #undef TARGET_PASS_BY_REFERENCE
1300 #define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1301 #undef TARGET_CALLEE_COPIES
1302 #define TARGET_CALLEE_COPIES mips_callee_copies
1303 #undef TARGET_ARG_PARTIAL_BYTES
1304 #define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1306 #undef TARGET_MODE_REP_EXTENDED
1307 #define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1309 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1310 #define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1312 #undef TARGET_INIT_BUILTINS
1313 #define TARGET_INIT_BUILTINS mips_init_builtins
1314 #undef TARGET_EXPAND_BUILTIN
1315 #define TARGET_EXPAND_BUILTIN mips_expand_builtin
1317 #undef TARGET_HAVE_TLS
1318 #define TARGET_HAVE_TLS HAVE_AS_TLS
1320 #undef TARGET_CANNOT_FORCE_CONST_MEM
1321 #define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1323 #undef TARGET_ENCODE_SECTION_INFO
1324 #define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1326 #undef TARGET_ATTRIBUTE_TABLE
1327 #define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1329 #undef TARGET_EXTRA_LIVE_ON_ENTRY
1330 #define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1332 #undef TARGET_MIN_ANCHOR_OFFSET
1333 #define TARGET_MIN_ANCHOR_OFFSET -32768
1334 #undef TARGET_MAX_ANCHOR_OFFSET
1335 #define TARGET_MAX_ANCHOR_OFFSET 32767
1336 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1337 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1338 #undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1339 #define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1341 #undef TARGET_COMP_TYPE_ATTRIBUTES
1342 #define TARGET_COMP_TYPE_ATTRIBUTES mips_comp_type_attributes
1344 #ifdef HAVE_AS_DTPRELWORD
1345 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1346 #define TARGET_ASM_OUTPUT_DWARF_DTPREL mips_output_dwarf_dtprel
/* Instantiate the target-hook vector from the TARGET_* macros above.  */
1349 struct gcc_target targetm = TARGET_INITIALIZER;
1352 /* Predicates to test for presence of "near" and "far"/"long_call"
1353 attributes on the given TYPE. */
1356 mips_near_type_p (tree type)
1358 return lookup_attribute ("near", TYPE_ATTRIBUTES (type)) != NULL;
1362 mips_far_type_p (tree type)
1364 return (lookup_attribute ("long_call", TYPE_ATTRIBUTES (type)) != NULL
1365 || lookup_attribute ("far", TYPE_ATTRIBUTES (type)) != NULL);
1369 /* Return 0 if the attributes for two types are incompatible, 1 if they
1370 are compatible, and 2 if they are nearly compatible (which causes a
1371 warning to be generated). */
1374 mips_comp_type_attributes (tree type1, tree type2)
/* Implement TARGET_COMP_TYPE_ATTRIBUTES: 0 = incompatible, 1 = compatible,
   2 = nearly compatible (warn).
   NOTE(review): the return statements following each condition were lost
   in extraction -- per the header comment, mixed near/far attributes
   should yield 0; confirm against the full source.  */
1376 /* Check for mismatch of non-default calling convention. */
1377 if (TREE_CODE (type1) != FUNCTION_TYPE)
1380 /* Disallow mixed near/far attributes. */
1381 if (mips_far_type_p (type1) && mips_near_type_p (type2))
1383 if (mips_near_type_p (type1) && mips_far_type_p (type2))
1389 /* If X is a PLUS of a CONST_INT, return the two terms in *BASE_PTR
1390 and *OFFSET_PTR. Return X in *BASE_PTR and 0 in *OFFSET_PTR otherwise. */
1393 mips_split_plus (rtx x, rtx *base_ptr, HOST_WIDE_INT *offset_ptr)
/* Decompose X = BASE + CONST_INT into *BASE_PTR and *OFFSET_PTR.
   NOTE(review): the else branch (store X itself and offset 0, per the
   header comment) was lost in extraction.  */
1395 if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1397 *base_ptr = XEXP (x, 0);
1398 *offset_ptr = INTVAL (XEXP (x, 1));
1407 /* Return true if SYMBOL_REF X is associated with a global symbol
1408 (in the STB_GLOBAL sense). */
1411 mips_global_symbol_p (rtx x)
/* Return true if SYMBOL_REF X has STB_GLOBAL binding.
   NOTE(review): the decl-less guard between the next two lines (fall back
   to !SYMBOL_REF_LOCAL_P when SYMBOL_REF_DECL is null) appears to have
   been lost in extraction -- verify against the full source.  */
1415 decl = SYMBOL_REF_DECL (x);
1417 return !SYMBOL_REF_LOCAL_P (x);
1419 /* Weakref symbols are not TREE_PUBLIC, but their targets are global
1420 or weak symbols. Relocations in the object file will be against
1421 the target symbol, so it's that symbol's binding that matters here. */
1422 return DECL_P (decl) && (TREE_PUBLIC (decl) || DECL_WEAK (decl));
1425 /* Return true if SYMBOL_REF X binds locally. */
1428 mips_symbol_binds_local_p (rtx x)
/* Return true if SYMBOL_REF X binds locally: ask targetm.binds_local_p
   when a decl is available, otherwise trust the SYMBOL_REF_LOCAL_P flag
   recorded when the symbol was created.  */
1430 return (SYMBOL_REF_DECL (x)
1431 ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
1432 : SYMBOL_REF_LOCAL_P (x));
1435 /* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF. */
1437 static enum mips_symbol_type
1438 mips_classify_symbol (rtx x)
/* Classify X (a SYMBOL_REF or LABEL_REF) into an mips_symbol_type that
   determines how it is relocated and loaded.
   NOTE(review): several interior lines (the guard before the first
   return, the SYMBOL_TLS return after the TLS-model test, and closing
   braces) were lost in extraction; verify control flow against the full
   source before changing behavior here.  */
1441 return SYMBOL_GOT_DISP;
1443 if (GET_CODE (x) == LABEL_REF)
1446 return SYMBOL_CONSTANT_POOL;
1447 if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1448 return SYMBOL_GOT_PAGE_OFST;
1449 return SYMBOL_GENERAL;
1452 gcc_assert (GET_CODE (x) == SYMBOL_REF);
1454 if (SYMBOL_REF_TLS_MODEL (x))
1457 if (CONSTANT_POOL_ADDRESS_P (x))
1460 return SYMBOL_CONSTANT_POOL;
/* Small constant-pool entries qualify for gp-relative (small data)
   addressing unless -membedded-data keeps data with the code.  */
1462 if (!TARGET_EMBEDDED_DATA
1463 && GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1464 return SYMBOL_SMALL_DATA;
1467 /* Do not use small-data accesses for weak symbols; they may end up
1469 if (SYMBOL_REF_SMALL_P (x)
1470 && !SYMBOL_REF_WEAK (x))
1471 return SYMBOL_SMALL_DATA;
1473 if (TARGET_ABICALLS)
1475 /* Don't use GOT accesses for locally-binding symbols; we can use
1476 %hi and %lo instead. */
1477 if (TARGET_ABSOLUTE_ABICALLS && mips_symbol_binds_local_p (x))
1478 return SYMBOL_GENERAL;
1480 /* There are three cases to consider:
1482 - o32 PIC (either with or without explicit relocs)
1483 - n32/n64 PIC without explicit relocs
1484 - n32/n64 PIC with explicit relocs
1486 In the first case, both local and global accesses will use an
1487 R_MIPS_GOT16 relocation. We must correctly predict which of
1488 the two semantics (local or global) the assembler and linker
1489 will apply. The choice depends on the symbol's binding rather
1490 than its visibility.
1492 In the second case, the assembler will not use R_MIPS_GOT16
1493 relocations, but it chooses between local and global accesses
1494 in the same way as for o32 PIC.
1496 In the third case we have more freedom since both forms of
1497 access will work for any kind of symbol. However, there seems
1498 little point in doing things differently. */
1499 if (mips_global_symbol_p (x))
1500 return SYMBOL_GOT_DISP;
1502 return SYMBOL_GOT_PAGE_OFST;
1505 return SYMBOL_GENERAL;
1508 /* Return true if OFFSET is within the range [0, ALIGN), where ALIGN
1509 is the alignment (in bytes) of SYMBOL_REF X. */
1512 mips_offset_within_alignment_p (rtx x, HOST_WIDE_INT offset)
/* Return true if OFFSET lies in [0, ALIGN), where ALIGN is the byte
   alignment of SYMBOL_REF X.
   NOTE(review): the declaration of T and the null-check guarding the
   DECL_ALIGN_UNIT read were lost in extraction.  */
1514 /* If for some reason we can't get the alignment for the
1515 symbol, initializing this to one means we will only accept
1517 HOST_WIDE_INT align = 1;
1520 /* Get the alignment of the symbol we're referring to. */
1521 t = SYMBOL_REF_DECL (x);
1523 align = DECL_ALIGN_UNIT (t);
1525 return offset >= 0 && offset < align;
1528 /* Return true if X is a symbolic constant that can be calculated in
1529 the same way as a bare symbol. If it is, store the type of the
1530 symbol in *SYMBOL_TYPE. */
1533 mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
/* Return true if X is a symbolic constant that can be computed like a
   bare symbol, storing the symbol's classification in *SYMBOL_TYPE.
   NOTE(review): the "return false" fall-throughs (non-symbolic X, TLS
   symbols, disallowed offsets) and the "return true" after the zero
   offset test were lost in extraction -- confirm against the full
   source.  */
1537 split_const (x, &x, &offset);
1538 if (UNSPEC_ADDRESS_P (x))
1540 *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1541 x = UNSPEC_ADDRESS (x);
1543 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1545 *symbol_type = mips_classify_symbol (x);
1546 if (*symbol_type == SYMBOL_TLS)
/* A zero offset is always valid for a bare symbol.  */
1552 if (offset == const0_rtx)
1555 /* Check whether a nonzero offset is valid for the underlying
1557 switch (*symbol_type)
1559 case SYMBOL_GENERAL:
1560 case SYMBOL_64_HIGH:
1563 /* If the target has 64-bit pointers and the object file only
1564 supports 32-bit symbols, the values of those symbols will be
1565 sign-extended. In this case we can't allow an arbitrary offset
1566 in case the 32-bit value X + OFFSET has a different sign from X. */
1567 if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1568 return offset_within_block_p (x, INTVAL (offset));
1570 /* In other cases the relocations can handle any offset. */
1573 case SYMBOL_CONSTANT_POOL:
1574 /* Allow constant pool references to be converted to LABEL+CONSTANT.
1575 In this case, we no longer have access to the underlying constant,
1576 but the original symbol-based access was known to be valid. */
1577 if (GET_CODE (x) == LABEL_REF)
1582 case SYMBOL_SMALL_DATA:
1583 /* Make sure that the offset refers to something within the
1584 same object block. This should guarantee that the final
1585 PC- or GP-relative offset is within the 16-bit limit. */
1586 return offset_within_block_p (x, INTVAL (offset));
1588 case SYMBOL_GOT_PAGE_OFST:
1589 case SYMBOL_GOTOFF_PAGE:
1590 /* If the symbol is global, the GOT entry will contain the symbol's
1591 address, and we will apply a 16-bit offset after loading it.
1592 If the symbol is local, the linker should provide enough local
1593 GOT entries for a 16-bit offset, but larger offsets may lead
1595 return SMALL_INT (offset);
1599 /* There is no carry between the HI and LO REL relocations, so the
1600 offset is only valid if we know it won't lead to such a carry. */
1601 return mips_offset_within_alignment_p (x, INTVAL (offset));
1603 case SYMBOL_GOT_DISP:
1604 case SYMBOL_GOTOFF_DISP:
1605 case SYMBOL_GOTOFF_CALL:
1606 case SYMBOL_GOTOFF_LOADGP:
1609 case SYMBOL_GOTTPREL:
1618 /* This function is used to implement REG_MODE_OK_FOR_BASE_P. */
1621 mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
/* Implement REG_MODE_OK_FOR_BASE_P: can (hard or pseudo) register REGNO
   serve as a base register for MODE?  STRICT means only hard registers
   that survived register allocation are acceptable.
   NOTE(review): the non-strict early acceptance of unallocated pseudos
   was lost in extraction.  */
1623 if (!HARD_REGISTER_NUM_P (regno))
1627 regno = reg_renumber[regno];
1630 /* These fake registers will be eliminated to either the stack or
1631 hard frame pointer, both of which are usually valid base registers.
1632 Reload deals with the cases where the eliminated form isn't valid. */
1633 if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1636 /* In mips16 mode, the stack pointer can only address word and doubleword
1637 values, nothing smaller. There are two problems here:
1639 (a) Instantiating virtual registers can introduce new uses of the
1640 stack pointer. If these virtual registers are valid addresses,
1641 the stack pointer should be too.
1643 (b) Most uses of the stack pointer are not made explicit until
1644 FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1645 We don't know until that stage whether we'll be eliminating to the
1646 stack pointer (which needs the restriction) or the hard frame
1647 pointer (which doesn't).
1649 All in all, it seems more consistent to only enforce this restriction
1650 during and after reload. */
1651 if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1652 return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1654 return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1658 /* Return true if X is a valid base register for the given mode.
1659 Allow only hard registers if STRICT. */
1662 mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
/* Return true if X is a valid base register for MODE; hard registers
   only when STRICT.
   NOTE(review): the SUBREG-stripping assignment and the "return (REG_P (x)"
   opener of the final conjunction were lost in extraction.  */
1664 if (!strict && GET_CODE (x) == SUBREG)
1668 && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1672 /* Return true if symbols of type SYMBOL_TYPE can directly address a value
1673 with mode MODE. This is used for both symbolic and LO_SUM addresses. */
1676 mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1677 enum machine_mode mode)
/* Return true if symbols of SYMBOL_TYPE can directly address a value of
   mode MODE; used for both symbolic and LO_SUM addresses.
   NOTE(review): the return statements for several cases (small data,
   GOT-based and GOTOFF types) were lost in extraction -- per the
   adjacent comments, GOT-based types cannot address directly.  */
1679 switch (symbol_type)
1681 case SYMBOL_GENERAL:
1682 return !TARGET_MIPS16;
1684 case SYMBOL_SMALL_DATA:
1687 case SYMBOL_CONSTANT_POOL:
1688 /* PC-relative addressing is only available for lw and ld. */
1689 return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1691 case SYMBOL_GOT_PAGE_OFST:
1694 case SYMBOL_GOT_DISP:
1695 /* The address will have to be loaded from the GOT first. */
1698 case SYMBOL_GOTOFF_PAGE:
1699 case SYMBOL_GOTOFF_DISP:
1700 case SYMBOL_GOTOFF_CALL:
1701 case SYMBOL_GOTOFF_LOADGP:
1706 case SYMBOL_GOTTPREL:
1708 case SYMBOL_64_HIGH:
1718 /* Return true if X is a valid address for machine mode MODE. If it is,
1719 fill in INFO appropriately. STRICT is true if we should only accept
1720 hard base registers. */
1723 mips_classify_address (struct mips_address_info *info, rtx x,
1724 enum machine_mode mode, int strict)
/* Return true if X is a valid address for MODE, filling in INFO.
   STRICT restricts base registers to hard registers.
   NOTE(review): the switch's case labels were lost in extraction; from
   the bodies these appear to be, in order: REG, PLUS, LO_SUM, CONST_INT,
   and the symbolic default (CONST/SYMBOL_REF/LABEL_REF) -- verify.  */
1726 switch (GET_CODE (x))
1730 info->type = ADDRESS_REG;
1732 info->offset = const0_rtx;
1733 return mips_valid_base_register_p (info->reg, mode, strict);
1736 info->type = ADDRESS_REG;
1737 info->reg = XEXP (x, 0);
1738 info->offset = XEXP (x, 1);
1739 return (mips_valid_base_register_p (info->reg, mode, strict)
1740 && const_arith_operand (info->offset, VOIDmode));
1743 info->type = ADDRESS_LO_SUM;
1744 info->reg = XEXP (x, 0);
1745 info->offset = XEXP (x, 1);
1746 return (mips_valid_base_register_p (info->reg, mode, strict)
1747 && mips_symbolic_constant_p (info->offset, &info->symbol_type)
1748 && mips_symbolic_address_p (info->symbol_type, mode)
1749 && mips_lo_relocs[info->symbol_type] != 0);
1752 /* Small-integer addresses don't occur very often, but they
1753 are legitimate if $0 is a valid base register. */
1754 info->type = ADDRESS_CONST_INT;
1755 return !TARGET_MIPS16 && SMALL_INT (x);
1760 info->type = ADDRESS_SYMBOLIC;
1761 return (mips_symbolic_constant_p (x, &info->symbol_type)
1762 && mips_symbolic_address_p (info->symbol_type, mode)
1763 && !mips_split_p[info->symbol_type]);
1770 /* Return true if X is a thread-local symbol. */
1773 mips_tls_operand_p (rtx x)
1775 return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1778 /* for_each_rtx callback for mips_cannot_force_const_mem: return nonzero
   (stop the walk) if *X is a thread-local symbol. */
1781 mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
/* Walker callback: nonzero result tells for_each_rtx a TLS symbol was
   found inside the expression.  */
1783 return mips_tls_operand_p (*x);
1786 /* Return true if X can not be forced into a constant pool. */
1789 mips_cannot_force_const_mem (rtx x)
/* Implement TARGET_CANNOT_FORCE_CONST_MEM: true if X must not be placed
   in the constant pool (SYMBOL+SMALL_INT combinations that
   mips_legitimize_move can rematerialize, and anything containing a TLS
   symbol).
   NOTE(review): the local declarations (base/offset) and the final
   return statements were lost in extraction.  */
1795 /* As an optimization, reject constants that mips_legitimize_move
1798 Suppose we have a multi-instruction sequence that loads constant C
1799 into register R. If R does not get allocated a hard register, and
1800 R is used in an operand that allows both registers and memory
1801 references, reload will consider forcing C into memory and using
1802 one of the instruction's memory alternatives. Returning false
1803 here will force it to use an input reload instead. */
1804 if (GET_CODE (x) == CONST_INT)
1807 split_const (x, &base, &offset);
1808 if (symbolic_operand (base, VOIDmode) && SMALL_INT (offset))
1812 if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1818 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. MIPS16 uses per-function
1819 constant pools, but normal-mode code doesn't need to. */
1822 mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1823 rtx x ATTRIBUTE_UNUSED)
1825 return !TARGET_MIPS16;
1828 /* Return the number of instructions needed to load a symbol of the
1829 given type into a register. If valid in an address, the same number
1830 of instructions are needed for loads and stores. Treat extended
1831 mips16 instructions as two instructions. */
1834 mips_symbol_insns (enum mips_symbol_type type)
/* Return the number of instructions needed to load a symbol of TYPE into
   a register (extended mips16 instructions count as two); the same count
   applies to loads and stores when the symbol is valid in an address.
   NOTE(review): the switch opener, several case bodies' return values
   and the default case were lost in extraction.  */
1838 case SYMBOL_GENERAL:
1839 /* In mips16 code, general symbols must be fetched from the
1844 /* When using 64-bit symbols, we need 5 preparatory instructions,
1847 lui $at,%highest(symbol)
1848 daddiu $at,$at,%higher(symbol)
1850 daddiu $at,$at,%hi(symbol)
1853 The final address is then $at + %lo(symbol). With 32-bit
1854 symbols we just need a preparatory lui. */
1855 return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1857 case SYMBOL_SMALL_DATA:
1861 case SYMBOL_CONSTANT_POOL:
1862 /* This case is for mips16 only. Assume we'll need an
1863 extended instruction. */
1866 case SYMBOL_GOT_PAGE_OFST:
1867 case SYMBOL_GOT_DISP:
1868 /* Unless -funit-at-a-time is in effect, we can't be sure whether
1869 the local/global classification is accurate. See override_options
1872 The worst cases are:
1874 (1) For local symbols when generating o32 or o64 code. The assembler
1880 ...and the final address will be $at + %lo(symbol).
1882 (2) For global symbols when -mxgot. The assembler will use:
1884 lui $at,%got_hi(symbol)
1887 ...and the final address will be $at + %got_lo(symbol). */
1890 case SYMBOL_GOTOFF_PAGE:
1891 case SYMBOL_GOTOFF_DISP:
1892 case SYMBOL_GOTOFF_CALL:
1893 case SYMBOL_GOTOFF_LOADGP:
1894 case SYMBOL_64_HIGH:
1900 case SYMBOL_GOTTPREL:
1902 /* Check whether the offset is a 16- or 32-bit value. */
1903 return mips_split_p[type] ? 2 : 1;
1906 /* We don't treat a bare TLS symbol as a constant. */
1912 /* Return true if X is a legitimate $sp-based address for mode MODE. */
1915 mips_stack_address_p (rtx x, enum machine_mode mode)
1917 struct mips_address_info addr;
1919 return (mips_classify_address (&addr, x, mode, false)
1920 && addr.type == ADDRESS_REG
1921 && addr.reg == stack_pointer_rtx);
1924 /* Return true if a value at OFFSET bytes from BASE can be accessed
1925 using an unextended mips16 instruction. MODE is the mode of the
1928 Usually the offset in an unextended instruction is a 5-bit field.
1929 The offset is unsigned and shifted left once for HIs, twice
1930 for SIs, and so on. An exception is SImode accesses off the
1931 stack pointer, which have an 8-bit immediate field. */
1934 mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
/* Return true if BASE + OFFSET (mode MODE) is reachable by an unextended
   mips16 memory instruction: offset nonnegative, naturally aligned, and
   within the scaled 5-bit field (8-bit for SImode off $sp).
   NOTE(review): the opening "if (...)" line of this conjunction and the
   final "return false;" were lost in extraction.  */
1937 && GET_CODE (offset) == CONST_INT
1938 && INTVAL (offset) >= 0
1939 && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1941 if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1942 return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1943 return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1949 /* Return the number of instructions needed to load or store a value
1950 of mode MODE at X. Return 0 if X isn't valid for MODE.
1952 For mips16 code, count extended instructions as two instructions. */
1955 mips_address_insns (rtx x, enum machine_mode mode)
/* Return the number of instructions needed to load or store a MODE value
   at address X, or 0 if X is invalid for MODE; extended mips16
   instructions count as two.
   NOTE(review): the declaration/initialization of FACTOR, the
   ADDRESS_REG case label and the trailing "return 0;" were lost in
   extraction.  */
1957 struct mips_address_info addr;
1960 if (mode == BLKmode)
1961 /* BLKmode is used for single unaligned loads and stores. */
1964 /* Each word of a multi-word value will be accessed individually. */
1965 factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1967 if (mips_classify_address (&addr, x, mode, false))
1972 && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1976 case ADDRESS_LO_SUM:
1977 return (TARGET_MIPS16 ? factor * 2 : factor);
1979 case ADDRESS_CONST_INT:
1982 case ADDRESS_SYMBOLIC:
1983 return factor * mips_symbol_insns (addr.symbol_type);
1989 /* Likewise for constant X. */
1992 mips_const_insns (rtx x)
/* Return the number of instructions needed to load constant X into a
   register, or 0 if X cannot be loaded directly; extended mips16
   instructions count as two.
   NOTE(review): the case labels (HIGH, CONST_INT, CONST_DOUBLE/VECTOR,
   CONST, SYMBOL_REF/LABEL_REF), some early returns and the declaration
   of OFFSET were lost in extraction.  */
1994 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1995 enum mips_symbol_type symbol_type;
1998 switch (GET_CODE (x))
2002 || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
2003 || !mips_split_p[symbol_type])
2010 /* Unsigned 8-bit constants can be loaded using an unextended
2011 LI instruction. Unsigned 16-bit constants can be loaded
2012 using an extended LI. Negative constants must be loaded
2013 using LI and then negated. */
2014 return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
2015 : SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
2016 : INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
2017 : SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
2020 return mips_build_integer (codes, INTVAL (x));
2024 return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
2030 /* See if we can refer to X directly. */
2031 if (mips_symbolic_constant_p (x, &symbol_type))
2032 return mips_symbol_insns (symbol_type);
2034 /* Otherwise try splitting the constant into a base and offset.
2035 16-bit offsets can be added using an extra addiu. Larger offsets
2036 must be calculated separately and then added to the base. */
2037 split_const (x, &x, &offset);
2040 int n = mips_const_insns (x);
2043 if (SMALL_INT (offset))
2046 return n + 1 + mips_build_integer (codes, INTVAL (offset));
2053 return mips_symbol_insns (mips_classify_symbol (x));
2061 /* Return the number of instructions needed for memory reference X.
2062 Count extended mips16 instructions as two instructions. */
2065 mips_fetch_insns (rtx x)
/* Return the instruction count for memory reference X (extended mips16
   instructions count as two); X must be a MEM.  */
2067 gcc_assert (MEM_P (x));
2068 return mips_address_insns (XEXP (x, 0), GET_MODE (x));
2072 /* Return the number of instructions needed for an integer division. */
2075 mips_idiv_insns (void)
/* Return the number of instructions needed for an integer division,
   accounting for the divide-by-zero check and R4000/R4400 errata
   workarounds.
   NOTE(review): the COUNT accumulator declaration, increments and final
   return were lost in extraction.  */
2080 if (TARGET_CHECK_ZERO_DIV)
2082 if (GENERATE_DIVIDE_TRAPS)
2088 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
2093 /* This function is used to implement GO_IF_LEGITIMATE_ADDRESS. It
2094 returns a nonzero value if X is a legitimate address for a memory
2095 operand of the indicated MODE. STRICT is nonzero if this function
2096 is called during reload. */
2099 mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
2101 struct mips_address_info addr;
2103 return mips_classify_address (&addr, x, mode, strict);
2107 /* Copy VALUE to a register and return that register. If new pseudos
2108 are allowed, copy it into a new register, otherwise use DEST. */
2111 mips_force_temporary (rtx dest, rtx value)
/* Copy VALUE into a register and return it: a fresh pseudo when they can
   still be created, otherwise DEST.
   NOTE(review): the "return dest;" after the move was lost in
   extraction.  */
2113 if (can_create_pseudo_p ())
2114 return force_reg (Pmode, value);
2117 emit_move_insn (copy_rtx (dest), value);
2123 /* Return a LO_SUM expression for ADDR. TEMP is as for mips_force_temporary
2124 and is used to load the high part into a register. */
2127 mips_split_symbol (rtx temp, rtx addr)
/* First branch (guard not visible in this fragment -- presumably the
   non-MIPS16 case; TODO confirm): materialize %hi(ADDR) in a register.  */
2132 high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
2133 else if (!can_create_pseudo_p ())
/* MIPS16 after reload: load the constant global pointer into TEMP.
   The assignment of HIGH for this branch is not visible here.  */
2135 emit_insn (gen_load_const_gp (copy_rtx (temp)));
/* MIPS16 before reload: use the cached $gp pseudo as the high part.  */
2139 high = mips16_gp_pseudo_reg ();
/* The result HIGH + %lo(ADDR) is usable as a memory address.  */
2140 return gen_rtx_LO_SUM (Pmode, high, addr);
2144 /* Return an UNSPEC address with underlying address ADDRESS and symbol
2145 type SYMBOL_TYPE. */
2148 mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
2152 split_const (address, &base, &offset);
2153 base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
2154 UNSPEC_ADDRESS_FIRST + symbol_type);
2155 if (offset != const0_rtx)
2156 base = gen_rtx_PLUS (Pmode, base, offset);
2157 return gen_rtx_CONST (Pmode, base);
2161 /* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
2162 high part to BASE and return the result. Just return BASE otherwise.
2163 TEMP is available as a temporary register if needed.
2165 The returned expression can be used as the first operand to a LO_SUM. */
2168 mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
2169 enum mips_symbol_type symbol_type)
2171 if (mips_split_p[symbol_type])
2173 addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
2174 addr = mips_force_temporary (temp, addr);
2175 return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
2181 /* Return a legitimate address for REG + OFFSET. TEMP is as for
2182 mips_force_temporary; it is only needed when OFFSET is not a
2186 mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
/* OFFSET does not fit a signed 16-bit immediate, so some or all of it
   must be added into a register first.  */
2188 if (!SMALL_OPERAND (offset))
/* First branch (guard not visible in this fragment -- presumably the
   MIPS16 case; TODO confirm):  */
2193 /* Load the full offset into a register so that we can use
2194 an unextended instruction for the address itself. */
2195 high = GEN_INT (offset);
/* Other branch: split the constant at the 16-bit boundary.  */
2200 /* Leave OFFSET as a 16-bit offset and put the excess in HIGH. */
2201 high = GEN_INT (CONST_HIGH_PART (offset));
2202 offset = CONST_LOW_PART (offset);
/* Add the excess into REG; the remaining small OFFSET is folded into
   the address returned below.  */
2204 high = mips_force_temporary (temp, high);
2205 reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2207 return plus_constant (reg, offset);
2210 /* Emit a call to __tls_get_addr. SYM is the TLS symbol we are
2211 referencing, and TYPE is the symbol type to use (either global
2212 dynamic or local dynamic). V0 is an RTX for the return value
2213 location. The entire insn sequence is returned. */
2215 static GTY(()) rtx mips_tls_symbol;
2218 mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2220 rtx insn, loc, tga, a0;
2222 a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2224 if (!mips_tls_symbol)
2225 mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2227 loc = mips_unspec_address (sym, type);
2231 emit_insn (gen_rtx_SET (Pmode, a0,
2232 gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2233 tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2234 insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2235 CONST_OR_PURE_CALL_P (insn) = 1;
2236 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2237 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2238 insn = get_insns ();
2245 /* Generate the code to access LOC, a thread local SYMBOL_REF. The
2246 return value will be a valid address and move_operand (either a REG
2250 mips_legitimize_tls_address (rtx loc)
2252 rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2253 enum tls_model model;
2255 v0 = gen_rtx_REG (Pmode, GP_RETURN);
2256 v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2258 model = SYMBOL_REF_TLS_MODEL (loc);
2259 /* Only TARGET_ABICALLS code can have more than one module; other
2260 code must be static and should not use a GOT. All TLS models
2261 reduce to local exec in this situation. */
2262 if (!TARGET_ABICALLS)
2263 model = TLS_MODEL_LOCAL_EXEC;
/* Global dynamic: __tls_get_addr on the symbol's TLSGD entry returns
   the address directly in $v0.  */
2267 case TLS_MODEL_GLOBAL_DYNAMIC:
2268 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2269 dest = gen_reg_rtx (Pmode);
2270 emit_libcall_block (insn, dest, v0, loc);
/* Local dynamic: one __tls_get_addr call fetches the module base,
   then the symbol's DTPREL offset is added with a %hi/%lo pair.  */
2273 case TLS_MODEL_LOCAL_DYNAMIC:
2274 insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2275 tmp1 = gen_reg_rtx (Pmode);
2277 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2278 share the LDM result with other LD model accesses. */
2279 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2281 emit_libcall_block (insn, tmp1, v0, eqv);
2283 tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2284 dest = gen_rtx_LO_SUM (Pmode, tmp2,
2285 mips_unspec_address (loc, SYMBOL_DTPREL));
/* Initial exec: load the TP-relative offset from the GOT and add it
   to the thread pointer (fetched via rdhwr into $v1).  */
2288 case TLS_MODEL_INITIAL_EXEC:
2289 tmp1 = gen_reg_rtx (Pmode);
2290 tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2291 if (Pmode == DImode)
2293 emit_insn (gen_tls_get_tp_di (v1));
2294 emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2298 emit_insn (gen_tls_get_tp_si (v1));
2299 emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2301 dest = gen_reg_rtx (Pmode);
2302 emit_insn (gen_add3_insn (dest, tmp1, v1));
/* Local exec: the TPREL offset is a link-time constant; add its
   %hi/%lo parts to the thread pointer.  */
2305 case TLS_MODEL_LOCAL_EXEC:
2306 if (Pmode == DImode)
2307 emit_insn (gen_tls_get_tp_di (v1));
2309 emit_insn (gen_tls_get_tp_si (v1));
2311 tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2312 dest = gen_rtx_LO_SUM (Pmode, tmp1,
2313 mips_unspec_address (loc, SYMBOL_TPREL));
2323 /* This function is used to implement LEGITIMIZE_ADDRESS. If *XLOC can
2324 be legitimized in a way that the generic machinery might not expect,
2325 put the new address in *XLOC and return true. MODE is the mode of
2326 the memory being accessed. */
2329 mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2331 enum mips_symbol_type symbol_type;
2333 if (mips_tls_operand_p (*xloc))
2335 *xloc = mips_legitimize_tls_address (*xloc);
2339 /* See if the address can split into a high part and a LO_SUM. */
2340 if (mips_symbolic_constant_p (*xloc, &symbol_type)
2341 && mips_symbolic_address_p (symbol_type, mode)
2342 && mips_split_p[symbol_type])
2344 *xloc = mips_split_symbol (0, *xloc);
2348 if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2350 /* Handle REG + CONSTANT using mips_add_offset. */
2353 reg = XEXP (*xloc, 0);
2354 if (!mips_valid_base_register_p (reg, mode, 0))
2355 reg = copy_to_mode_reg (Pmode, reg);
2356 *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2364 /* Subroutine of mips_build_integer (with the same interface).
2365 Assume that the final action in the sequence should be a left shift. */
2368 mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2370 unsigned int i, shift;
2372 /* Shift VALUE right until its lowest bit is set. Shift arithmetically
2373 since signed numbers are easier to load than unsigned ones. */
2375 while ((value & 1) == 0)
2376 value /= 2, shift++;
2378 i = mips_build_integer (codes, value);
2379 codes[i].code = ASHIFT;
2380 codes[i].value = shift;
2385 /* As for mips_build_shift, but assume that the final action will be
2386 an IOR or PLUS operation. */
2389 mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2391 unsigned HOST_WIDE_INT high;
2394 high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2395 if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2397 /* The constant is too complex to load with a simple lui/ori pair
2398 so our goal is to clear as many trailing zeros as possible.
2399 In this case, we know bit 16 is set and that the low 16 bits
2400 form a negative number. If we subtract that number from VALUE,
2401 we will clear at least the lowest 17 bits, maybe more. */
2402 i = mips_build_integer (codes, CONST_HIGH_PART (value));
2403 codes[i].code = PLUS;
2404 codes[i].value = CONST_LOW_PART (value);
2408 i = mips_build_integer (codes, high);
2409 codes[i].code = IOR;
2410 codes[i].value = value & 0xffff;
2416 /* Fill CODES with a sequence of rtl operations to load VALUE.
2417 Return the number of operations needed. */
2420 mips_build_integer (struct mips_integer_op *codes,
2421 unsigned HOST_WIDE_INT value)
2423 if (SMALL_OPERAND (value)
2424 || SMALL_OPERAND_UNSIGNED (value)
2425 || LUI_OPERAND (value))
2427 /* The value can be loaded with a single instruction. */
2428 codes[0].code = UNKNOWN;
2429 codes[0].value = value;
2432 else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2434 /* Either the constant is a simple LUI/ORI combination or its
2435 lowest bit is set. We don't want to shift in this case. */
2436 return mips_build_lower (codes, value);
2438 else if ((value & 0xffff) == 0)
2440 /* The constant will need at least three actions. The lowest
2441 16 bits are clear, so the final action will be a shift. */
2442 return mips_build_shift (codes, value);
2446 /* The final action could be a shift, add or inclusive OR.
2447 Rather than use a complex condition to select the best
2448 approach, try both mips_build_shift and mips_build_lower
2449 and pick the one that gives the shortest sequence.
2450 Note that this case is only used once per constant. */
2451 struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2452 unsigned int cost, alt_cost;
2454 cost = mips_build_shift (codes, value);
2455 alt_cost = mips_build_lower (alt_codes, value);
2456 if (alt_cost < cost)
2458 memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2466 /* Load VALUE into DEST, using TEMP as a temporary register if need be. */
2469 mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2471 struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2472 enum machine_mode mode;
2473 unsigned int i, cost;
2476 mode = GET_MODE (dest);
2477 cost = mips_build_integer (codes, value);
2479 /* Apply each binary operation to X. Invariant: X is a legitimate
2480 source operand for a SET pattern. */
2481 x = GEN_INT (codes[0].value);
2482 for (i = 1; i < cost; i++)
2484 if (!can_create_pseudo_p ())
2486 emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2490 x = force_reg (mode, x);
2491 x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2494 emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2498 /* Subroutine of mips_legitimize_move. Move constant SRC into register
2499 DEST given that SRC satisfies immediate_operand but doesn't satisfy
2503 mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2507 /* Split moves of big integers into smaller pieces. */
2508 if (splittable_const_int_operand (src, mode))
2510 mips_move_integer (dest, dest, INTVAL (src));
2514 /* Split moves of symbolic constants into high/low pairs. */
2515 if (splittable_symbolic_operand (src, mode))
2517 emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
/* TLS symbols need the full legitimization sequence.  */
2521 if (mips_tls_operand_p (src))
2523 emit_move_insn (dest, mips_legitimize_tls_address (src))
2527 /* If we have (const (plus symbol offset)), load the symbol first
2528 and then add in the offset. This is usually better than forcing
2529 the constant into memory, at least in non-mips16 code. */
2530 split_const (src, &base, &offset);
/* NOTE(review): the first conjunct of this condition is not visible in
   this fragment (the comment above suggests a non-mips16 guard; TODO
   confirm against the full file).  */
2532 && offset != const0_rtx
2533 && (can_create_pseudo_p () || SMALL_INT (offset)))
2535 base = mips_force_temporary (dest, base);
2536 emit_move_insn (dest, mips_add_offset (0, base, INTVAL (offset)));
/* Fallback: place the constant in the constant pool.  */
2540 src = force_const_mem (mode, src);
2542 /* When using explicit relocs, constant pool references are sometimes
2543 not legitimate addresses. */
2544 if (!memory_operand (src, VOIDmode))
2545 src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2546 emit_move_insn (dest, src);
2550 /* If (set DEST SRC) is not a valid instruction, emit an equivalent
2551 sequence that is valid. */
2554 mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2556 if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2558 emit_move_insn (dest, force_reg (mode, src));
2562 /* Check for individual, fully-reloaded mflo and mfhi instructions. */
2563 if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2564 && REG_P (src) && MD_REG_P (REGNO (src))
2565 && REG_P (dest) && GP_REG_P (REGNO (dest)))
2567 int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2568 if (GET_MODE_SIZE (mode) <= 4)
2569 emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2570 gen_rtx_REG (SImode, REGNO (src)),
2571 gen_rtx_REG (SImode, other_regno)));
2573 emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2574 gen_rtx_REG (DImode, REGNO (src)),
2575 gen_rtx_REG (DImode, other_regno)));
2579 /* We need to deal with constants that would be legitimate
2580 immediate_operands but not legitimate move_operands. */
2581 if (CONSTANT_P (src) && !move_operand (src, mode))
2583 mips_legitimize_const_move (mode, dest, src);
2584 set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2590 /* We need a lot of little routines to check constant values on the
2591 mips16. These are used to figure out how long the instruction will
2592 be. It would be much better to do this using constraints, but
2593 there aren't nearly enough letters available. */
2596 m16_check_op (rtx op, int low, int high, int mask)
2598 return (GET_CODE (op) == CONST_INT
2599 && INTVAL (op) >= low
2600 && INTVAL (op) <= high
2601 && (INTVAL (op) & mask) == 0);
2605 m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2607 return m16_check_op (op, 0x1, 0x8, 0);
2611 m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2613 return m16_check_op (op, - 0x8, 0x7, 0);
2617 m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2619 return m16_check_op (op, - 0x7, 0x8, 0);
2623 m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2625 return m16_check_op (op, - 0x10, 0xf, 0);
2629 m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2631 return m16_check_op (op, - 0xf, 0x10, 0);
2635 m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2637 return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2641 m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2643 return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2647 m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2649 return m16_check_op (op, - 0x80, 0x7f, 0);
2653 m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2655 return m16_check_op (op, - 0x7f, 0x80, 0);
2659 m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2661 return m16_check_op (op, 0x0, 0xff, 0);
2665 m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2667 return m16_check_op (op, - 0xff, 0x0, 0);
2671 m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2673 return m16_check_op (op, - 0x1, 0xfe, 0);
2677 m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2679 return m16_check_op (op, 0x0, 0xff << 2, 3);
2683 m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2685 return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2689 m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2691 return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2695 m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2697 return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2700 /* Return true if ADDR matches the pattern for the lwxs load scaled indexed
2701 address instruction. */
2704 mips_lwxs_address_p (rtx addr)
/* NOTE(review): the first conjunct of this condition (probably an ISA
   availability check) is not visible in this fragment -- TODO confirm.  */
2707 && GET_CODE (addr) == PLUS
2708 && REG_P (XEXP (addr, 1)))
/* lwxs computes base + index*4; require the scaled-index shape with a
   literal scale factor of 4 (word size).  */
2710 rtx offset = XEXP (addr, 0);
2711 if (GET_CODE (offset) == MULT
2712 && REG_P (XEXP (offset, 0))
2713 && GET_CODE (XEXP (offset, 1)) == CONST_INT
2714 && INTVAL (XEXP (offset, 1)) == 4)
/* Implement TARGET_RTX_COSTS: estimate the cost of rtx X appearing in
   context OUTER_CODE, storing the result in *TOTAL.  Much of the switch
   skeleton (case labels, returns) is not visible in this fragment.  */
2721 mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2723 enum machine_mode mode = GET_MODE (x);
2724 bool float_mode_p = FLOAT_MODE_P (mode);
/* CONST_INT handling; on mips16, the cost depends heavily on the
   consuming context because immediates have narrow encodings.  */
2731 /* A number between 1 and 8 inclusive is efficient for a shift.
2732 Otherwise, we will need an extended instruction. */
2733 if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2734 || (outer_code) == LSHIFTRT)
2736 if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2739 *total = COSTS_N_INSNS (1);
2743 /* We can use cmpi for an xor with an unsigned 16-bit value. */
2744 if ((outer_code) == XOR
2745 && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2751 /* We may be able to use slt or sltu for a comparison with a
2752 signed 16-bit value. (The boundary conditions aren't quite
2753 right, but this is just a heuristic anyhow.) */
2754 if (((outer_code) == LT || (outer_code) == LE
2755 || (outer_code) == GE || (outer_code) == GT
2756 || (outer_code) == LTU || (outer_code) == LEU
2757 || (outer_code) == GEU || (outer_code) == GTU)
2758 && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2764 /* Equality comparisons with 0 are cheap. */
2765 if (((outer_code) == EQ || (outer_code) == NE)
2772 /* Constants in the range 0...255 can be loaded with an unextended
2773 instruction. They are therefore as cheap as a register move.
2775 Given the choice between "li R1,0...255" and "move R1,R2"
2776 (where R2 is a known constant), it is usually better to use "li",
2777 since we do not want to unnecessarily extend the lifetime
2779 if (outer_code == SET
2781 && INTVAL (x) < 256)
2789 /* These can be used anywhere. */
2794 /* Otherwise fall through to the handling below because
2795 we'll need to construct the constant. */
/* Symbolic constants: a single instruction if directly legitimate,
   otherwise a constant-pool load.  */
2801 if (LEGITIMATE_CONSTANT_P (x))
2803 *total = COSTS_N_INSNS (1);
2808 /* The value will need to be fetched from the constant pool. */
2809 *total = CONSTANT_POOL_COST;
/* MEM: cost by the number of address instructions, plus the load.  */
2815 /* If the address is legitimate, return the number of
2816 instructions it needs. */
2817 rtx addr = XEXP (x, 0);
2818 int n = mips_address_insns (addr, GET_MODE (x));
2821 *total = COSTS_N_INSNS (n + 1);
2824 /* Check for scaled indexed address. */
2825 if (mips_lwxs_address_p (addr))
2827 *total = COSTS_N_INSNS (2);
2830 /* Otherwise use the default handling. */
2835 *total = COSTS_N_INSNS (6);
2839 *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
/* Double-word operations on 32-bit targets take multiple insns.  */
2845 if (mode == DImode && !TARGET_64BIT)
2847 *total = COSTS_N_INSNS (2);
2855 if (mode == DImode && !TARGET_64BIT)
2857 *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2865 *total = COSTS_N_INSNS (1);
2867 *total = COSTS_N_INSNS (4);
2871 *total = COSTS_N_INSNS (1);
/* Add/subtract: FP uses the tuned fp_add cost.  */
2878 *total = mips_cost->fp_add;
2882 else if (mode == DImode && !TARGET_64BIT)
2884 *total = COSTS_N_INSNS (4);
2890 if (mode == DImode && !TARGET_64BIT)
2892 *total = COSTS_N_INSNS (4);
/* Multiply/divide: costs come from the per-CPU mips_cost table.  */
2899 *total = mips_cost->fp_mult_sf;
2901 else if (mode == DFmode)
2902 *total = mips_cost->fp_mult_df;
2904 else if (mode == SImode)
2905 *total = mips_cost->int_mult_si;
2908 *total = mips_cost->int_mult_di;
2917 *total = mips_cost->fp_div_sf;
2919 *total = mips_cost->fp_div_df;
2928 *total = mips_cost->int_div_di;
2930 *total = mips_cost->int_div_si;
2935 /* A sign extend from SImode to DImode in 64-bit mode is often
2936 zero instructions, because the result can often be used
2937 directly by another instruction; we'll call it one. */
2938 if (TARGET_64BIT && mode == DImode
2939 && GET_MODE (XEXP (x, 0)) == SImode)
2940 *total = COSTS_N_INSNS (1);
2942 *total = COSTS_N_INSNS (2);
/* Zero extend SI->DI needs an explicit masking step; others are cheap.  */
2946 if (TARGET_64BIT && mode == DImode
2947 && GET_MODE (XEXP (x, 0)) == SImode)
2948 *total = COSTS_N_INSNS (2);
2950 *total = COSTS_N_INSNS (1);
2954 case UNSIGNED_FLOAT:
2957 case FLOAT_TRUNCATE:
2959 *total = mips_cost->fp_add;
2967 /* Provide the costs of an addressing mode that contains ADDR.
2968 If ADDR is not a valid address, its cost is irrelevant. */
2971 mips_address_cost (rtx addr)
2973 return mips_address_insns (addr, SImode);
2976 /* Return one word of double-word value OP, taking into account the fixed
2977 endianness of certain registers. HIGH_P is true to select the high part,
2978 false to select the low part. */
2981 mips_subword (rtx op, int high_p)
2984 enum machine_mode mode;
2986 mode = GET_MODE (op);
/* NOTE(review): the fallback mode chosen when OP has VOIDmode is not
   visible in this fragment -- TODO confirm against the full file.  */
2987 if (mode == VOIDmode)
/* BYTE is the in-memory offset of the requested word; the high word of
   a big-endian value sits at offset 0.  */
2990 if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2991 byte = UNITS_PER_WORD;
/* FPR pairs have fixed register-number ordering regardless of
   endianness: the high part is always the odd register.  */
2995 if (FP_REG_RTX_P (op))
2996 return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2999 return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
3001 return simplify_gen_subreg (word_mode, op, mode, byte);
3005 /* Return true if a 64-bit move from SRC to DEST should be split into two. */
3008 mips_split_64bit_move_p (rtx dest, rtx src)
/* NOTE(review): the leading checks of this function (and the guard
   around the load/store cases below) are not visible in this
   fragment -- TODO confirm against the full file.  */
3013 /* FP->FP moves can be done in a single instruction. */
3014 if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
3017 /* Check for floating-point loads and stores. They can be done using
3018 ldc1 and sdc1 on MIPS II and above. */
3021 if (FP_REG_RTX_P (dest) && MEM_P (src))
3023 if (FP_REG_RTX_P (src) && MEM_P (dest))
3030 /* Split a 64-bit move from SRC to DEST assuming that
3031 mips_split_64bit_move_p holds.
3033 Moves into and out of FPRs cause some difficulty here. Such moves
3034 will always be DFmode, since paired FPRs are not allowed to store
3035 DImode values. The most natural representation would be two separate
3036 32-bit moves, such as:
3038 (set (reg:SI $f0) (mem:SI ...))
3039 (set (reg:SI $f1) (mem:SI ...))
3041 However, the second insn is invalid because odd-numbered FPRs are
3042 not allowed to store independent values. Use the patterns load_df_low,
3043 load_df_high and store_df_high instead. */
3046 mips_split_64bit_move (rtx dest, rtx src)
3048 if (FP_REG_RTX_P (dest))
3050 /* Loading an FPR from memory or from GPRs. */
/* First sub-branch (guard not visible in this fragment -- presumably
   when the mthc1 instruction is available; TODO confirm): load the low
   word, then set the high word with mthc1.  */
3053 dest = gen_lowpart (DFmode, dest);
3054 emit_insn (gen_load_df_low (dest, mips_subword (src, 0)));
3055 emit_insn (gen_mthc1 (dest, mips_subword (src, 1),
/* Otherwise use the load_df_low/load_df_high pattern pair.  */
3060 emit_insn (gen_load_df_low (copy_rtx (dest),
3061 mips_subword (src, 0)));
3062 emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
3066 else if (FP_REG_RTX_P (src))
3068 /* Storing an FPR into memory or GPRs. */
3071 src = gen_lowpart (DFmode, src);
3072 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3073 emit_insn (gen_mfhc1 (mips_subword (dest, 1), src));
3077 emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
3078 emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
3083 /* The operation can be split into two normal moves. Decide in
3084 which order to do them. */
/* If the low destination word overlaps SRC, move the high word first
   so the second move does not read a clobbered value.  */
3087 low_dest = mips_subword (dest, 0);
3088 if (REG_P (low_dest)
3089 && reg_overlap_mentioned_p (low_dest, src))
3091 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3092 emit_move_insn (low_dest, mips_subword (src, 0));
3096 emit_move_insn (low_dest, mips_subword (src, 0));
3097 emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
3102 /* Return the appropriate instructions to move SRC into DEST. Assume
3103 that SRC is operand 1 and DEST is operand 0. */
3106 mips_output_move (rtx dest, rtx src)
3108 enum rtx_code dest_code, src_code;
3111 dest_code = GET_CODE (dest);
3112 src_code = GET_CODE (src);
3113 dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
/* Double-word moves that must be split are handled elsewhere.  */
3115 if (dbl_p && mips_split_64bit_move_p (dest, src))
/* Moves whose source is a GPR or constant zero ($0).  */
3118 if ((src_code == REG && GP_REG_P (REGNO (src)))
3119 || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
3121 if (dest_code == REG)
3123 if (GP_REG_P (REGNO (dest)))
3124 return "move\t%0,%z1";
3126 if (MD_REG_P (REGNO (dest)))
/* DSP accumulator names end in e.g. "ac1"; patch the two-letter
   accumulator suffix into the mnemonic template.  */
3129 if (DSP_ACC_REG_P (REGNO (dest)))
3131 static char retval[] = "mt__\t%z1,%q0";
3132 retval[2] = reg_names[REGNO (dest)][4];
3133 retval[3] = reg_names[REGNO (dest)][5];
3137 if (FP_REG_P (REGNO (dest)))
3138 return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
/* Coprocessor moves: patch the coprocessor number into "dmtcN";
   skipping the leading 'd' gives the 32-bit form.  */
3140 if (ALL_COP_REG_P (REGNO (dest)))
3142 static char retval[] = "dmtc_\t%z1,%0";
3144 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3145 return (dbl_p ? retval : retval + 1);
3148 if (dest_code == MEM)
3149 return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
/* Moves whose destination is a GPR.  */
3151 if (dest_code == REG && GP_REG_P (REGNO (dest)))
3153 if (src_code == REG)
3155 if (DSP_ACC_REG_P (REGNO (src)))
3157 static char retval[] = "mf__\t%0,%q1";
3158 retval[2] = reg_names[REGNO (src)][4];
3159 retval[3] = reg_names[REGNO (src)][5];
3163 if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
3164 return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
3166 if (FP_REG_P (REGNO (src)))
3167 return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
3169 if (ALL_COP_REG_P (REGNO (src)))
3171 static char retval[] = "dmfc_\t%0,%1";
3173 retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3174 return (dbl_p ? retval : retval + 1);
3178 if (src_code == MEM)
3179 return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
3181 if (src_code == CONST_INT)
3183 /* Don't use the X format, because that will give out of
3184 range numbers for 64-bit hosts and 32-bit targets. */
3186 return "li\t%0,%1\t\t\t# %X1";
/* Small unsigned constants fit "li"; small negatives use the
   alternative form (not visible in this fragment).  */
3188 if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
3191 if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
3195 if (src_code == HIGH)
3196 return "lui\t%0,%h1";
3198 if (CONST_GP_P (src))
3199 return "move\t%0,%1";
3201 if (symbolic_operand (src, VOIDmode))
3202 return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
/* Moves whose source is an FPR.  */
3204 if (src_code == REG && FP_REG_P (REGNO (src)))
3206 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3208 if (GET_MODE (dest) == V2SFmode)
3209 return "mov.ps\t%0,%1";
3211 return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
3214 if (dest_code == MEM)
3215 return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
3217 if (dest_code == REG && FP_REG_P (REGNO (dest)))
3219 if (src_code == MEM)
3220 return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
/* Coprocessor loads/stores: patch width ('d' or 'w') and coprocessor
   number into the "l_c_"/"s_c_" templates.  */
3222 if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
3224 static char retval[] = "l_c_\t%0,%1";
3226 retval[1] = (dbl_p ? 'd' : 'w');
3227 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
3230 if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
3232 static char retval[] = "s_c_\t%1,%0";
3234 retval[1] = (dbl_p ? 'd' : 'w');
3235 retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3241 /* Restore $gp from its save slot. Valid only when using o32 or
3245 mips_restore_gp (void)
3249 gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3251 address = mips_add_offset (pic_offset_table_rtx,
3252 frame_pointer_needed
3253 ? hard_frame_pointer_rtx
3254 : stack_pointer_rtx,
3255 current_function_outgoing_args_size);
3256 slot = gen_rtx_MEM (Pmode, address);
3258 emit_move_insn (pic_offset_table_rtx, slot);
3259 if (!TARGET_EXPLICIT_RELOCS)
3260 emit_insn (gen_blockage ());
3263 /* Emit an instruction of the form (set TARGET (CODE OP0 OP1)). */
3266 mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3268 emit_insn (gen_rtx_SET (VOIDmode, target,
3269 gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3272 /* Return true if CMP1 is a suitable second operand for relational
3273 operator CODE. See also the *sCC patterns in mips.md. */
3276 mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
/* NOTE(review): the switch/case labels pairing each relational code
   with its return below are not visible in this fragment -- TODO
   confirm which codes map to which predicate.  */
3282 return reg_or_0_operand (cmp1, VOIDmode);
3286 return !TARGET_MIPS16 && cmp1 == const1_rtx;
3290 return arith_operand (cmp1, VOIDmode);
3293 return sle_operand (cmp1, VOIDmode);
3296 return sleu_operand (cmp1, VOIDmode);
3303 /* Canonicalize LE or LEU comparisons into LT comparisons when
3304 possible to avoid extra instructions or inverting the
3308 mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3309 enum machine_mode mode)
3311 HOST_WIDE_INT original, plus_one;
/* Only constant second operands can be rewritten as OP + 1.  */
3313 if (GET_CODE (*cmp1) != CONST_INT)
/* x <= C is x < C+1 provided C+1 does not wrap around in MODE; the
   check differs for the signed and unsigned cases (the case structure
   distinguishing them is not visible in this fragment).  */
3316 original = INTVAL (*cmp1);
3317 plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3322 if (original < plus_one)
3325 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3334 *cmp1 = force_reg (mode, GEN_INT (plus_one));
3347 /* Compare CMP0 and CMP1 using relational operator CODE and store the
3348 result in TARGET. CMP0 and TARGET are register_operands that have
3349 the same integer mode. If INVERT_PTR is nonnull, it's OK to set
3350 TARGET to the inverse of the result and flip *INVERT_PTR instead. */
3353 mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3354 rtx target, rtx cmp0, rtx cmp1)
3356 /* First see if there is a MIPS instruction that can do this operation
3357 with CMP1 in its current form. If not, try to canonicalize the
3358 comparison to LT. If that fails, try doing the same for the
3359 inverse operation. If that also fails, force CMP1 into a register
3361 if (mips_relational_operand_ok_p (code, cmp1))
3362 mips_emit_binary (code, target, cmp0, cmp1);
3363 else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3364 mips_emit_binary (code, target, cmp0, cmp1);
3367 enum rtx_code inv_code = reverse_condition (code);
3368 if (!mips_relational_operand_ok_p (inv_code, cmp1))
3370 cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3371 mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3373 else if (invert_ptr == 0)
3375 rtx inv_target = gen_reg_rtx (GET_MODE (target));
3376 mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3377 mips_emit_binary (XOR, target, inv_target, const1_rtx);
3381 *invert_ptr = !*invert_ptr;
3382 mips_emit_binary (inv_code, target, cmp0, cmp1);
3387 /* Return a register that is zero iff CMP0 and CMP1 are equal.
3388 The register will have the same mode as CMP0. */
3391 mips_zero_if_equal (rtx cmp0, rtx cmp1)
3393 if (cmp1 == const0_rtx)
3396 if (uns_arith_operand (cmp1, VOIDmode))
3397 return expand_binop (GET_MODE (cmp0), xor_optab,
3398 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3400 return expand_binop (GET_MODE (cmp0), sub_optab,
3401 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3404 /* Convert *CODE into a code that can be used in a floating-point
3405 scc instruction (c.<cond>.<fmt>). Return true if the values of
3406 the condition code registers will be inverted, with 0 indicating
3407 that the condition holds. */
3410 mips_reverse_fp_cond_p (enum rtx_code *code)
/* NOTE(review): the switch distinguishing directly-supported c.cond.fmt
   codes from those that must be reversed is not visible in this
   fragment -- TODO confirm which codes take this path.  */
3417 *code = reverse_condition_maybe_unordered (*code);
3425 /* Convert a comparison into something that can be used in a branch or
3426 conditional move. cmp_operands[0] and cmp_operands[1] are the values
3427 being compared and *CODE is the code used to compare them.
3429 Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3430 If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3431 otherwise any standard branch condition can be used. The standard branch
3434 - EQ/NE between two registers.
3435 - any comparison between a register and zero. */
3438 mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
/* Integer comparisons.  */
3440 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
/* Comparisons against zero can be used directly.  */
3442 if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3444 *op0 = cmp_operands[0];
3445 *op1 = cmp_operands[1];
/* EQ/NE: reduce to a zero test via mips_zero_if_equal.  The assignment
   of *OP1 in this branch is not visible here (presumably const0_rtx).  */
3447 else if (*code == EQ || *code == NE)
3451 *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3456 *op0 = cmp_operands[0];
3457 *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3462 /* The comparison needs a separate scc instruction. Store the
3463 result of the scc in *OP0 and compare it against zero. */
3464 bool invert = false;
3465 *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3467 mips_emit_int_relational (*code, &invert, *op0,
3468 cmp_operands[0], cmp_operands[1]);
3469 *code = (invert ? EQ : NE);
/* Floating-point comparisons.  */
3474 enum rtx_code cmp_code;
3476 /* Floating-point tests use a separate c.cond.fmt comparison to
3477 set a condition code register. The branch or conditional move
3478 will then compare that register against zero.
3480 Set CMP_CODE to the code of the comparison instruction and
3481 *CODE to the code that the branch or move should use. */
3483 *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
/* NOTE(review): the condition choosing between a fresh CC register
   and the fixed FPSW register is not visible in this fragment.  */
3485 ? gen_reg_rtx (CCmode)
3486 : gen_rtx_REG (CCmode, FPSW_REGNUM));
3488 mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3492 /* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3493 Store the result in TARGET and return true if successful.
3495 On 64-bit targets, TARGET may be wider than cmp_operands[0]. */
3498 mips_emit_scc (enum rtx_code code, rtx target)
3500 if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3503 target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3504 if (code == EQ || code == NE)
3506 rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3507 mips_emit_binary (code, target, zie, const0_rtx);
3510 mips_emit_int_relational (code, 0, target,
3511 cmp_operands[0], cmp_operands[1]);
3515 /* Emit the common code for doing conditional branches.
3516 operand[0] is the label to jump to.
3517 The comparison operands are saved away by cmp{si,di,sf,df}. */
3520 gen_conditional_branch (rtx *operands, enum rtx_code code)
3522 rtx op0, op1, condition;
/* MIPS16 has no slt-against-immediate branches, so restrict the
   comparison to EQ/NE-against-zero form in that case.  */
3524 mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3525 condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3526 emit_jump_insn (gen_condjump (condition, operands[0]));
/* Expand a V2SF vcond pattern as:
3531 (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3532 (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS)) */
3535 mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3536 enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
/* If the hardware cannot test COND directly, test its reverse and
   swap the move sources below.  */
3541 reversed_p = mips_reverse_fp_cond_p (&cond);
3542 cmp_result = gen_reg_rtx (CCV2mode);
3543 emit_insn (gen_scc_ps (cmp_result,
3544 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
/* Reversed condition: pick FALSE_SRC when the (reversed) test holds.  */
3546 emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
/* Non-reversed condition: normal operand order.  */
3549 emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3553 /* Emit the common code for conditional moves. OPERANDS is the array
3554 of operands passed to the conditional move define_expand. */
3557 gen_conditional_move (rtx *operands)
/* operands[1] holds the comparison; operands[2]/[3] are the two
   values to select between.  */
3562 code = GET_CODE (operands[1]);
/* Conditional moves only test a register against zero, hence the
   need_eq_ne_p argument is true here.  */
3563 mips_emit_compare (&code, &op0, &op1, true);
3564 emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3565 gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3566 gen_rtx_fmt_ee (code,
3569 operands[2], operands[3])));
3572 /* Emit a conditional trap. OPERANDS is the array of operands passed to
3573 the conditional_trap expander. */
3576 mips_gen_conditional_trap (rtx *operands)
3579 enum rtx_code cmp_code = GET_CODE (operands[0]);
3580 enum machine_mode mode = GET_MODE (cmp_operands[0]);
3582 /* MIPS conditional trap machine instructions don't have GT or LE
3583 flavors, so we must invert the comparison and convert to LT and
3584 GE, respectively. */
3587 case GT: cmp_code = LT; break;
3588 case LE: cmp_code = GE; break;
3589 case GTU: cmp_code = LTU; break;
3590 case LEU: cmp_code = GEU; break;
/* If the code was unchanged, use the operands as-is ...  */
3593 if (cmp_code == GET_CODE (operands[0]))
3595 op0 = cmp_operands[0];
3596 op1 = cmp_operands[1];
/* ... otherwise swap them to match the inverted comparison.  */
3600 op0 = cmp_operands[1];
3601 op1 = cmp_operands[0];
/* Trap instructions take a register and a register-or-immediate.  */
3603 op0 = force_reg (mode, op0);
3604 if (!arith_operand (op1, mode))
3605 op1 = force_reg (mode, op1);
3607 emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3608 gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3612 /* Return true if calls to X can use R_MIPS_CALL* relocations. */
3615 mips_ok_for_lazy_binding_p (rtx x)
/* Lazy binding needs the GOT, a symbolic callee, and a symbol that
   does not bind locally (a local symbol needs no lazy stub).  */
3617 return (TARGET_USE_GOT
3618 && GET_CODE (x) == SYMBOL_REF
3619 && !mips_symbol_binds_local_p (x));
3622 /* Load function address ADDR into register DEST. SIBCALL_P is true
3623 if the address is needed for a sibling call. */
3626 mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3628 /* If we're generating PIC, and this call is to a global function,
3629 try to allow its address to be resolved lazily. This isn't
3630 possible if TARGET_CALL_SAVED_GP since the value of $gp on entry
3631 to the stub would be our caller's gp, not ours. */
3632 if (TARGET_EXPLICIT_RELOCS
3633 && !(sibcall_p && TARGET_CALL_SAVED_GP)
3634 && mips_ok_for_lazy_binding_p (addr))
3636 rtx high, lo_sum_symbol;
/* Split the GOT access into a %call_hi/%call_lo pair so the load
   can use R_MIPS_CALL* relocations.  */
3638 high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3639 addr, SYMBOL_GOTOFF_CALL);
3640 lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3641 if (Pmode == SImode)
3642 emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3644 emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
/* Fallback: a plain move of the (possibly legitimized) address.  */
3647 emit_move_insn (dest, addr);
3651 /* Expand a call or call_value instruction. RESULT is where the
3652 result will go (null for calls), ADDR is the address of the
3653 function, ARGS_SIZE is the size of the arguments and AUX is
3654 the value passed to us by mips_function_arg. SIBCALL_P is true
3655 if we are expanding a sibling call, false if we're expanding
3659 mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3661 rtx orig_addr, pattern, insn;
/* Legitimize the call target if it can't appear directly in a call
   insn (e.g. a GOT-addressed global under PIC).  */
3664 if (!call_insn_operand (addr, VOIDmode))
3666 addr = gen_reg_rtx (Pmode);
3667 mips_load_call_address (addr, orig_addr, sibcall_p);
/* MIPS16 hard-float calls may need a stub to move FP arguments;
   AUX encodes the fp_code as a mode (see function_arg).  */
3670 if (mips16_hard_float
3671 && build_mips16_call_stub (result, addr, args_size,
3672 aux == 0 ? 0 : (int) GET_MODE (aux)))
/* No result: plain call / sibcall.  */
3676 pattern = (sibcall_p
3677 ? gen_sibcall_internal (addr, args_size)
3678 : gen_call_internal (addr, args_size));
/* Two-register return value (e.g. complex): use the *_multiple
   patterns so both result registers are visible to the insn.  */
3679 else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3683 reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3684 reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3687 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3688 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
/* Single-register result.  */
3691 pattern = (sibcall_p
3692 ? gen_sibcall_value_internal (result, addr, args_size)
3693 : gen_call_value_internal (result, addr, args_size));
3695 insn = emit_call_insn (pattern);
3697 /* Lazy-binding stubs require $gp to be valid on entry. */
3698 if (mips_ok_for_lazy_binding_p (orig_addr))
3699 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3703 /* We can handle any sibcall when TARGET_SIBCALLS is true. */
3706 mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3707 tree exp ATTRIBUTE_UNUSED)
/* Implements TARGET_FUNCTION_OK_FOR_SIBCALL; no per-function checks
   are needed beyond the target-wide capability flag.  */
3709 return TARGET_SIBCALLS;
3712 /* Emit code to move general operand SRC into condition-code
3713 register DEST. SCRATCH is a scratch TFmode float register.
3720 where FP1 and FP2 are single-precision float registers
3721 taken from SCRATCH. */
3724 mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3728 /* Change the source to SFmode. */
3730 src = adjust_address (src, SFmode, 0);
3731 else if (REG_P (src) || GET_CODE (src) == SUBREG)
3732 src = gen_rtx_REG (SFmode, true_regnum (src));
/* Carve two SFmode registers out of the TFmode scratch; the second
   lies MAX_FPRS_PER_FMT registers above the first.  */
3734 fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3735 fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + MAX_FPRS_PER_FMT);
/* DEST = (0 < SRC): a nonzero SRC yields a set condition code.  */
3737 emit_move_insn (copy_rtx (fp1), src);
3738 emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3739 emit_insn (gen_slt_sf (dest, fp2, fp1));
3742 /* Emit code to change the current function's return address to
3743 ADDRESS. SCRATCH is available as a scratch register, if needed.
3744 ADDRESS and SCRATCH are both word-mode GPRs. */
3747 mips_set_return_address (rtx address, rtx scratch)
/* Lay out the frame so the $31 save slot offset is known; the
   assertion checks that $31 (bit 31 of the mask) really is saved.  */
3751 compute_frame_size (get_frame_size ());
3752 gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3753 slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3754 cfun->machine->frame.gp_sp_offset);
3756 emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3759 /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3760 Assume that the areas do not overlap. */
3763 mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3765 HOST_WIDE_INT offset, delta;
3766 unsigned HOST_WIDE_INT bits;
3768 enum machine_mode mode;
3771 /* Work out how many bits to move at a time. If both operands have
3772 half-word alignment, it is usually better to move in half words.
3773 For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3774 and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3775 Otherwise move word-sized chunks. */
3776 if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3777 && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3778 bits = BITS_PER_WORD / 2;
3780 bits = BITS_PER_WORD;
3782 mode = mode_for_size (bits, MODE_INT, 0);
3783 delta = bits / BITS_PER_UNIT;
3785 /* Allocate a buffer for the temporary registers. */
3786 regs = alloca (sizeof (rtx) * length / delta);
3788 /* Load as many BITS-sized chunks as possible. Use a normal load if
3789 the source has enough alignment, otherwise use left/right pairs. */
3790 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3792 regs[i] = gen_reg_rtx (mode);
3793 if (MEM_ALIGN (src) >= bits)
3794 emit_move_insn (regs[i], adjust_address (src, mode, offset));
/* Unaligned source: lwl/lwr (or ldl/ldr) pair.  */
3797 rtx part = adjust_address (src, BLKmode, offset);
3798 if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3803 /* Copy the chunks to the destination. */
3804 for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3805 if (MEM_ALIGN (dest) >= bits)
3806 emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
/* Unaligned destination: swl/swr (or sdl/sdr) pair.  */
3809 rtx part = adjust_address (dest, BLKmode, offset);
3810 if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3814 /* Mop up any left-over bytes. */
3815 if (offset < length)
3817 src = adjust_address (src, BLKmode, offset);
3818 dest = adjust_address (dest, BLKmode, offset);
3819 move_by_pieces (dest, src, length - offset,
3820 MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
/* Number of registers (and hence bytes) moved per iteration of a
   block-move loop; see mips_block_move_loop below.  */
3824 #define MAX_MOVE_REGS 4
3825 #define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3828 /* Helper function for doing a loop-based block operation on memory
3829 reference MEM. Each iteration of the loop will operate on LENGTH
3832 Create a new base register for use within the loop and point it to
3833 the start of MEM. Create a new memory reference that uses this
3834 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
3837 mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3838 rtx *loop_reg, rtx *loop_mem)
3840 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3842 /* Although the new mem does not refer to a known location,
3843 it does keep up to LENGTH bytes of alignment. */
3844 *loop_mem = change_address (mem, BLKmode, *loop_reg);
3845 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3849 /* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3850 per iteration. LENGTH must be at least MAX_MOVE_BYTES. Assume that the
3851 memory regions do not overlap. */
3854 mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3856 rtx label, src_reg, dest_reg, final_src;
3857 HOST_WIDE_INT leftover;
/* Bytes that don't fill a whole iteration are copied after the loop.  */
3859 leftover = length % MAX_MOVE_BYTES;
3862 /* Create registers and memory references for use within the loop. */
3863 mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3864 mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3866 /* Calculate the value that SRC_REG should have after the last iteration
3868 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3871 /* Emit the start of the loop. */
3872 label = gen_label_rtx ();
3875 /* Emit the loop body. */
3876 mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3878 /* Move on to the next block. */
3879 emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3880 emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3882 /* Emit the loop condition. */
3883 if (Pmode == DImode)
3884 emit_insn (gen_cmpdi (src_reg, final_src));
3886 emit_insn (gen_cmpsi (src_reg, final_src));
/* Loop until SRC_REG reaches FINAL_SRC.  */
3887 emit_jump_insn (gen_bne (label));
3889 /* Mop up any left-over bytes. */
3891 mips_block_move_straight (dest, src, leftover);
3895 /* Expand a loop of synci insns for the address range [BEGIN, END). */
3898 mips_expand_synci_loop (rtx begin, rtx end)
3900 rtx inc, label, cmp, cmp_result;
3902 /* Load INC with the cache line size (rdhwr INC,$1). */
3903 inc = gen_reg_rtx (SImode);
3904 emit_insn (gen_rdhwr (inc, const1_rtx));
3906 /* Loop back to here. */
3907 label = gen_label_rtx ();
/* Flush the line containing BEGIN.  */
3910 emit_insn (gen_synci (begin));
/* CMP = (BEGIN > END), tested before BEGIN is advanced by one line.  */
3912 cmp = gen_reg_rtx (Pmode);
3913 mips_emit_binary (GTU, cmp, begin, end);
3915 mips_emit_binary (PLUS, begin, begin, inc);
/* Branch back while CMP == 0, i.e. while BEGIN had not passed END.  */
3917 cmp_result = gen_rtx_EQ (VOIDmode, cmp, const0_rtx);
3918 emit_jump_insn (gen_condjump (cmp_result, label));
3921 /* Expand a movmemsi instruction. */
3924 mips_expand_block_move (rtx dest, rtx src, rtx length)
/* Only constant lengths are expanded inline; small blocks get
   straight-line code, larger ones a MAX_MOVE_BYTES-per-iteration loop.  */
3926 if (GET_CODE (length) == CONST_INT)
3928 if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3930 mips_block_move_straight (dest, src, INTVAL (length));
3935 mips_block_move_loop (dest, src, INTVAL (length));
3942 /* Argument support functions. */
3944 /* Initialize CUMULATIVE_ARGS for a function. */
3947 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3948 rtx libname ATTRIBUTE_UNUSED)
3950 static CUMULATIVE_ARGS zero_cum;
3951 tree param, next_param;
/* A prototype exists iff the function type carries an argument list.  */
3954 cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3956 /* Determine if this function has variable arguments. This is
3957 indicated by the last argument being 'void_type_mode' if there
3958 are no variable arguments. The standard MIPS calling sequence
3959 passes all arguments in the general purpose registers in this case. */
3961 for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3962 param != 0; param = next_param)
3964 next_param = TREE_CHAIN (param);
/* No trailing void_type_node means the function is variadic;
   gp_reg_found forces GPR passing (see mips_arg_info).  */
3965 if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3966 cum->gp_reg_found = 1;
3971 /* Fill INFO with information about a single argument. CUM is the
3972 cumulative state for earlier arguments. MODE is the mode of this
3973 argument and TYPE is its type (if known). NAMED is true if this
3974 is a named (fixed) argument rather than a variable one. */
3977 mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3978 tree type, int named, struct mips_arg_info *info)
3980 bool doubleword_aligned_p;
3981 unsigned int num_bytes, num_words, max_regs;
3983 /* Work out the size of the argument. */
3984 num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3985 num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3987 /* Decide whether it should go in a floating-point register, assuming
3988 one is free. Later code checks for availability.
3990 The checks against UNITS_PER_FPVALUE handle the soft-float and
3991 single-float cases. */
/* NOTE(review): the ABI switch/case labels selecting between the
   following three rules are elided in this excerpt.  */
3995 /* The EABI conventions have traditionally been defined in terms
3996 of TYPE_MODE, regardless of the actual type. */
3997 info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3998 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3999 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4004 /* Only leading floating-point scalars are passed in
4005 floating-point registers. We also handle vector floats the same
4006 say, which is OK because they are not covered by the standard ABI. */
4007 info->fpr_p = (!cum->gp_reg_found
4008 && cum->arg_number < 2
4009 && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
4010 || VECTOR_FLOAT_TYPE_P (type))
4011 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4012 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4013 && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
4018 /* Scalar and complex floating-point types are passed in
4019 floating-point registers. */
4020 info->fpr_p = (named
4021 && (type == 0 || FLOAT_TYPE_P (type))
4022 && (GET_MODE_CLASS (mode) == MODE_FLOAT
4023 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4024 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
4025 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
4027 /* ??? According to the ABI documentation, the real and imaginary
4028 parts of complex floats should be passed in individual registers.
4029 The real and imaginary parts of stack arguments are supposed
4030 to be contiguous and there should be an extra word of padding
4033 This has two problems. First, it makes it impossible to use a
4034 single "void *" va_list type, since register and stack arguments
4035 are passed differently. (At the time of writing, MIPSpro cannot
4036 handle complex float varargs correctly.) Second, it's unclear
4037 what should happen when there is only one register free.
4039 For now, we assume that named complex floats should go into FPRs
4040 if there are two FPRs free, otherwise they should be passed in the
4041 same way as a struct containing two floats. */
4043 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
4044 && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
/* Not enough FPRs left for both parts: fall back to GPRs/stack.  */
4046 if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
4047 info->fpr_p = false;
4057 /* See whether the argument has doubleword alignment. */
4058 doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
4060 /* Set REG_OFFSET to the register count we're interested in.
4061 The EABI allocates the floating-point registers separately,
4062 but the other ABIs allocate them like integer registers. */
4063 info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
4067 /* Advance to an even register if the argument is doubleword-aligned. */
4068 if (doubleword_aligned_p)
4069 info->reg_offset += info->reg_offset & 1;
4071 /* Work out the offset of a stack argument. */
4072 info->stack_offset = cum->stack_words;
4073 if (doubleword_aligned_p)
4074 info->stack_offset += info->stack_offset & 1;
4076 max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
4078 /* Partition the argument between registers and stack. */
4079 info->reg_words = MIN (num_words, max_regs);
4080 info->stack_words = num_words - info->reg_words;
4084 /* INFO describes an argument that is passed in a single-register value.
4085 Return the register it uses, assuming that FPRs are available if
4089 mips_arg_regno (const struct mips_arg_info *info, bool hard_float_p)
/* Non-FP arguments (or soft-float) always use GPRs.  */
4091 if (!info->fpr_p || !hard_float_p)
4092 return GP_ARG_FIRST + info->reg_offset;
4093 else if (mips_abi == ABI_32 && TARGET_DOUBLE_FLOAT && info->reg_offset > 0)
4094 /* In o32, the second argument is always passed in $f14
4095 for TARGET_DOUBLE_FLOAT, regardless of whether the
4096 first argument was a word or doubleword. */
4097 return FP_ARG_FIRST + 2;
4099 return FP_ARG_FIRST + info->reg_offset;
4102 /* Implement FUNCTION_ARG_ADVANCE. */
4105 function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4106 tree type, int named)
4108 struct mips_arg_info info;
4110 mips_arg_info (cum, mode, type, named, &info);
/* Presumably guarded by !info.fpr_p (condition elided): once a GPR
   argument is seen, later floats no longer use FPRs under o32.  */
4113 cum->gp_reg_found = true;
4115 /* See the comment above the cumulative args structure in mips.h
4116 for an explanation of what this code does. It assumes the O32
4117 ABI, which passes at most 2 arguments in float registers. */
4118 if (cum->arg_number < 2 && info.fpr_p)
4119 cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
/* Non-EABI (or non-FP): GPR count tracks all register arguments.  */
4121 if (mips_abi != ABI_EABI || !info.fpr_p)
4122 cum->num_gprs = info.reg_offset + info.reg_words;
4123 else if (info.reg_words > 0)
4124 cum->num_fprs += MAX_FPRS_PER_FMT;
4126 if (info.stack_words > 0)
4127 cum->stack_words = info.stack_offset + info.stack_words;
4132 /* Implement FUNCTION_ARG. */
4135 function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
4136 tree type, int named)
4138 struct mips_arg_info info;
4140 /* We will be called with a mode of VOIDmode after the last argument
4141 has been seen. Whatever we return will be passed to the call
4142 insn. If we need a mips16 fp_code, return a REG with the code
4143 stored as the mode. */
4144 if (mode == VOIDmode)
4146 if (TARGET_MIPS16 && cum->fp_code != 0)
4147 return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
4153 mips_arg_info (cum, mode, type, named, &info);
4155 /* Return straight away if the whole argument is passed on the stack. */
4156 if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
/* NOTE(review): the guarding conditions here (presumably an ABI and
   type-size check) are partially elided in this excerpt.  */
4160 && TREE_CODE (type) == RECORD_TYPE
4162 && TYPE_SIZE_UNIT (type)
4163 && host_integerp (TYPE_SIZE_UNIT (type), 1)
4166 /* The Irix 6 n32/n64 ABIs say that if any 64-bit chunk of the
4167 structure contains a double in its entirety, then that 64-bit
4168 chunk is passed in a floating point register. */
4171 /* First check to see if there is any such field. */
4172 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
4173 if (TREE_CODE (field) == FIELD_DECL
4174 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4175 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4176 && host_integerp (bit_position (field), 0)
4177 && int_bit_position (field) % BITS_PER_WORD == 0)
4182 /* Now handle the special case by returning a PARALLEL
4183 indicating where each 64-bit chunk goes. INFO.REG_WORDS
4184 chunks are passed in registers. */
4186 HOST_WIDE_INT bitpos;
4189 /* assign_parms checks the mode of ENTRY_PARM, so we must
4190 use the actual mode here. */
4191 ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
4194 field = TYPE_FIELDS (type);
4195 for (i = 0; i < info.reg_words; i++)
/* Find the field (if any) starting at or after this chunk.  */
4199 for (; field; field = TREE_CHAIN (field))
4200 if (TREE_CODE (field) == FIELD_DECL
4201 && int_bit_position (field) >= bitpos)
/* A word-aligned double exactly covering the chunk goes in an FPR.  */
4205 && int_bit_position (field) == bitpos
4206 && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
4207 && !TARGET_SOFT_FLOAT
4208 && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
4209 reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
4211 reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
4214 = gen_rtx_EXPR_LIST (VOIDmode, reg,
4215 GEN_INT (bitpos / BITS_PER_UNIT));
4217 bitpos += BITS_PER_WORD;
4223 /* Handle the n32/n64 conventions for passing complex floating-point
4224 arguments in FPR pairs. The real part goes in the lower register
4225 and the imaginary part goes in the upper register. */
4228 && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
4231 enum machine_mode inner;
4234 inner = GET_MODE_INNER (mode);
4235 reg = FP_ARG_FIRST + info.reg_offset;
4236 if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
4238 /* Real part in registers, imaginary part on stack. */
4239 gcc_assert (info.stack_words == info.reg_words);
4240 return gen_rtx_REG (inner, reg);
/* Both parts in registers: PARALLEL of (real, imag) halves.  */
4244 gcc_assert (info.stack_words == 0);
4245 real = gen_rtx_EXPR_LIST (VOIDmode,
4246 gen_rtx_REG (inner, reg),
4248 imag = gen_rtx_EXPR_LIST (VOIDmode,
4250 reg + info.reg_words / 2),
4251 GEN_INT (GET_MODE_SIZE (inner)));
4252 return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
/* Common case: a single register.  */
4256 return gen_rtx_REG (mode, mips_arg_regno (&info, TARGET_HARD_FLOAT));
4260 /* Implement TARGET_ARG_PARTIAL_BYTES. */
4263 mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
4264 enum machine_mode mode, tree type, bool named)
4266 struct mips_arg_info info;
/* If the argument is split between registers and the stack, the
   register part's size is the "partial" byte count; otherwise 0.  */
4268 mips_arg_info (cum, mode, type, named, &info);
4269 return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
4273 /* Implement FUNCTION_ARG_BOUNDARY. Every parameter gets at least
4274 PARM_BOUNDARY bits of alignment, but will be given anything up
4275 to STACK_BOUNDARY bits if the type requires it. */
4278 function_arg_boundary (enum machine_mode mode, tree type)
4280 unsigned int alignment;
/* Clamp the type's (or mode's) natural alignment into
   [PARM_BOUNDARY, STACK_BOUNDARY]; the final return of ALIGNMENT
   appears to be elided from this excerpt.  */
4282 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4283 if (alignment < PARM_BOUNDARY)
4284 alignment = PARM_BOUNDARY;
4285 if (alignment > STACK_BOUNDARY)
4286 alignment = STACK_BOUNDARY;
4290 /* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4291 upward rather than downward. In other words, return true if the
4292 first byte of the stack slot has useful data, false if the last
4296 mips_pad_arg_upward (enum machine_mode mode, tree type)
4298 /* On little-endian targets, the first byte of every stack argument
4299 is passed in the first byte of the stack slot. */
4300 if (!BYTES_BIG_ENDIAN)
4303 /* Otherwise, integral types are padded downward: the last byte of a
4304 stack argument is passed in the last byte of the stack slot. */
/* Test the tree type when available, else fall back to the mode.  */
4306 ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4307 : GET_MODE_CLASS (mode) == MODE_INT)
4310 /* Big-endian o64 pads floating-point arguments downward. */
4311 if (mips_abi == ABI_O64)
4312 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4315 /* Other types are padded upward for o32, o64, n32 and n64. */
4316 if (mips_abi != ABI_EABI)
4319 /* Arguments smaller than a stack slot are padded downward. */
4320 if (mode != BLKmode)
4321 return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4323 return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4327 /* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...). Return !BYTES_BIG_ENDIAN
4328 if the least significant byte of the register has useful data. Return
4329 the opposite if the most significant byte does. */
4332 mips_pad_reg_upward (enum machine_mode mode, tree type)
4334 /* No shifting is required for floating-point arguments. */
4335 if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4336 return !BYTES_BIG_ENDIAN;
4338 /* Otherwise, apply the same padding to register arguments as we do
4339 to stack arguments. */
4340 return mips_pad_arg_upward (mode, type);
/* Implement TARGET_SETUP_INCOMING_VARARGS: spill the anonymous
   register arguments of a varargs function to the stack so that
   va_arg can find them.  */
4344 mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4345 tree type, int *pretend_size ATTRIBUTE_UNUSED,
4348 CUMULATIVE_ARGS local_cum;
4349 int gp_saved, fp_saved;
4351 /* The caller has advanced CUM up to, but not beyond, the last named
4352 argument. Advance a local copy of CUM past the last "real" named
4353 argument, to find out how many registers are left over. */
4356 FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4358 /* Found out how many registers we need to save. */
4359 gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4360 fp_saved = (EABI_FLOAT_VARARGS_P
4361 ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
/* The GPR save area sits just below the incoming-args pointer.  */
4370 ptr = plus_constant (virtual_incoming_args_rtx,
4371 REG_PARM_STACK_SPACE (cfun->decl)
4372 - gp_saved * UNITS_PER_WORD);
4373 mem = gen_rtx_MEM (BLKmode, ptr);
4374 set_mem_alias_set (mem, get_varargs_alias_set ());
4376 move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4381 /* We can't use move_block_from_reg, because it will use
4383 enum machine_mode mode;
4386 /* Set OFF to the offset from virtual_incoming_args_rtx of
4387 the first float register. The FP save area lies below
4388 the integer one, and is aligned to UNITS_PER_FPVALUE bytes. */
4389 off = -gp_saved * UNITS_PER_WORD;
4390 off &= ~(UNITS_PER_FPVALUE - 1);
4391 off -= fp_saved * UNITS_PER_FPREG;
4393 mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4395 for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS;
4396 i += MAX_FPRS_PER_FMT)
4400 ptr = plus_constant (virtual_incoming_args_rtx, off);
4401 mem = gen_rtx_MEM (mode, ptr);
4402 set_mem_alias_set (mem, get_varargs_alias_set ());
4403 emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4404 off += UNITS_PER_HWFPVALUE;
/* Record the save-area size so the prologue can allocate it.  */
4408 if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4409 cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4410 + fp_saved * UNITS_PER_FPREG);
4413 /* Create the va_list data type.
4414 We keep 3 pointers, and two offsets.
4415 Two pointers are to the overflow area, which starts at the CFA.
4416 One of these is constant, for addressing into the GPR save area below it.
4417 The other is advanced up the stack through the overflow region.
4418 The third pointer is to the GPR save area. Since the FPR save area
4419 is just below it, we can address FPR slots off this pointer.
4420 We also keep two one-byte offsets, which are to be subtracted from the
4421 constant pointers to yield addresses in the GPR and FPR save areas.
4422 These are downcounted as float or non-float arguments are used,
4423 and when they get to zero, the argument must be obtained from the
4425 If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4426 pointer is enough. It's started at the GPR save area, and is
4428 Note that the GPR save area is not constant size, due to optimization
4429 in the prologue. Hence, we can't use a design with two pointers
4430 and two offsets, although we could have designed this with two pointers
4431 and three offsets. */
4434 mips_build_builtin_va_list (void)
4436 if (EABI_FLOAT_VARARGS_P)
4438 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4441 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
/* Fields as described in the layout comment above.  */
4443 f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4445 f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4447 f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4449 f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4450 unsigned_char_type_node);
4451 f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4452 unsigned_char_type_node);
4453 /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4454 warn on every user file. */
4455 index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4456 array = build_array_type (unsigned_char_type_node,
4457 build_index_type (index));
4458 f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4460 DECL_FIELD_CONTEXT (f_ovfl) = record;
4461 DECL_FIELD_CONTEXT (f_gtop) = record;
4462 DECL_FIELD_CONTEXT (f_ftop) = record;
4463 DECL_FIELD_CONTEXT (f_goff) = record;
4464 DECL_FIELD_CONTEXT (f_foff) = record;
4465 DECL_FIELD_CONTEXT (f_res) = record;
/* Chain the fields in declaration order.  */
4467 TYPE_FIELDS (record) = f_ovfl;
4468 TREE_CHAIN (f_ovfl) = f_gtop;
4469 TREE_CHAIN (f_gtop) = f_ftop;
4470 TREE_CHAIN (f_ftop) = f_goff;
4471 TREE_CHAIN (f_goff) = f_foff;
4472 TREE_CHAIN (f_foff) = f_res;
4474 layout_type (record);
4477 else if (TARGET_IRIX && TARGET_IRIX6)
4478 /* On IRIX 6, this type is 'char *'. */
4479 return build_pointer_type (char_type_node);
4481 /* Otherwise, we use 'void *'. */
4482 return ptr_type_node;
4485 /* Implement va_start. */
4488 mips_va_start (tree valist, rtx nextarg)
4490 if (EABI_FLOAT_VARARGS_P)
4492 const CUMULATIVE_ARGS *cum;
4493 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4494 tree ovfl, gtop, ftop, goff, foff;
4496 int gpr_save_area_size;
4497 int fpr_save_area_size;
/* FIX(review): "¤t" was character-mangled from "&curren"; the
   statement must take the address of current_function_args_info.  */
4500 cum = &current_function_args_info;
/* Save-area sizes: the registers NOT consumed by named arguments.  */
4502 = (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4504 = (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
/* Walk the field chain built by mips_build_builtin_va_list.  */
4506 f_ovfl = TYPE_FIELDS (va_list_type_node);
4507 f_gtop = TREE_CHAIN (f_ovfl);
4508 f_ftop = TREE_CHAIN (f_gtop);
4509 f_goff = TREE_CHAIN (f_ftop);
4510 f_foff = TREE_CHAIN (f_goff);
4512 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4514 gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4516 ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4518 goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4520 foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4523 /* Emit code to initialize OVFL, which points to the next varargs
4524 stack argument. CUM->STACK_WORDS gives the number of stack
4525 words used by named arguments. */
4526 t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4527 if (cum->stack_words > 0)
4528 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), t,
4529 size_int (cum->stack_words * UNITS_PER_WORD));
4530 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4531 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4533 /* Emit code to initialize GTOP, the top of the GPR save area. */
4534 t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4535 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (gtop), gtop, t);
4536 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4538 /* Emit code to initialize FTOP, the top of the FPR save area.
4539 This address is gpr_save_area_bytes below GTOP, rounded
4540 down to the next fp-aligned boundary. */
4541 t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4542 fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4543 fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4545 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ftop), t,
4546 size_int (-fpr_offset));
4547 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ftop), ftop, t);
4548 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4550 /* Emit code to initialize GOFF, the offset from GTOP of the
4551 next GPR argument. */
4552 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (goff), goff,
4553 build_int_cst (NULL_TREE, gpr_save_area_size));
4554 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4556 /* Likewise emit code to initialize FOFF, the offset from FTOP
4557 of the next FPR argument. */
4558 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (foff), foff,
4559 build_int_cst (NULL_TREE, fpr_save_area_size));
4560 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
/* Non-EABI: standard va_start over the merged save area.  */
4564 nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4565 std_expand_builtin_va_start (valist, nextarg);
4569 /* Implement va_arg. */
/* Gimplify a va_arg access to VALIST for an argument of the given TYPE,
   appending any generated statements to *PRE_P (and *POST_P where needed).
   For ABIs other than EABI-with-FP-varargs this simply defers to the
   generic std_gimplify_va_arg_expr; otherwise it chooses between the FPR
   and GPR register save areas recorded in the MIPS va_list structure,
   falling back to the stack overflow area when the registers are used up.
   NOTE(review): this extract appears to have interior lines elided, so the
   comments below describe only what is visible.  */
4572 mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4574 HOST_WIDE_INT size, rsize;
/* Arguments passed by reference are accessed through a pointer, so
   rewrite TYPE accordingly before computing sizes.  */
4578 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4581 type = build_pointer_type (type);
/* RSIZE is SIZE rounded up to a whole number of words.  */
4583 size = int_size_in_bytes (type);
4584 rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4586 if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4587 addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4590 /* Not a simple merged stack. */
4592 tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4593 tree ovfl, top, off, align;
4594 HOST_WIDE_INT osize;
/* Pick apart the five fields of the EABI va_list record in declaration
   order: overflow area pointer, GPR top, FPR top, GPR offset, FPR offset.  */
4597 f_ovfl = TYPE_FIELDS (va_list_type_node);
4598 f_gtop = TREE_CHAIN (f_ovfl);
4599 f_ftop = TREE_CHAIN (f_gtop);
4600 f_goff = TREE_CHAIN (f_ftop);
4601 f_foff = TREE_CHAIN (f_goff);
4603 /* We maintain separate pointers and offsets for floating-point
4604 and integer arguments, but we need similar code in both cases.
4607 TOP be the top of the register save area;
4608 OFF be the offset from TOP of the next register;
4609 ADDR_RTX be the address of the argument;
4610 RSIZE be the number of bytes used to store the argument
4611 when it's in the register save area;
4612 OSIZE be the number of bytes used to store it when it's
4613 in the stack overflow area; and
4614 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4616 The code we want is:
4618 1: off &= -rsize; // round down
4621 4: addr_rtx = top - off;
4626 9: ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4627 10: addr_rtx = ovfl + PADDING;
4631 [1] and [9] can sometimes be optimized away. */
4633 ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
/* Floating-point values that fit in an FPR come from the FPR save area;
   everything else comes from the GPR save area.  */
4636 if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4637 && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4639 top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4641 off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4644 /* When floating-point registers are saved to the stack,
4645 each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4646 of the float's precision. */
4647 rsize = UNITS_PER_HWFPVALUE;
4649 /* Overflow arguments are padded to UNITS_PER_WORD bytes
4650 (= PARM_BOUNDARY bits). This can be different from RSIZE
4653 (1) On 32-bit targets when TYPE is a structure such as:
4655 struct s { float f; };
4657 Such structures are passed in paired FPRs, so RSIZE
4658 will be 8 bytes. However, the structure only takes
4659 up 4 bytes of memory, so OSIZE will only be 4.
4661 (2) In combinations such as -mgp64 -msingle-float
4662 -fshort-double. Doubles passed in registers
4663 will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4664 but those passed on the stack take up
4665 UNITS_PER_WORD bytes. */
4666 osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4670 top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4672 off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4674 if (rsize > UNITS_PER_WORD)
4676 /* [1] Emit code for: off &= -rsize. */
4677 t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4678 build_int_cst (NULL_TREE, -rsize));
4679 t = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (off), off, t);
4680 gimplify_and_add (t, pre_p);
4685 /* [2] Emit code to branch if off == 0. */
4686 t = build2 (NE_EXPR, boolean_type_node, off,
4687 build_int_cst (TREE_TYPE (off), 0));
4688 addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4690 /* [5] Emit code for: off -= rsize. We do this as a form of
4691 post-increment not available to C. Also widen for the
4692 coming pointer arithmetic. */
4693 t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4694 t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4695 t = fold_convert (sizetype, t);
4696 t = fold_build1 (NEGATE_EXPR, sizetype, t);
4698 /* [4] Emit code for: addr_rtx = top - off. On big endian machines,
4699 the argument has RSIZE - SIZE bytes of leading padding. */
4700 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (top), top, t);
4701 if (BYTES_BIG_ENDIAN && rsize > size)
4703 u = size_int (rsize - size);
4704 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4706 COND_EXPR_THEN (addr) = t;
4708 if (osize > UNITS_PER_WORD)
4710 /* [9] Emit: ovfl += ((intptr_t) ovfl + osize - 1) & -osize. */
4711 u = size_int (osize - 1);
4712 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4713 t = fold_convert (sizetype, t);
4714 u = size_int (-osize);
4715 t = build2 (BIT_AND_EXPR, sizetype, t, u);
4716 t = fold_convert (TREE_TYPE (ovfl), t);
4717 align = build2 (GIMPLE_MODIFY_STMT, TREE_TYPE (ovfl), ovfl, t);
4722 /* [10, 11]. Emit code to store ovfl in addr_rtx, then
4723 post-increment ovfl by osize. On big-endian machines,
4724 the argument has OSIZE - SIZE bytes of leading padding. */
4725 u = fold_convert (TREE_TYPE (ovfl),
4726 build_int_cst (NULL_TREE, osize));
4727 t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4728 if (BYTES_BIG_ENDIAN && osize > size)
4730 u = size_int (osize - size);
4731 t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, u);
4734 /* String [9] and [10,11] together. */
4736 t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4737 COND_EXPR_ELSE (addr) = t;
/* For by-reference arguments, ADDR holds a pointer to a pointer;
   dereference an extra level to reach the actual object.  */
4739 addr = fold_convert (build_pointer_type (type), addr);
4740 addr = build_va_arg_indirect_ref (addr);
4744 addr = build_va_arg_indirect_ref (addr);
4749 /* Return true if it is possible to use left/right accesses for a
4750 bitfield of WIDTH bits starting BITPOS bits into *OP. When
4751 returning true, update *OP, *LEFT and *RIGHT as follows:
4753 *OP is a BLKmode reference to the whole field.
4755 *LEFT is a QImode reference to the first byte if big endian or
4756 the last byte if little endian. This address can be used in the
4757 left-side instructions (lwl, swl, ldl, sdl).
4759 *RIGHT is a QImode reference to the opposite end of the field and
4760 can be used in the patterning right-side instruction. */
4763 mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4764 rtx *left, rtx *right)
4768 /* Check that the operand really is a MEM. Not all the extv and
4769 extzv predicates are checked. */
4773 /* Check that the size is valid. */
/* Left/right pairs only exist for full 32-bit accesses, and for
   64-bit accesses on 64-bit targets (ldl/ldr, sdl/sdr).  */
4774 if (width != 32 && (!TARGET_64BIT || width != 64))
4777 /* We can only access byte-aligned values. Since we are always passed
4778 a reference to the first byte of the field, it is not necessary to
4779 do anything with BITPOS after this check. */
4780 if (bitpos % BITS_PER_UNIT != 0)
4783 /* Reject aligned bitfields: we want to use a normal load or store
4784 instead of a left/right pair. */
4785 if (MEM_ALIGN (*op) >= width)
4788 /* Adjust *OP to refer to the whole field. This also has the effect
4789 of legitimizing *OP's address for BLKmode, possibly simplifying it. */
4790 *op = adjust_address (*op, BLKmode, 0);
4791 set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4793 /* Get references to both ends of the field. We deliberately don't
4794 use the original QImode *OP for FIRST since the new BLKmode one
4795 might have a simpler address. */
4796 first = adjust_address (*op, QImode, 0);
4797 last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4799 /* Allocate to LEFT and RIGHT according to endianness. LEFT should
4800 be the upper word and RIGHT the lower word. */
4801 if (TARGET_BIG_ENDIAN)
4802 *left = first, *right = last;
4804 *left = last, *right = first;
4810 /* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4811 Return true on success. We only handle cases where zero_extract is
4812 equivalent to sign_extract. */
4815 mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4817 rtx left, right, temp;
4819 /* If TARGET_64BIT, the destination of a 32-bit load will be a
4820 paradoxical word_mode subreg. This is the only case in which
4821 we allow the destination to be larger than the source. */
4822 if (GET_CODE (dest) == SUBREG
4823 && GET_MODE (dest) == DImode
4824 && SUBREG_BYTE (dest) == 0
4825 && GET_MODE (SUBREG_REG (dest)) == SImode)
4826 dest = SUBREG_REG (dest);
4828 /* After the above adjustment, the destination must be the same
4829 width as the source. */
4830 if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4833 if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
/* Load through a fresh temporary: the left-part load writes TEMP and
   the right-part load merges TEMP with the remaining bytes into DEST.  */
4836 temp = gen_reg_rtx (GET_MODE (dest));
4837 if (GET_MODE (dest) == DImode)
4839 emit_insn (gen_mov_ldl (temp, src, left));
4840 emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4844 emit_insn (gen_mov_lwl (temp, src, left));
4845 emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4851 /* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC). Return
4855 mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4858 enum machine_mode mode;
4860 if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
/* Narrow SRC to an integer mode of exactly WIDTH bits before storing.  */
4863 mode = mode_for_size (width, MODE_INT, 0);
4864 src = gen_lowpart (mode, src);
/* Emit the left-part then right-part store (sdl/sdr for 64-bit,
   swl/swr for 32-bit).  */
4868 emit_insn (gen_mov_sdl (dest, src, left));
4869 emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4873 emit_insn (gen_mov_swl (dest, src, left));
4874 emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4879 /* Return true if X is a MEM with the same size as MODE. */
4882 mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
/* MEM_SIZE may be null when the size is unknown; treat that as "no".  */
4889 size = MEM_SIZE (x);
4890 return size && INTVAL (size) == GET_MODE_SIZE (mode);
4893 /* Return true if (zero_extract OP SIZE POSITION) can be used as the
4894 source of an "ext" instruction or the destination of an "ins"
4895 instruction. OP must be a register operand and the following
4896 conditions must hold:
4898 0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4899 0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4900 0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4902 Also reject lengths equal to a word as they are better handled
4903 by the move patterns. */
4906 mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4908 HOST_WIDE_INT len, pos;
/* ins/ext only exist on ISAs with those instructions, only operate on
   registers, and only on values no wider than a word.  */
4910 if (!ISA_HAS_EXT_INS
4911 || !register_operand (op, VOIDmode)
4912 || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4915 len = INTVAL (size);
4916 pos = INTVAL (position);
/* Enforce the range conditions listed in the header comment (note the
   strict "len >= bitsize" rejection implements the "not a whole word"
   rule above).  */
4918 if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4919 || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4925 /* Set up globals to generate code for the ISA or processor
4926 described by INFO. */
4929 mips_set_architecture (const struct mips_cpu_info *info)
/* Record the selected CPU table entry, its processor enum and its ISA
   level in the corresponding globals.  */
4933 mips_arch_info = info;
4934 mips_arch = info->cpu;
4935 mips_isa = info->isa;
4940 /* Likewise for tuning. */
4943 mips_set_tune (const struct mips_cpu_info *info)
/* Record the processor we are tuning (not necessarily generating) for.  */
4947 mips_tune_info = info;
4948 mips_tune = info->cpu;
4952 /* Implement TARGET_HANDLE_OPTION. */
/* Process a single command-line option CODE with argument ARG.  The
   visible cases map -mabi= strings to ABI enumerators, parse -march=/
   -mips= CPU names, and clear the cache-flush function for
   -mno-flush-func.  Returns false (via the parse helpers) when ARG is
   not recognized.  */
4955 mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4960 if (strcmp (arg, "32") == 0)
4962 else if (strcmp (arg, "o64") == 0)
4964 else if (strcmp (arg, "n32") == 0)
4966 else if (strcmp (arg, "64") == 0)
4968 else if (strcmp (arg, "eabi") == 0)
4969 mips_abi = ABI_EABI;
/* -march=<cpu>: succeed iff the CPU name parses.  */
4976 return mips_parse_cpu (arg) != 0;
/* -mips<n>: prepend "mips" and parse as an ISA-level CPU name.  */
4979 mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4980 return mips_isa_info != 0;
4982 case OPT_mno_flush_func:
4983 mips_cache_flush_func = NULL;
4991 /* Set up the threshold for data to go into the small data area, instead
4992 of the normal data area, and detect any conflicts in the switches. */
/* Implements OVERRIDE_OPTIONS: reconciles -march/-mtune/-mabi/-mgp*/
   -mfp* and the various MIPS feature masks, diagnoses unsupported
   combinations, and initializes the per-register and per-mode tables
   (debug/DWARF register maps, mips_hard_regno_mode_ok, relocation
   operator strings).  */
4995 override_options (void)
4997 int i, start, regno;
4998 enum machine_mode mode;
5000 #ifdef SUBTARGET_OVERRIDE_OPTIONS
5001 SUBTARGET_OVERRIDE_OPTIONS;
5004 mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
5006 /* The following code determines the architecture and register size.
5007 Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
5008 The GAS and GCC code should be kept in sync as much as possible. */
5010 if (mips_arch_string != 0)
5011 mips_set_architecture (mips_parse_cpu (mips_arch_string));
5013 if (mips_isa_info != 0)
5015 if (mips_arch_info == 0)
5016 mips_set_architecture (mips_isa_info);
5017 else if (mips_arch_info->isa != mips_isa_info->isa)
5018 error ("-%s conflicts with the other architecture options, "
5019 "which specify a %s processor",
5020 mips_isa_info->name,
5021 mips_cpu_info_from_isa (mips_arch_info->isa)->name);
5024 if (mips_arch_info == 0)
5026 #ifdef MIPS_CPU_STRING_DEFAULT
5027 mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
5029 mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
5033 if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
5034 error ("-march=%s is not compatible with the selected ABI",
5035 mips_arch_info->name);
5037 /* Optimize for mips_arch, unless -mtune selects a different processor. */
5038 if (mips_tune_string != 0)
5039 mips_set_tune (mips_parse_cpu (mips_tune_string));
5041 if (mips_tune_info == 0)
5042 mips_set_tune (mips_arch_info);
5044 /* Set cost structure for the processor. */
5046 mips_cost = &mips_rtx_cost_optimize_size;
5048 mips_cost = &mips_rtx_cost_data[mips_tune];
5050 /* If the user hasn't specified a branch cost, use the processor's
5052 if (mips_branch_cost == 0)
5053 mips_branch_cost = mips_cost->branch_cost;
5055 if ((target_flags_explicit & MASK_64BIT) != 0)
5057 /* The user specified the size of the integer registers. Make sure
5058 it agrees with the ABI and ISA. */
5059 if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
5060 error ("-mgp64 used with a 32-bit processor");
5061 else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
5062 error ("-mgp32 used with a 64-bit ABI");
5063 else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
5064 error ("-mgp64 used with a 32-bit ABI");
5068 /* Infer the integer register size from the ABI and processor.
5069 Restrict ourselves to 32-bit registers if that's all the
5070 processor has, or if the ABI cannot handle 64-bit registers. */
5071 if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
5072 target_flags &= ~MASK_64BIT;
5074 target_flags |= MASK_64BIT;
5077 if ((target_flags_explicit & MASK_FLOAT64) != 0)
5079 /* Really, -mfp32 and -mfp64 are ornamental options. There's
5080 only one right answer here. */
5081 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
5082 error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
5083 else if (!TARGET_64BIT && TARGET_FLOAT64
5084 && !(ISA_HAS_MXHC1 && mips_abi == ABI_32))
5085 error ("-mgp32 and -mfp64 can only be combined if the target"
5086 " supports the mfhc1 and mthc1 instructions");
5087 else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
5088 error ("unsupported combination: %s", "-mfp64 -msingle-float");
5092 /* -msingle-float selects 32-bit float registers. Otherwise the
5093 float registers should be the same size as the integer ones. */
5094 if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
5095 target_flags |= MASK_FLOAT64;
5097 target_flags &= ~MASK_FLOAT64;
5100 /* End of code shared with GAS. */
5102 if ((target_flags_explicit & MASK_LONG64) == 0)
5104 if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
5105 target_flags |= MASK_LONG64;
5107 target_flags &= ~MASK_LONG64;
5110 if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
5111 && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
5113 /* For some configurations, it is useful to have -march control
5114 the default setting of MASK_SOFT_FLOAT. */
5115 switch ((int) mips_arch)
5117 case PROCESSOR_R4100:
5118 case PROCESSOR_R4111:
5119 case PROCESSOR_R4120:
5120 case PROCESSOR_R4130:
5121 target_flags |= MASK_SOFT_FLOAT;
5125 target_flags &= ~MASK_SOFT_FLOAT;
5131 flag_pcc_struct_return = 0;
5133 if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
5135 /* If neither -mbranch-likely nor -mno-branch-likely was given
5136 on the command line, set MASK_BRANCHLIKELY based on the target
5139 By default, we enable use of Branch Likely instructions on
5140 all architectures which support them with the following
5141 exceptions: when creating MIPS32 or MIPS64 code, and when
5142 tuning for architectures where their use tends to hurt
5145 The MIPS32 and MIPS64 architecture specifications say "Software
5146 is strongly encouraged to avoid use of Branch Likely
5147 instructions, as they will be removed from a future revision
5148 of the [MIPS32 and MIPS64] architecture." Therefore, we do not
5149 issue those instructions unless instructed to do so by
5151 if (ISA_HAS_BRANCHLIKELY
5152 && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64)
5153 && !(TUNE_MIPS5500 || TUNE_SB1))
5154 target_flags |= MASK_BRANCHLIKELY;
5156 target_flags &= ~MASK_BRANCHLIKELY;
5158 if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
5159 warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
5161 /* The effect of -mabicalls isn't defined for the EABI. */
5162 if (mips_abi == ABI_EABI && TARGET_ABICALLS)
5164 error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
5165 target_flags &= ~MASK_ABICALLS;
5168 if (TARGET_ABICALLS)
5170 /* We need to set flag_pic for executables as well as DSOs
5171 because we may reference symbols that are not defined in
5172 the final executable. (MIPS does not use things like
5173 copy relocs, for example.)
5175 Also, there is a body of code that uses __PIC__ to distinguish
5176 between -mabicalls and -mno-abicalls code. */
5178 if (mips_section_threshold > 0)
5179 warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
5182 if (TARGET_VXWORKS_RTP && mips_section_threshold > 0)
5183 warning (0, "-G and -mrtp are incompatible");
5185 /* mips_split_addresses is a half-way house between explicit
5186 relocations and the traditional assembler macros. It can
5187 split absolute 32-bit symbolic constants into a high/lo_sum
5188 pair but uses macros for other sorts of access.
5190 Like explicit relocation support for REL targets, it relies
5191 on GNU extensions in the assembler and the linker.
5193 Although this code should work for -O0, it has traditionally
5194 been treated as an optimization. */
5195 if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
5196 && optimize && !flag_pic
5197 && !ABI_HAS_64BIT_SYMBOLS)
5198 mips_split_addresses = 1;
5200 mips_split_addresses = 0;
5202 /* -mvr4130-align is a "speed over size" optimization: it usually produces
5203 faster code, but at the expense of more nops. Enable it at -O3 and
5205 if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
5206 target_flags |= MASK_VR4130_ALIGN;
5208 /* When compiling for the mips16, we cannot use floating point. We
5209 record the original hard float value in mips16_hard_float. */
5212 if (TARGET_SOFT_FLOAT)
5213 mips16_hard_float = 0;
5215 mips16_hard_float = 1;
5216 target_flags |= MASK_SOFT_FLOAT;
5218 /* Don't run the scheduler before reload, since it tends to
5219 increase register pressure. */
5220 flag_schedule_insns = 0;
5222 /* Don't do hot/cold partitioning. The constant layout code expects
5223 the whole function to be in a single section. */
5224 flag_reorder_blocks_and_partition = 0;
5226 /* Silently disable -mexplicit-relocs since it doesn't apply
5227 to mips16 code. Even so, it would be overly pedantic to warn
5228 about "-mips16 -mexplicit-relocs", especially given that
5229 we use a %gprel() operator. */
5230 target_flags &= ~MASK_EXPLICIT_RELOCS;
5233 /* When using explicit relocs, we call dbr_schedule from within
5235 if (TARGET_EXPLICIT_RELOCS)
5237 mips_flag_delayed_branch = flag_delayed_branch;
5238 flag_delayed_branch = 0;
5241 #ifdef MIPS_TFMODE_FORMAT
5242 REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
5245 /* Make sure that the user didn't turn off paired single support when
5246 MIPS-3D support is requested. */
5247 if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
5248 && !TARGET_PAIRED_SINGLE_FLOAT)
5249 error ("-mips3d requires -mpaired-single");
5251 /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT. */
5253 target_flags |= MASK_PAIRED_SINGLE_FLOAT;
5255 /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
5256 and TARGET_HARD_FLOAT are both true. */
5257 if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
5258 error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
5260 /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
5262 if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
5263 error ("-mips3d/-mpaired-single must be used with -mips64");
5265 /* If TARGET_DSPR2, enable MASK_DSP. */
5267 target_flags |= MASK_DSP;
5269 if (TARGET_MIPS16 && TARGET_DSP)
5270 error ("-mips16 and -mdsp cannot be used together");
/* Register the characters that PRINT_OPERAND treats as punctuation.  */
5272 mips_print_operand_punct['?'] = 1;
5273 mips_print_operand_punct['#'] = 1;
5274 mips_print_operand_punct['/'] = 1;
5275 mips_print_operand_punct['&'] = 1;
5276 mips_print_operand_punct['!'] = 1;
5277 mips_print_operand_punct['*'] = 1;
5278 mips_print_operand_punct['@'] = 1;
5279 mips_print_operand_punct['.'] = 1;
5280 mips_print_operand_punct['('] = 1;
5281 mips_print_operand_punct[')'] = 1;
5282 mips_print_operand_punct['['] = 1;
5283 mips_print_operand_punct[']'] = 1;
5284 mips_print_operand_punct['<'] = 1;
5285 mips_print_operand_punct['>'] = 1;
5286 mips_print_operand_punct['{'] = 1;
5287 mips_print_operand_punct['}'] = 1;
5288 mips_print_operand_punct['^'] = 1;
5289 mips_print_operand_punct['$'] = 1;
5290 mips_print_operand_punct['+'] = 1;
5291 mips_print_operand_punct['~'] = 1;
5293 /* Set up array to map GCC register number to debug register number.
5294 Ignore the special purpose register numbers. */
5296 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5298 mips_dbx_regno[i] = INVALID_REGNUM;
5299 if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
5300 mips_dwarf_regno[i] = i;
5302 mips_dwarf_regno[i] = INVALID_REGNUM;
5305 start = GP_DBX_FIRST - GP_REG_FIRST;
5306 for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5307 mips_dbx_regno[i] = i + start;
5309 start = FP_DBX_FIRST - FP_REG_FIRST;
5310 for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5311 mips_dbx_regno[i] = i + start;
5313 /* HI and LO debug registers use big-endian ordering. */
5314 mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5315 mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5316 mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
5317 mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
5318 for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
5320 mips_dwarf_regno[i + TARGET_LITTLE_ENDIAN] = i;
5321 mips_dwarf_regno[i + TARGET_BIG_ENDIAN] = i + 1;
5324 /* Set up array giving whether a given register can hold a given mode. */
5326 for (mode = VOIDmode;
5327 mode != MAX_MACHINE_MODE;
5328 mode = (enum machine_mode) ((int)mode + 1))
5330 register int size = GET_MODE_SIZE (mode);
5331 register enum mode_class class = GET_MODE_CLASS (mode);
5333 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5337 if (mode == CCV2mode)
5340 && (regno - ST_REG_FIRST) % 2 == 0);
5342 else if (mode == CCV4mode)
5345 && (regno - ST_REG_FIRST) % 4 == 0);
5347 else if (mode == CCmode)
5350 temp = (regno == FPSW_REGNUM);
5352 temp = (ST_REG_P (regno) || GP_REG_P (regno)
5353 || FP_REG_P (regno));
5356 else if (GP_REG_P (regno))
5357 temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5359 else if (FP_REG_P (regno))
5360 temp = ((((regno % MAX_FPRS_PER_FMT) == 0)
5361 || (MIN_FPRS_PER_FMT == 1
5362 && size <= UNITS_PER_FPREG))
5363 && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5364 || class == MODE_VECTOR_FLOAT)
5365 && size <= UNITS_PER_FPVALUE)
5366 /* Allow integer modes that fit into a single
5367 register. We need to put integers into FPRs
5368 when using instructions like cvt and trunc.
5369 We can't allow sizes smaller than a word,
5370 the FPU has no appropriate load/store
5371 instructions for those. */
5372 || (class == MODE_INT
5373 && size >= MIN_UNITS_PER_WORD
5374 && size <= UNITS_PER_FPREG)
5375 /* Allow TFmode for CCmode reloads. */
5376 || (ISA_HAS_8CC && mode == TFmode)));
5378 else if (ACC_REG_P (regno))
5379 temp = (INTEGRAL_MODE_P (mode)
5380 && size <= UNITS_PER_WORD * 2
5381 && (size <= UNITS_PER_WORD
5382 || regno == MD_REG_FIRST
5383 || (DSP_ACC_REG_P (regno)
5384 && ((regno - DSP_ACC_REG_FIRST) & 1) == 0)));
5386 else if (ALL_COP_REG_P (regno))
5387 temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5391 mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5395 /* Save GPR registers in word_mode sized hunks. word_mode hasn't been
5396 initialized yet, so we can't use that here. */
5397 gpr_mode = TARGET_64BIT ? DImode : SImode;
5399 /* Provide default values for align_* for 64-bit targets. */
5400 if (TARGET_64BIT && !TARGET_MIPS16)
5402 if (align_loops == 0)
5404 if (align_jumps == 0)
5406 if (align_functions == 0)
5407 align_functions = 8;
5410 /* Function to allocate machine-dependent function status. */
5411 init_machine_status = &mips_init_machine_status;
/* Choose the %hi/%lo-style relocation operators used when printing
   symbolic addresses, depending on ABI and relocation style.  */
5413 if (ABI_HAS_64BIT_SYMBOLS)
5415 if (TARGET_EXPLICIT_RELOCS)
5417 mips_split_p[SYMBOL_64_HIGH] = true;
5418 mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5419 mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5421 mips_split_p[SYMBOL_64_MID] = true;
5422 mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5423 mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5425 mips_split_p[SYMBOL_64_LOW] = true;
5426 mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5427 mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5429 mips_split_p[SYMBOL_GENERAL] = true;
5430 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5435 if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5437 mips_split_p[SYMBOL_GENERAL] = true;
5438 mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5439 mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5445 /* The high part is provided by a pseudo copy of $gp. */
5446 mips_split_p[SYMBOL_SMALL_DATA] = true;
5447 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5450 if (TARGET_EXPLICIT_RELOCS)
5452 /* Small data constants are kept whole until after reload,
5453 then lowered by mips_rewrite_small_data. */
5454 mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
5456 mips_split_p[SYMBOL_GOT_PAGE_OFST] = true;
5459 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5460 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%got_ofst(";
5464 mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5465 mips_lo_relocs[SYMBOL_GOT_PAGE_OFST] = "%lo(";
5470 /* The HIGH and LO_SUM are matched by special .md patterns. */
5471 mips_split_p[SYMBOL_GOT_DISP] = true;
5473 mips_split_p[SYMBOL_GOTOFF_DISP] = true;
5474 mips_hi_relocs[SYMBOL_GOTOFF_DISP] = "%got_hi(";
5475 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_lo(";
5477 mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5478 mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5479 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5484 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got_disp(";
5486 mips_lo_relocs[SYMBOL_GOTOFF_DISP] = "%got(";
5487 mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5493 mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5494 mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5495 mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
5498 /* Thread-local relocation operators. */
5499 mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5500 mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5501 mips_split_p[SYMBOL_DTPREL] = 1;
5502 mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5503 mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5504 mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5505 mips_split_p[SYMBOL_TPREL] = 1;
5506 mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5507 mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
5509 mips_lo_relocs[SYMBOL_HALF] = "%half(";
5511 /* We don't have a thread pointer access instruction on MIPS16, or
5512 appropriate TLS relocations. */
5514 targetm.have_tls = false;
5516 /* Default to working around R4000 errata only if the processor
5517 was selected explicitly. */
5518 if ((target_flags_explicit & MASK_FIX_R4000) == 0
5519 && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5520 target_flags |= MASK_FIX_R4000;
5522 /* Default to working around R4400 errata only if the processor
5523 was selected explicitly. */
5524 if ((target_flags_explicit & MASK_FIX_R4400) == 0
5525 && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5526 target_flags |= MASK_FIX_R4400;
5529 /* Swap the register information for registers I and I + 1, which
5530 currently have the wrong endianness. Note that the registers'
5531 fixedness and call-clobberedness might have been set on the
5535 mips_swap_registers (unsigned int i)
/* Helper macros: exchange two ints / two strings via a temporary.  */
5540 #define SWAP_INT(X, Y) (tmpi = (X), (X) = (Y), (Y) = tmpi)
5541 #define SWAP_STRING(X, Y) (tmps = (X), (X) = (Y), (Y) = tmps)
/* Swap every per-register attribute table entry for I and I + 1.  */
5543 SWAP_INT (fixed_regs[i], fixed_regs[i + 1]);
5544 SWAP_INT (call_used_regs[i], call_used_regs[i + 1]);
5545 SWAP_INT (call_really_used_regs[i], call_really_used_regs[i + 1]);
5546 SWAP_STRING (reg_names[i], reg_names[i + 1]);
5552 /* Implement CONDITIONAL_REGISTER_USAGE. */
/* Adjust fixed_regs/call_used_regs/call_really_used_regs for the
   selected target: hide FP and status registers without hard float,
   restrict MIPS16 register usage, and fix accumulator ordering for
   little-endian targets.  */
5555 mips_conditional_register_usage (void)
/* Without the DSP ASE, its accumulator registers are unavailable.  */
5561 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5562 fixed_regs[regno] = call_used_regs[regno] = 1;
5564 if (!TARGET_HARD_FLOAT)
5568 for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5569 fixed_regs[regno] = call_used_regs[regno] = 1;
5570 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5571 fixed_regs[regno] = call_used_regs[regno] = 1;
5573 else if (! ISA_HAS_8CC)
5577 /* We only have a single condition code register. We
5578 implement this by hiding all the condition code registers,
5579 and generating RTL that refers directly to ST_REG_FIRST. */
5580 for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5581 fixed_regs[regno] = call_used_regs[regno] = 1;
5583 /* In mips16 mode, we permit the $t temporary registers to be used
5584 for reload. We prohibit the unused $s registers, since they
5585 are caller saved, and saving them via a mips16 register would
5586 probably waste more time than just reloading the value. */
5589 fixed_regs[18] = call_used_regs[18] = 1;
5590 fixed_regs[19] = call_used_regs[19] = 1;
5591 fixed_regs[20] = call_used_regs[20] = 1;
5592 fixed_regs[21] = call_used_regs[21] = 1;
5593 fixed_regs[22] = call_used_regs[22] = 1;
5594 fixed_regs[23] = call_used_regs[23] = 1;
5595 fixed_regs[26] = call_used_regs[26] = 1;
5596 fixed_regs[27] = call_used_regs[27] = 1;
5597 fixed_regs[30] = call_used_regs[30] = 1;
5599 /* fp20-23 are now caller saved. */
5600 if (mips_abi == ABI_64)
5603 for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5604 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5606 /* Odd registers from fp21 to fp31 are now caller saved. */
5607 if (mips_abi == ABI_N32)
5610 for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5611 call_really_used_regs[regno] = call_used_regs[regno] = 1;
5613 /* Make sure that double-register accumulator values are correctly
5614 ordered for the current endianness. */
5615 if (TARGET_LITTLE_ENDIAN)
5618 mips_swap_registers (MD_REG_FIRST);
5619 for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno += 2)
5620 mips_swap_registers (regno);
5624 /* Allocate a chunk of memory for per-function machine-dependent data. */
/* Installed as init_machine_status; returns zero-filled, GC-managed
   storage for cfun->machine.  */
5625 static struct machine_function *
5626 mips_init_machine_status (void)
5628 return ((struct machine_function *)
5629 ggc_alloc_cleared (sizeof (struct machine_function)));
5632 /* On the mips16, we want to allocate $24 (T_REG) before other
5633 registers for instructions for which it is possible. This helps
5634 avoid shuffling registers around in order to set up for an xor,
5635 encouraging the compiler to use a cmp instead. */
5638 mips_order_regs_for_local_alloc (void)
/* Default order: identity mapping over all hard registers.  */
5642 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5643 reg_alloc_order[i] = i;
5647 /* It really doesn't matter where we put register 0, since it is
5648 a fixed register anyhow. */
5649 reg_alloc_order[0] = 24;
5650 reg_alloc_order[24] = 0;
5655 /* The MIPS debug format wants all automatic variables and arguments
5656 to be in terms of the virtual frame pointer (stack pointer before
5657 any adjustment in the function), while the MIPS 3.0 linker wants
5658 the frame pointer to be the stack pointer after the initial
5659 adjustment. So, we do the adjustment here. The arg pointer (which
5660 is eliminated) points to the virtual frame pointer, while the frame
5661 pointer (which may be eliminated) points to the stack pointer after
5662 the initial adjustments. */
5665 mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
/* Split ADDR into base register plus constant; OFFSET2 receives the
   constant part.  */
5667 rtx offset2 = const0_rtx;
5668 rtx reg = eliminate_constant_term (addr, &offset2);
5671 offset = INTVAL (offset2);
5673 if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5674 || reg == hard_frame_pointer_rtx)
/* Use the cached frame size when available, otherwise compute it now.  */
5676 HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5677 ? compute_frame_size (get_frame_size ())
5678 : cfun->machine->frame.total_size;
5680 /* MIPS16 frame is smaller */
5681 if (frame_pointer_needed && TARGET_MIPS16)
5682 frame_size -= cfun->machine->frame.args_size;
5684 offset = offset - frame_size;
5687 /* sdbout_parms does not want this to crash for unrecognized cases. */
5689 else if (reg != arg_pointer_rtx)
5690 fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5697 /* Implement the PRINT_OPERAND macro. The MIPS-specific operand codes are:
5699 'X' OP is CONST_INT, prints 32 bits in hexadecimal format = "0x%08x",
5700 'x' OP is CONST_INT, prints 16 bits in hexadecimal format = "0x%04x",
5701 'h' OP is HIGH, prints %hi(X),
5702 'd' output integer constant in decimal,
5703 'z' if the operand is 0, use $0 instead of normal operand.
5704 'D' print second part of double-word register or memory operand.
5705 'L' print low-order register of double-word register operand.
5706 'M' print high-order register of double-word register operand.
5707 'C' print part of opcode for a branch condition.
5708 'F' print part of opcode for a floating-point branch condition.
5709 'N' print part of opcode for a branch condition, inverted.
5710 'W' print part of opcode for a floating-point branch condition, inverted.
5711 'T' print 'f' for (eq:CC ...), 't' for (ne:CC ...),
5712 'z' for (eq:?I ...), 'n' for (ne:?I ...).
5713 't' like 'T', but with the EQ/NE cases reversed
5714 'Y' for a CONST_INT X, print mips_fp_conditions[X]
5715 'Z' print the operand and a comma for ISA_HAS_8CC, otherwise print nothing
5716 'R' print the reloc associated with LO_SUM
5717 'q' print DSP accumulator registers
5719 The punctuation characters are:
5721 '(' Turn on .set noreorder
5722 ')' Turn on .set reorder
5723 '[' Turn on .set noat
5725 '<' Turn on .set nomacro
5726 '>' Turn on .set macro
5727 '{' Turn on .set volatile (not GAS)
5728 '}' Turn on .set novolatile (not GAS)
5729 '&' Turn on .set noreorder if filling delay slots
5730 '*' Turn on both .set noreorder and .set nomacro if filling delay slots
5731 '!' Turn on .set nomacro if filling delay slots
5732 '#' Print nop if in a .set noreorder section.
5733 '/' Like '#', but does nothing within a delayed branch sequence
5734 '?' Print 'l' if we are to use a branch likely instead of normal branch.
5735 '@' Print the name of the assembler temporary register (at or $1).
5736 '.' Print the name of the register with a hard-wired zero (zero or $0).
5737 '^' Print the name of the pic call-through register (t9 or $25).
5738 '$' Print the name of the stack pointer register (sp or $29).
5739 '+' Print the name of the gp register (usually gp or $28).
5740 '~' Output a branch alignment to LABEL_ALIGN(NULL). */
5743 print_operand (FILE *file, rtx op, int letter)
5745 register enum rtx_code code;
/* Punctuation letters take no operand; handle them first and return.
   The global counters (set_noreorder, set_nomacro, set_noat, set_volatile)
   nest, so directives are only emitted on the 0 -> 1 / 1 -> 0 transitions.  */
5747 if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5752 if (mips_branch_likely)
5757 fputs (reg_names [GP_REG_FIRST + 1], file);
5761 fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5765 fputs (reg_names [GP_REG_FIRST + 0], file);
5769 fputs (reg_names[STACK_POINTER_REGNUM], file);
5773 fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
/* '&': noreorder only while filling delay slots (final_sequence != 0).  */
5777 if (final_sequence != 0 && set_noreorder++ == 0)
5778 fputs (".set\tnoreorder\n\t", file);
/* '*': both noreorder and nomacro while filling delay slots.  */
5782 if (final_sequence != 0)
5784 if (set_noreorder++ == 0)
5785 fputs (".set\tnoreorder\n\t", file);
5787 if (set_nomacro++ == 0)
5788 fputs (".set\tnomacro\n\t", file);
5793 if (final_sequence != 0 && set_nomacro++ == 0)
5794 fputs ("\n\t.set\tnomacro", file);
/* '#': emit a nop when inside a .set noreorder region.  */
5798 if (set_noreorder != 0)
5799 fputs ("\n\tnop", file);
5803 /* Print an extra newline so that the delayed insn is separated
5804 from the following ones. This looks neater and is consistent
5805 with non-nop delayed sequences. */
5806 if (set_noreorder != 0 && final_sequence == 0)
5807 fputs ("\n\tnop\n", file);
5811 if (set_noreorder++ == 0)
5812 fputs (".set\tnoreorder\n\t", file);
5816 if (set_noreorder == 0)
5817 error ("internal error: %%) found without a %%( in assembler pattern");
5819 else if (--set_noreorder == 0)
5820 fputs ("\n\t.set\treorder", file);
5825 if (set_noat++ == 0)
5826 fputs (".set\tnoat\n\t", file);
5831 error ("internal error: %%] found without a %%[ in assembler pattern");
5832 else if (--set_noat == 0)
5833 fputs ("\n\t.set\tat", file);
5838 if (set_nomacro++ == 0)
5839 fputs (".set\tnomacro\n\t", file);
5843 if (set_nomacro == 0)
5844 error ("internal error: %%> found without a %%< in assembler pattern");
5845 else if (--set_nomacro == 0)
5846 fputs ("\n\t.set\tmacro", file);
5851 if (set_volatile++ == 0)
5852 fputs ("#.set\tvolatile\n\t", file);
5856 if (set_volatile == 0)
5857 error ("internal error: %%} found without a %%{ in assembler pattern");
5858 else if (--set_volatile == 0)
5859 fputs ("\n\t#.set\tnovolatile", file);
5865 if (align_labels_log > 0)
5866 ASM_OUTPUT_ALIGN (file, align_labels_log);
5871 error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
/* Non-punctuation letters require a real operand.  */
5880 error ("PRINT_OPERAND null pointer");
5884 code = GET_CODE (op);
/* 'C': print the mnemonic fragment for a branch condition.  */
5889 case EQ: fputs ("eq", file); break;
5890 case NE: fputs ("ne", file); break;
5891 case GT: fputs ("gt", file); break;
5892 case GE: fputs ("ge", file); break;
5893 case LT: fputs ("lt", file); break;
5894 case LE: fputs ("le", file); break;
5895 case GTU: fputs ("gtu", file); break;
5896 case GEU: fputs ("geu", file); break;
5897 case LTU: fputs ("ltu", file); break;
5898 case LEU: fputs ("leu", file); break;
5900 fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
/* 'N': same as 'C' but with each condition logically inverted.  */
5903 else if (letter == 'N')
5906 case EQ: fputs ("ne", file); break;
5907 case NE: fputs ("eq", file); break;
5908 case GT: fputs ("le", file); break;
5909 case GE: fputs ("lt", file); break;
5910 case LT: fputs ("ge", file); break;
5911 case LE: fputs ("gt", file); break;
5912 case GTU: fputs ("leu", file); break;
5913 case GEU: fputs ("ltu", file); break;
5914 case LTU: fputs ("geu", file); break;
5915 case LEU: fputs ("gtu", file); break;
5917 fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5920 else if (letter == 'F')
5923 case EQ: fputs ("c1f", file); break;
5924 case NE: fputs ("c1t", file); break;
5926 fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5929 else if (letter == 'W')
5932 case EQ: fputs ("c1t", file); break;
5933 case NE: fputs ("c1f", file); break;
5935 fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5938 else if (letter == 'h')
5940 if (GET_CODE (op) == HIGH)
5943 print_operand_reloc (file, op, mips_hi_relocs)
5946 else if (letter == 'R')
5947 print_operand_reloc (file, op, mips_lo_relocs);
5949 else if (letter == 'Y')
5951 if (GET_CODE (op) == CONST_INT
5952 && ((unsigned HOST_WIDE_INT) INTVAL (op)
5953 < ARRAY_SIZE (mips_fp_conditions)))
5954 fputs (mips_fp_conditions[INTVAL (op)], file);
5956 output_operand_lossage ("invalid %%Y value");
5959 else if (letter == 'Z')
5963 print_operand (file, op, 0);
/* 'q': map MD/DSP accumulator registers to the $ac0..$ac3 syntax.  */
5968 else if (letter == 'q')
5973 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5975 regnum = REGNO (op);
5976 if (MD_REG_P (regnum))
5977 fprintf (file, "$ac0");
5978 else if (DSP_ACC_REG_P (regnum))
/* reg_names for DSP accumulators are "$acNlo"/"$acNhi"; index 3 is N.  */
5979 fprintf (file, "$ac%c", reg_names[regnum][3]);
5981 fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5984 else if (code == REG || code == SUBREG)
5986 register int regnum;
5989 regnum = REGNO (op);
5991 regnum = true_regnum (op);
/* 'D'/'L'/'M' pick the appropriate half of a double-word register pair,
   accounting for word endianness.  */
5993 if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5994 || (letter == 'L' && WORDS_BIG_ENDIAN)
5998 fprintf (file, "%s", reg_names[regnum]);
/* 'D' on a MEM addresses the second word, 4 bytes beyond the first.  */
6001 else if (code == MEM)
6004 output_address (plus_constant (XEXP (op, 0), 4));
6006 output_address (XEXP (op, 0));
6009 else if (letter == 'x' && GET_CODE (op) == CONST_INT)
6010 fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
6012 else if (letter == 'X' && GET_CODE(op) == CONST_INT)
6013 fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
6015 else if (letter == 'd' && GET_CODE(op) == CONST_INT)
6016 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
6018 else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
6019 fputs (reg_names[GP_REG_FIRST], file);
6021 else if (letter == 'd' || letter == 'x' || letter == 'X')
6022 output_operand_lossage ("invalid use of %%d, %%x, or %%X");
/* 'T'/'t': select one of "z", "f", "n", "t" depending on EQ/NE and
   whether the comparison mode is CCmode (see the table above).  */
6024 else if (letter == 'T' || letter == 't')
6026 int truth = (code == NE) == (letter == 'T');
6027 fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
6030 else if (CONST_GP_P (op))
6031 fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
/* Fallback: print the operand as a plain address constant.  */
6034 output_addr_const (file, op);
6038 /* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
6039 RELOCS is the array of relocations to use. */
6042 print_operand_reloc (FILE *file, rtx op, const char **relocs)
6044 enum mips_symbol_type symbol_type;
/* A symbol with no relocation operator registered for its type cannot
   be printed; this is an internal consistency failure.  */
6048 if (!mips_symbolic_constant_p (op, &symbol_type) || relocs[symbol_type] == 0)
6049 fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
6051 /* If OP uses an UNSPEC address, we want to print the inner symbol. */
6052 split_const (op, &base, &offset);
6053 if (UNSPEC_ADDRESS_P (base))
6054 op = plus_constant (UNSPEC_ADDRESS (base), INTVAL (offset));
/* Emit "<reloc>(symbol" and then balance the parentheses opened by the
   relocation prefix.  NOTE(review): the loop body that emits the closing
   ')' characters is not visible in this listing — confirm in full source.  */
6056 fputs (relocs[symbol_type], file);
6057 output_addr_const (file, op);
6058 for (p = relocs[symbol_type]; *p != 0; p++)
6063 /* Output address operand X to FILE. */
6066 print_operand_address (FILE *file, rtx x)
6068 struct mips_address_info addr;
/* Classify X and print it in the "offset(base)" assembly syntax
   appropriate for each address kind.  */
6070 if (mips_classify_address (&addr, x, word_mode, true))
/* Register-plus-offset address.  */
6074 print_operand (file, addr.offset, 0);
6075 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6078 case ADDRESS_LO_SUM:
/* 'R' prints the low-part relocation operator around the offset.  */
6079 print_operand (file, addr.offset, 'R');
6080 fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
6083 case ADDRESS_CONST_INT:
/* Absolute small integer address: base on the hard-wired zero register.  */
6084 output_addr_const (file, x);
6085 fprintf (file, "(%s)", reg_names[0]);
6088 case ADDRESS_SYMBOLIC:
6089 output_addr_const (file, x);
6095 /* When using assembler macros, keep track of all of small-data externs
6096 so that mips_file_end can emit the appropriate declarations for them.
6098 In most cases it would be safe (though pointless) to emit .externs
6099 for other symbols too. One exception is when an object is within
6100 the -G limit but declared by the user to be in a section other
6101 than .sbss or .sdata. */
6104 mips_output_external (FILE *file, tree decl, const char *name)
/* Let the generic ELF handling run first, then layer on the
   MIPS-specific .extern / IRIX .globl bookkeeping.  */
6106 default_elf_asm_output_external (file, decl, name);
6108 /* We output the name if and only if TREE_SYMBOL_REFERENCED is
6109 set in order to avoid putting out names that are never really
6111 if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl)))
6113 if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
/* .extern with a size lets the assembler place GP-relative accesses.  */
6115 fputs ("\t.extern\t", file);
6116 assemble_name (file, name);
6117 fprintf (file, ", " HOST_WIDE_INT_PRINT_DEC "\n",
6118 int_size_in_bytes (TREE_TYPE (decl)));
6120 else if (TARGET_IRIX
6121 && mips_abi == ABI_32
6122 && TREE_CODE (decl) == FUNCTION_DECL)
6124 /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
6125 `.global name .text' directive for every used but
6126 undefined function. If we don't, the linker may perform
6127 an optimization (skipping over the insns that set $gp)
6128 when it is unsafe. */
6129 fputs ("\t.globl ", file);
6130 assemble_name (file, name);
6131 fputs (" .text\n", file);
6136 /* Emit a new filename to a stream. If we are smuggling stabs, try to
6137 put out a MIPS ECOFF file and a stab. */
6140 mips_output_filename (FILE *stream, const char *name)
6143 /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
6145 if (write_symbols == DWARF2_DEBUG)
/* First filename seen for this translation unit: always emit it.  */
6147 else if (mips_output_filename_first_time)
6149 mips_output_filename_first_time = 0;
6150 num_source_filenames += 1;
6151 current_function_file = name;
6152 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6153 output_quoted_string (stream, name);
6154 putc ('\n', stream);
6157 /* If we are emitting stabs, let dbxout.c handle this (except for
6158 the mips_output_filename_first_time case). */
6159 else if (write_symbols == DBX_DEBUG)
/* Otherwise only emit .file when the filename actually changed.  */
6162 else if (name != current_function_file
6163 && strcmp (name, current_function_file) != 0)
6165 num_source_filenames += 1;
6166 current_function_file = name;
6167 fprintf (stream, "\t.file\t%d ", num_source_filenames);
6168 output_quoted_string (stream, name);
6169 putc ('\n', stream);
6173 /* Output an ASCII string, in a space-saving way. PREFIX is the string
6174 that should be written before the opening quote, such as "\t.ascii\t"
6175 for real string data or "\t# " for a comment. */
6178 mips_output_ascii (FILE *stream, const char *string_param, size_t len,
6183 register const unsigned char *string =
6184 (const unsigned char *)string_param;
6186 fprintf (stream, "%s\"", prefix);
6187 for (i = 0; i < len; i++)
6189 register int c = string[i];
/* Printable characters pass through; backslash and quote need escaping;
   everything else is emitted as a 3-digit octal escape.  */
6193 if (c == '\\' || c == '\"')
6195 putc ('\\', stream);
6203 fprintf (stream, "\\%03o", c);
/* Break overlong lines: close the quote and reopen on a fresh line
   (only when more characters remain).  */
6207 if (cur_pos > 72 && i+1 < len)
6210 fprintf (stream, "\"\n%s\"", prefix);
6213 fprintf (stream, "\"\n");
6216 /* Implement TARGET_ASM_FILE_START. */
6219 mips_file_start (void)
6221 default_file_start ();
6225 /* Generate a special section to describe the ABI switches used to
6226 produce the resultant binary. This used to be done by the assembler
6227 setting bits in the ELF header's flags field, but we have run out of
6228 bits. GDB needs this information in order to be able to correctly
6229 debug these binaries. See the function mips_gdbarch_init() in
6230 gdb/mips-tdep.c. This is unnecessary for the IRIX 5/6 ABIs and
6231 causes unnecessary IRIX 6 ld warnings. */
6232 const char * abi_string = NULL;
6236 case ABI_32: abi_string = "abi32"; break;
6237 case ABI_N32: abi_string = "abiN32"; break;
6238 case ABI_64: abi_string = "abi64"; break;
6239 case ABI_O64: abi_string = "abiO64"; break;
6240 case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
6244 /* Note - we use fprintf directly rather than calling switch_to_section
6245 because in this way we can avoid creating an allocated section. We
6246 do not want this section to take up any space in the running
6248 fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
6250 /* There is no ELF header flag to distinguish long32 forms of the
6251 EABI from long64 forms. Emit a special section to help tools
6252 such as GDB. Do the same for o64, which is sometimes used with
6254 if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
6255 fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
6256 TARGET_LONG64 ? 64 : 32);
6258 /* Restore the default section. */
6259 fprintf (asm_out_file, "\t.previous\n");
/* Record the floating-point ABI (hard double / hard single / soft)
   for tools that read GNU object attributes.  */
6261 #ifdef HAVE_AS_GNU_ATTRIBUTE
6262 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
6263 TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT ? 1 : 2) : 3);
6267 /* Generate the pseudo ops that System V.4 wants. */
6268 if (TARGET_ABICALLS)
6269 fprintf (asm_out_file, "\t.abicalls\n");
6272 fprintf (asm_out_file, "\t.set\tmips16\n");
6274 if (flag_verbose_asm)
6275 fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
6277 mips_section_threshold, mips_arch_info->name, mips_isa);
6280 #ifdef BSS_SECTION_ASM_OP
6281 /* Implement ASM_OUTPUT_ALIGNED_BSS. This differs from the default only
6282 in the use of sbss. */
6285 mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
6286 unsigned HOST_WIDE_INT size, int align)
6288 extern tree last_assemble_variable_decl;
/* Small objects go in .sbss so they can be reached via $gp.  */
6290 if (mips_in_small_data_p (decl))
6291 switch_to_section (get_named_section (NULL, ".sbss", 0));
6293 switch_to_section (bss_section);
6294 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6295 last_assemble_variable_decl = decl;
6296 ASM_DECLARE_OBJECT_NAME (stream, name, decl);
/* Reserve at least one byte so the symbol has a distinct address.  */
6297 ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
6301 /* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON. This is usually the same as the
6302 elfos.h version, but we also need to handle -muninit-const-in-rodata. */
6305 mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6306 unsigned HOST_WIDE_INT size,
6309 /* If the target wants uninitialized const declarations in
6310 .rdata then don't put them in .comm. */
6311 if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6312 && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6313 && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node)
6315 if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6316 targetm.asm_out.globalize_label (stream, name);
/* Emit "name:\n\t.space SIZE" in .rodata instead of a .comm directive.  */
6318 switch_to_section (readonly_data_section);
6319 ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6320 mips_declare_object (stream, name, "",
6321 ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
/* Normal path: a .comm directive via the common-object helper.  */
6325 mips_declare_common_object (stream, name, "\n\t.comm\t",
6329 /* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6330 NAME is the name of the object and ALIGN is the required alignment
6331 in bytes. TAKES_ALIGNMENT_P is true if the directive takes a third
6332 alignment argument. */
6335 mips_declare_common_object (FILE *stream, const char *name,
6336 const char *init_string,
6337 unsigned HOST_WIDE_INT size,
6338 unsigned int align, bool takes_alignment_p)
6340 if (!takes_alignment_p)
/* The directive has no alignment operand, so bake the alignment into
   the size by rounding SIZE up to the next multiple of ALIGN bytes.
   NOTE(review): ALIGN appears to arrive in bits here (it is divided by
   BITS_PER_UNIT), despite the header comment saying bytes — confirm.  */
6342 size += (align / BITS_PER_UNIT) - 1;
6343 size -= size % (align / BITS_PER_UNIT);
6344 mips_declare_object (stream, name, init_string,
6345 "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
/* Otherwise pass the alignment through as the directive's third operand.  */
6348 mips_declare_object (stream, name, init_string,
6349 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6350 size, align / BITS_PER_UNIT);
6353 /* Emit either a label, .comm, or .lcomm directive. When using assembler
6354 macros, mark the symbol as written so that mips_file_end won't emit an
6355 .extern for it. STREAM is the output file, NAME is the name of the
6356 symbol, INIT_STRING is the string that should be written before the
6357 symbol and FINAL_STRING is the string that should be written after it.
6358 FINAL_STRING is a printf() format that consumes the remaining arguments. */
6361 mips_declare_object (FILE *stream, const char *name, const char *init_string,
6362 const char *final_string, ...)
/* Emit INIT_STRING, the (possibly mangled) symbol name, then the
   printf-formatted FINAL_STRING with the trailing varargs.  */
6366 fputs (init_string, stream);
6367 assemble_name (stream, name);
6368 va_start (ap, final_string);
6369 vfprintf (stream, final_string, ap);
/* With assembler macros, record that the symbol was defined so
   mips_file_end does not also emit an .extern for it.  */
6372 if (!TARGET_EXPLICIT_RELOCS)
6374 tree name_tree = get_identifier (name);
6375 TREE_ASM_WRITTEN (name_tree) = 1;
6379 #ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6380 extern int size_directive_output;
6382 /* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF
6383 definitions except that it uses mips_declare_object() to emit the label. */
6386 mips_declare_object_name (FILE *stream, const char *name,
6387 tree decl ATTRIBUTE_UNUSED)
6389 #ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6390 ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
/* Emit .size now when the size is known; size_directive_output lets
   mips_finish_declare_object avoid emitting it twice.  */
6393 size_directive_output = 0;
6394 if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6398 size_directive_output = 1;
6399 size = int_size_in_bytes (TREE_TYPE (decl));
6400 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6403 mips_declare_object (stream, name, "", ":\n");
6406 /* Implement ASM_FINISH_DECLARE_OBJECT. This is generic ELF stuff. */
6409 mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6413 name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
/* Emit a late .size directive for top-level tentative definitions whose
   size was not known when the label was declared.  */
6414 if (!flag_inhibit_size_directive
6415 && DECL_SIZE (decl) != 0
6416 && !at_end && top_level
6417 && DECL_INITIAL (decl) == error_mark_node
6418 && !size_directive_output)
6422 size_directive_output = 1;
6423 size = int_size_in_bytes (TREE_TYPE (decl));
6424 ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6429 /* Return true if X is a small data address that can be rewritten
/* ... i.e. a symbolic constant classified as SYMBOL_SMALL_DATA, which is
   only worth rewriting as $gp-relative when explicit relocs are in use.  */
6433 mips_rewrite_small_data_p (rtx x)
6435 enum mips_symbol_type symbol_type;
6437 return (TARGET_EXPLICIT_RELOCS
6438 && mips_symbolic_constant_p (x, &symbol_type)
6439 && symbol_type == SYMBOL_SMALL_DATA);
6443 /* A for_each_rtx callback for mips_small_data_pattern_p. */
6446 mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
/* LO_SUM subexpressions already carry an explicit relocation, so they
   are skipped rather than reported as raw small-data references.  */
6448 if (GET_CODE (*loc) == LO_SUM)
6451 return mips_rewrite_small_data_p (*loc);
6454 /* Return true if OP refers to small data symbols directly, not through
/* ... a LO_SUM; walks every subexpression of OP via for_each_rtx.  */
6458 mips_small_data_pattern_p (rtx op)
6460 return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6463 /* A for_each_rtx callback, used by mips_rewrite_small_data. */
6466 mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
/* Replace a raw small-data reference with (lo_sum $gp sym), making the
   $gp-relative addressing explicit; don't descend into existing LO_SUMs.  */
6468 if (mips_rewrite_small_data_p (*loc))
6469 *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6471 if (GET_CODE (*loc) == LO_SUM)
6477 /* If possible, rewrite OP so that it refers to small data using
6478 explicit relocations. */
6481 mips_rewrite_small_data (rtx op)
/* Work on a copy so shared RTL is not mutated in place.  */
6483 op = copy_insn (op);
6484 for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6488 /* Return true if the current function has an insn that implicitly
6492 mips_function_has_gp_insn (void)
6494 /* Don't bother rechecking if we found one last time. */
6495 if (!cfun->machine->has_gp_insn_p)
/* Scan the whole insn stream (including pending sequences) for an insn
   that either uses the GOT or matches a small-data pattern.  */
6499 push_topmost_sequence ();
6500 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6502 && GET_CODE (PATTERN (insn)) != USE
6503 && GET_CODE (PATTERN (insn)) != CLOBBER
6504 && (get_attr_got (insn) != GOT_UNSET
6505 || small_data_pattern (PATTERN (insn), VOIDmode)))
6507 pop_topmost_sequence ();
/* Cache the result: non-NULL INSN means such an insn was found.  */
6509 cfun->machine->has_gp_insn_p = (insn != 0);
6511 return cfun->machine->has_gp_insn_p;
6515 /* Return the register that should be used as the global pointer
6516 within this function. Return 0 if the function doesn't need
6517 a global pointer. */
6520 mips_global_pointer (void)
6524 /* $gp is always available unless we're using a GOT. */
6525 if (!TARGET_USE_GOT)
6526 return GLOBAL_POINTER_REGNUM;
6528 /* We must always provide $gp when it is used implicitly. */
6529 if (!TARGET_EXPLICIT_RELOCS)
6530 return GLOBAL_POINTER_REGNUM;
6532 /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6534 if (current_function_profile)
6535 return GLOBAL_POINTER_REGNUM;
6537 /* If the function has a nonlocal goto, $gp must hold the correct
6538 global pointer for the target function. */
6539 if (current_function_has_nonlocal_goto)
6540 return GLOBAL_POINTER_REGNUM;
6542 /* If the gp is never referenced, there's no need to initialize it.
6543 Note that reload can sometimes introduce constant pool references
6544 into a function that otherwise didn't need them. For example,
6545 suppose we have an instruction like:
6547 (set (reg:DF R1) (float:DF (reg:SI R2)))
6549 If R2 turns out to be constant such as 1, the instruction may have a
6550 REG_EQUAL note saying that R1 == 1.0. Reload then has the option of
6551 using this constant if R2 doesn't get allocated to a register.
6553 In cases like these, reload will have added the constant to the pool
6554 but no instruction will yet refer to it. */
6555 if (!df_regs_ever_live_p (GLOBAL_POINTER_REGNUM)
6556 && !current_function_uses_const_pool
6557 && !mips_function_has_gp_insn ())
6560 /* We need a global pointer, but perhaps we can use a call-clobbered
6561 register instead of $gp. */
6562 if (TARGET_CALL_SAVED_GP && current_function_is_leaf)
6563 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
/* A substitute must be unused, call-clobbered, not fixed, and must
   not clash with the PIC call register ($25/t9).  */
6564 if (!df_regs_ever_live_p (regno)
6565 && call_used_regs[regno]
6566 && !fixed_regs[regno]
6567 && regno != PIC_FUNCTION_ADDR_REGNUM)
/* Fall back to $gp itself.  */
6570 return GLOBAL_POINTER_REGNUM;
6574 /* Return true if the function return value MODE will get returned in a
6575 floating-point register. */
6578 mips_return_mode_in_fpr_p (enum machine_mode mode)
/* True for scalar, vector, and complex float modes whose unit fits
   in a hardware FP return register.  */
6580 return ((GET_MODE_CLASS (mode) == MODE_FLOAT
6581 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
6582 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6583 && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_HWFPVALUE);
6586 /* Return a two-character string representing a function floating-point
6587 return mode, used to name MIPS16 function stubs. */
6590 mips16_call_stub_mode_suffix (enum machine_mode mode)
/* NOTE(review): the returned literals for each mode (and the SFmode
   branch preceding this one) are elided in this listing — the visible
   code only shows the mode dispatch chain.  */
6594 else if (mode == DFmode)
6596 else if (mode == SCmode)
6598 else if (mode == DCmode)
6600 else if (mode == V2SFmode)
6606 /* Return true if the current function returns its value in a floating-point
6607 register in MIPS16 mode. */
6610 mips16_cfun_returns_in_fpr_p (void)
6612 tree return_type = DECL_RESULT (current_function_decl);
/* Requires MIPS16 hard-float, a non-aggregate return value, and a
   return mode that lands in an FPR.  */
6613 return (mips16_hard_float
6614 && !aggregate_value_p (return_type, current_function_decl)
6615 && mips_return_mode_in_fpr_p (DECL_MODE (return_type)));
6619 /* Return true if the current function must save REGNO. */
6622 mips_save_reg_p (unsigned int regno)
6624 /* We only need to save $gp if TARGET_CALL_SAVED_GP and only then
6625 if we have not chosen a call-clobbered substitute. */
6626 if (regno == GLOBAL_POINTER_REGNUM)
6627 return TARGET_CALL_SAVED_GP && cfun->machine->global_pointer == regno;
6629 /* Check call-saved registers. */
6630 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
6633 /* Save both registers in an FPR pair if either one is used. This is
6634 needed for the case when MIN_FPRS_PER_FMT == 1, which allows the odd
6635 register to be used without the even register. */
6636 if (FP_REG_P (regno)
6637 && MAX_FPRS_PER_FMT == 2
6638 && df_regs_ever_live_p (regno + 1)
6639 && !call_used_regs[regno + 1])
6642 /* We need to save the old frame pointer before setting up a new one. */
6643 if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6646 /* We need to save the incoming return address if it is ever clobbered
6647 within the function. */
6648 if (regno == GP_REG_FIRST + 31 && df_regs_ever_live_p (regno))
/* NOTE(review): the two checks below are presumably inside a
   TARGET_MIPS16 guard (lines elided here) — confirm in full source.  */
6653 /* $18 is a special case in mips16 code. It may be used to call
6654 a function which returns a floating point value, but it is
6655 marked in call_used_regs. */
6656 if (regno == GP_REG_FIRST + 18 && df_regs_ever_live_p (regno))
6659 /* $31 is also a special case. It will be used to copy a return
6660 value into the floating point registers if the return value is
6662 if (regno == GP_REG_FIRST + 31
6663 && mips16_cfun_returns_in_fpr_p ())
6670 /* Return the index of the lowest X in the range [0, SIZE) for which
6671 bit REGS[X] is set in MASK. Return SIZE if there is no such X. */
6674 mips16e_find_first_register (unsigned int mask, const unsigned char *regs,
/* Linear scan; REGS maps index -> register number tested against MASK.  */
6679 for (i = 0; i < size; i++)
6680 if (BITSET_P (mask, regs[i]))
6686 /* *MASK_PTR is a mask of general purpose registers and *GP_REG_SIZE_PTR
6687 is the number of bytes that they occupy. If *MASK_PTR contains REGS[X]
6688 for some X in [0, SIZE), adjust *MASK_PTR and *GP_REG_SIZE_PTR so that
6689 the same is true for all indexes (X, SIZE). */
6692 mips16e_mask_registers (unsigned int *mask_ptr, const unsigned char *regs,
6693 unsigned int size, HOST_WIDE_INT *gp_reg_size_ptr)
/* Find the first saved register in REGS, then force every later entry
   into the mask, growing the save-area size for each one added.  This
   satisfies the MIPS16e SAVE/RESTORE "contiguous range" requirement.  */
6697 i = mips16e_find_first_register (*mask_ptr, regs, size);
6698 for (i++; i < size; i++)
6699 if (!BITSET_P (*mask_ptr, regs[i]))
6701 *gp_reg_size_ptr += GET_MODE_SIZE (gpr_mode);
6702 *mask_ptr |= 1 << regs[i];
6706 /* Return the bytes needed to compute the frame pointer from the current
6707 stack pointer. SIZE is the size (in bytes) of the local variables.
6709 MIPS stack frames look like:
6711 Before call After call
6712 high +-----------------------+ +-----------------------+
6714 | caller's temps. | | caller's temps. |
6716 +-----------------------+ +-----------------------+
6718 | arguments on stack. | | arguments on stack. |
6720 +-----------------------+ +-----------------------+
6721 | 4 words to save | | 4 words to save |
6722 | arguments passed | | arguments passed |
6723 | in registers, even | | in registers, even |
6724 | if not passed. | | if not passed. |
6725 SP->+-----------------------+ VFP->+-----------------------+
6726 (VFP = SP+fp_sp_offset) | |\
6727 | fp register save | | fp_reg_size
6729 SP+gp_sp_offset->+-----------------------+
6731 | | gp register save | | gp_reg_size
6732 gp_reg_rounded | | |/
6733 | +-----------------------+
6734 \| alignment padding |
6735 +-----------------------+
6737 | local variables | | var_size
6739 +-----------------------+
6741 | alloca allocations |
6743 +-----------------------+
6745 cprestore_size | | GP save for V.4 abi |
6747 +-----------------------+
6749 | arguments on stack | |
6751 +-----------------------+ |
6752 | 4 words to save | | args_size
6753 | arguments passed | |
6754 | in registers, even | |
6755 | if not passed. | |
6756 low | (TARGET_OLDABI only) |/
6757 memory SP->+-----------------------+
6762 compute_frame_size (HOST_WIDE_INT size)
6765 HOST_WIDE_INT total_size; /* # bytes that the entire frame takes up */
6766 HOST_WIDE_INT var_size; /* # bytes that variables take up */
6767 HOST_WIDE_INT args_size; /* # bytes that outgoing arguments take up */
6768 HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6769 HOST_WIDE_INT gp_reg_rounded; /* # bytes needed to store gp after rounding */
6770 HOST_WIDE_INT gp_reg_size; /* # bytes needed to store gp regs */
6771 HOST_WIDE_INT fp_reg_size; /* # bytes needed to store fp regs */
6772 unsigned int mask; /* mask of saved gp registers */
6773 unsigned int fmask; /* mask of saved fp registers */
/* Decide which register will act as $gp before sizing the save areas,
   since that choice affects which registers must be saved.  */
6775 cfun->machine->global_pointer = mips_global_pointer ();
6781 var_size = MIPS_STACK_ALIGN (size);
6782 args_size = current_function_outgoing_args_size;
6783 cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6785 /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6786 functions. If the function has local variables, we're committed
6787 to allocating it anyway. Otherwise reclaim it here. */
6788 if (var_size == 0 && current_function_is_leaf)
6789 cprestore_size = args_size = 0;
6791 /* The MIPS 3.0 linker does not like functions that dynamically
6792 allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
6793 looks like we are trying to create a second frame pointer to the
6794 function, so allocate some stack space to make it happy. */
6796 if (args_size == 0 && current_function_calls_alloca)
6797 args_size = 4 * UNITS_PER_WORD;
6799 total_size = var_size + args_size + cprestore_size;
6801 /* Calculate space needed for gp registers. */
6802 for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6803 if (mips_save_reg_p (regno))
6805 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6806 mask |= 1 << (regno - GP_REG_FIRST);
6809 /* We need to restore these for the handler. */
6810 if (current_function_calls_eh_return)
6815 regno = EH_RETURN_DATA_REGNO (i);
6816 if (regno == INVALID_REGNUM)
6818 gp_reg_size += GET_MODE_SIZE (gpr_mode);
6819 mask |= 1 << (regno - GP_REG_FIRST);
6823 /* The MIPS16e SAVE and RESTORE instructions have two ranges of registers:
6824 $a3-$a0 and $s2-$s8. If we save one register in the range, we must
6825 save all later registers too. */
6826 if (GENERATE_MIPS16E_SAVE_RESTORE)
6828 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
6829 ARRAY_SIZE (mips16e_s2_s8_regs), &gp_reg_size);
6830 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
6831 ARRAY_SIZE (mips16e_a0_a3_regs), &gp_reg_size);
6834 /* This loop must iterate over the same space as its companion in
6835 mips_for_each_saved_reg. */
6836 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6837 regno >= FP_REG_FIRST;
6838 regno -= MAX_FPRS_PER_FMT)
6840 if (mips_save_reg_p (regno))
6842 fp_reg_size += MAX_FPRS_PER_FMT * UNITS_PER_FPREG;
6843 fmask |= ((1 << MAX_FPRS_PER_FMT) - 1) << (regno - FP_REG_FIRST);
6847 gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6848 total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6850 /* Add in the space required for saving incoming register arguments. */
6851 total_size += current_function_pretend_args_size;
6852 total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6854 /* Save other computed information. */
6855 cfun->machine->frame.total_size = total_size;
6856 cfun->machine->frame.var_size = var_size;
6857 cfun->machine->frame.args_size = args_size;
6858 cfun->machine->frame.cprestore_size = cprestore_size;
6859 cfun->machine->frame.gp_reg_size = gp_reg_size;
6860 cfun->machine->frame.fp_reg_size = fp_reg_size;
6861 cfun->machine->frame.mask = mask;
6862 cfun->machine->frame.fmask = fmask;
6863 cfun->machine->frame.initialized = reload_completed;
6864 cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6865 cfun->machine->frame.num_fp = (fp_reg_size
6866 / (MAX_FPRS_PER_FMT * UNITS_PER_FPREG));
/* Record where the highest-numbered saved GPR sits, both relative to the
   incoming SP (gp_sp_offset) and to the new SP (gp_save_offset, negative).  */
6870 HOST_WIDE_INT offset;
6872 if (GENERATE_MIPS16E_SAVE_RESTORE)
6873 /* MIPS16e SAVE and RESTORE instructions require the GP save area
6874 to be aligned at the high end with any padding at the low end.
6875 It is only safe to use this calculation for o32, where we never
6876 have pretend arguments, and where any varargs will be saved in
6877 the caller-allocated area rather than at the top of the frame. */
6878 offset = (total_size - GET_MODE_SIZE (gpr_mode));
6880 offset = (args_size + cprestore_size + var_size
6881 + gp_reg_size - GET_MODE_SIZE (gpr_mode));
6882 cfun->machine->frame.gp_sp_offset = offset;
6883 cfun->machine->frame.gp_save_offset = offset - total_size;
6887 cfun->machine->frame.gp_sp_offset = 0;
6888 cfun->machine->frame.gp_save_offset = 0;
/* Likewise for the highest-numbered saved FPR (pair).  */
6893 HOST_WIDE_INT offset;
6895 offset = (args_size + cprestore_size + var_size
6896 + gp_reg_rounded + fp_reg_size
6897 - MAX_FPRS_PER_FMT * UNITS_PER_FPREG);
6898 cfun->machine->frame.fp_sp_offset = offset;
6899 cfun->machine->frame.fp_save_offset = offset - total_size;
6903 cfun->machine->frame.fp_sp_offset = 0;
6904 cfun->machine->frame.fp_save_offset = 0;
6907 /* Ok, we're done. */
6911 /* Implement INITIAL_ELIMINATION_OFFSET. FROM is either the frame
6912 pointer or argument pointer. TO is either the stack pointer or
6913 hard frame pointer. */
/* Returns the byte offset to add when replacing FROM with TO.  */
6916 mips_initial_elimination_offset (int from, int to)
6918 HOST_WIDE_INT offset;
/* Make sure cfun->machine->frame reflects the final layout before we
   read it.  */
6920 compute_frame_size (get_frame_size ());
6922 /* Set OFFSET to the offset from the stack pointer. */
6925 case FRAME_POINTER_REGNUM:
6929 case ARG_POINTER_REGNUM:
/* The incoming argument pointer lies above the pretend-argument area,
   which compute_frame_size included in total_size.  */
6930 offset = (cfun->machine->frame.total_size
6931 - current_function_pretend_args_size)
/* In mips16 code the hard frame pointer points above the outgoing
   argument area, so that part of the frame drops out of the offset.  */
6938 if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6939 offset -= cfun->machine->frame.args_size;
6944 /* Implement RETURN_ADDR_RTX. Note, we do not support moving
6945 back to a previous frame. */
/* COUNT selects the frame (only the current one is supported);
   FRAME is unused.  */
6947 mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
/* $31 is the MIPS return-address register; expose its on-entry value.  */
6952 return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6955 /* Use FN to save or restore register REGNO. MODE is the register's
6956 mode and OFFSET is the offset of its save slot from the current
6960 mips_save_restore_reg (enum machine_mode mode, int regno,
6961 HOST_WIDE_INT offset, mips_save_restore_fn fn)
/* Build the stack-slot MEM at sp + OFFSET and let FN decide whether the
   (reg, mem) pair is a load or a store.  */
6965 mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6967 fn (gen_rtx_REG (mode, regno), mem);
6971 /* Call FN for each register that is saved by the current function.
6972 SP_OFFSET is the offset of the current stack pointer from the start
/* FN receives each saved GPR and FPR with the offset of its save slot
   relative to the current stack pointer.  */
6976 mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6978 enum machine_mode fpr_mode;
6979 HOST_WIDE_INT offset;
6982 /* Save registers starting from high to low. The debuggers prefer at least
6983 the return register be stored at func+4, and also it allows us not to
6984 need a nop in the epilogue if at least one register is reloaded in
6985 addition to return address. */
6986 offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6987 for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6988 if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6990 mips_save_restore_reg (gpr_mode, regno, offset, fn);
6991 offset -= GET_MODE_SIZE (gpr_mode);
6994 /* This loop must iterate over the same space as its companion in
6995 compute_frame_size. */
6996 offset = cfun->machine->frame.fp_sp_offset - sp_offset;
/* FPRs are saved in MAX_FPRS_PER_FMT-sized groups; use DFmode unless
   the target only has single-precision registers.  */
6997 fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6998 for (regno = (FP_REG_LAST - MAX_FPRS_PER_FMT + 1);
6999 regno >= FP_REG_FIRST;
7000 regno -= MAX_FPRS_PER_FMT)
7001 if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
7003 mips_save_restore_reg (fpr_mode, regno, offset, fn);
7004 offset -= GET_MODE_SIZE (fpr_mode);
7008 /* If we're generating n32 or n64 abicalls, and the current function
7009 does not use $28 as its global pointer, emit a cplocal directive.
7010 Use pic_offset_table_rtx as the argument to the directive. */
7013 mips_output_cplocal (void)
/* Only needed when the assembler expands GOT macros itself; with
   explicit relocs GCC emits the accesses directly.  */
7015 if (!TARGET_EXPLICIT_RELOCS
7016 && cfun->machine->global_pointer > 0
7017 && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
7018 output_asm_insn (".cplocal %+", 0);
7021 /* Return the style of GP load sequence that is being used for the
7022 current function. */
7024 enum mips_loadgp_style
7025 mips_current_loadgp_style (void)
/* No GOT, or no global pointer selected, means no load sequence.  */
7027 if (!TARGET_USE_GOT || cfun->machine->global_pointer == 0)
7033 if (TARGET_ABSOLUTE_ABICALLS)
7034 return LOADGP_ABSOLUTE;
7036 return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
7039 /* The __gnu_local_gp symbol. */
7041 static GTY(()) rtx mips_gnu_local_gp;
7043 /* If we're generating n32 or n64 abicalls, emit instructions
7044 to set up the global pointer. */
7047 mips_emit_loadgp (void)
7049 rtx addr, offset, incoming_address, base, index;
7051 switch (mips_current_loadgp_style ())
7053 case LOADGP_ABSOLUTE:
/* Load $gp from the absolute address __gnu_local_gp; the SYMBOL_REF
   is created lazily and cached in GC-protected storage.  */
7054 if (mips_gnu_local_gp == NULL)
7056 mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
7057 SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
7059 emit_insn (gen_loadgp_absolute (mips_gnu_local_gp));
/* New-ABI style: derive $gp from the function's own address (incoming
   in $25) plus a GOT-relative offset.  */
7063 addr = XEXP (DECL_RTL (current_function_decl), 0);
7064 offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
7065 incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7066 emit_insn (gen_loadgp_newabi (offset, incoming_address));
/* Without explicit relocs, stop the scheduler from moving implicit
   uses of $gp above this point.  */
7067 if (!TARGET_EXPLICIT_RELOCS)
7068 emit_insn (gen_loadgp_blockage ());
/* VxWorks RTP style: fetch the GOT through GOTT_BASE/GOTT_INDEX.  */
7072 base = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_BASE));
7073 index = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (VXWORKS_GOTT_INDEX));
7074 emit_insn (gen_loadgp_rtp (base, index));
7075 if (!TARGET_EXPLICIT_RELOCS)
7076 emit_insn (gen_loadgp_blockage ());
7084 /* Set up the stack and frame (if desired) for the function. */
/* Textual prologue output: .ent/.frame/.mask/.fmask directives and the
   .cpload sequence for SVR4 PIC.  The RTL prologue work is done in
   mips_expand_prologue.  */
7087 mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7090 HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
7092 #ifdef SDB_DEBUGGING_INFO
7093 if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
7094 SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
7097 /* In mips16 mode, we may need to generate a 32 bit to handle
7098 floating point arguments. The linker will arrange for any 32-bit
7099 functions to call this stub, which will then jump to the 16-bit
7101 if (mips16_hard_float
7102 && current_function_args_info.fp_code != 0)
7103 build_mips16_function_stub (file);
7105 if (!FUNCTION_NAME_ALREADY_DECLARED)
7107 /* Get the function name the same way that toplev.c does before calling
7108 assemble_start_function. This is needed so that the name used here
7109 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7110 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7112 if (!flag_inhibit_size_directive)
7114 fputs ("\t.ent\t", file);
7115 assemble_name (file, fnname);
7119 assemble_name (file, fnname);
7120 fputs (":\n", file);
7123 /* Stop mips_file_end from treating this function as external. */
7124 if (TARGET_IRIX && mips_abi == ABI_32)
7125 TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
7127 if (!flag_inhibit_size_directive)
7129 /* .frame FRAMEREG, FRAMESIZE, RETREG */
7131 "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
7132 "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
7133 ", args= " HOST_WIDE_INT_PRINT_DEC
7134 ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
7135 (reg_names[(frame_pointer_needed)
7136 ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
7137 ((frame_pointer_needed && TARGET_MIPS16)
7138 ? tsize - cfun->machine->frame.args_size
7140 reg_names[GP_REG_FIRST + 31],
7141 cfun->machine->frame.var_size,
7142 cfun->machine->frame.num_gp,
7143 cfun->machine->frame.num_fp,
7144 cfun->machine->frame.args_size,
7145 cfun->machine->frame.cprestore_size);
7147 /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
7148 fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7149 cfun->machine->frame.mask,
7150 cfun->machine->frame.gp_save_offset);
7151 fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
7152 cfun->machine->frame.fmask,
7153 cfun->machine->frame.fp_save_offset);
7156 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
7157 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs. */
7160 if (mips_current_loadgp_style () == LOADGP_OLDABI)
7162 /* Handle the initialization of $gp for SVR4 PIC. */
/* %( %) and %< are mips.c asm punctuation for .set noreorder/nomacro
   bracketing around the .cpload macro.  */
7163 if (!cfun->machine->all_noreorder_p)
7164 output_asm_insn ("%(.cpload\t%^%)", 0);
7166 output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
7168 else if (cfun->machine->all_noreorder_p)
7169 output_asm_insn ("%(%<", 0);
7171 /* Tell the assembler which register we're using as the global
7172 pointer. This is needed for thunks, since they can use either
7173 explicit relocs or assembler macros. */
7174 mips_output_cplocal ();
7177 /* Make the last instruction frame related and note that it performs
7178 the operation described by FRAME_PATTERN. */
7181 mips_set_frame_expr (rtx frame_pattern)
7185 insn = get_last_insn ();
7186 RTX_FRAME_RELATED_P (insn) = 1;
/* Attach FRAME_PATTERN as a REG_FRAME_RELATED_EXPR note so the unwinder
   describes the intended effect rather than the literal instruction.  */
7187 REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
7193 /* Return a frame-related rtx that stores REG at MEM.
7194 REG must be a single register. */
7197 mips_frame_set (rtx mem, rtx reg)
7201 /* If we're saving the return address register and the dwarf return
7202 address column differs from the hard register number, adjust the
7203 note reg to refer to the former. */
7204 if (REGNO (reg) == GP_REG_FIRST + 31
7205 && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
7206 reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
7208 set = gen_rtx_SET (VOIDmode, mem, reg);
/* Mark the SET itself so dwarf2out records it as a register save.  */
7209 RTX_FRAME_RELATED_P (set) = 1;
7215 /* Save register REG to MEM. Make the instruction frame-related. */
7218 mips_save_reg (rtx reg, rtx mem)
/* A DFmode save on a target with 32-bit FPRs must be done as two
   32-bit halves; describe both halves in the unwind info.  */
7220 if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
7224 if (mips_split_64bit_move_p (mem, reg))
7225 mips_split_64bit_move (mem, reg);
7227 emit_move_insn (mem, reg);
7229 x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
7230 x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
7231 mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
7236 && REGNO (reg) != GP_REG_FIRST + 31
7237 && !M16_REG_P (REGNO (reg)))
7239 /* Save a non-mips16 register by moving it through a temporary.
7240 We don't need to do this for $31 since there's a special
7241 instruction for it. */
7242 emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
7243 emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
7246 emit_move_insn (mem, reg);
7248 mips_set_frame_expr (mips_frame_set (mem, reg));
7252 /* Return a move between register REGNO and memory location SP + OFFSET.
7253 Make the move a load if RESTORE_P, otherwise make it a frame-related
7257 mips16e_save_restore_reg (bool restore_p, HOST_WIDE_INT offset,
/* MIPS16e SAVE/RESTORE always transfer 32-bit words, hence SImode.  */
7262 mem = gen_frame_mem (SImode, plus_constant (stack_pointer_rtx, offset));
7263 reg = gen_rtx_REG (SImode, regno);
7265 ? gen_rtx_SET (VOIDmode, reg, mem)
7266 : mips_frame_set (mem, reg));
7269 /* Return RTL for a MIPS16e SAVE or RESTORE instruction; RESTORE_P says which.
7270 The instruction must:
7272 - Allocate or deallocate SIZE bytes in total; SIZE is known
7275 - Save or restore as many registers in *MASK_PTR as possible.
7276 The instruction saves the first registers at the top of the
7277 allocated area, with the other registers below it.
7279 - Save NARGS argument registers above the allocated area.
7281 (NARGS is always zero if RESTORE_P.)
7283 The SAVE and RESTORE instructions cannot save and restore all general
7284 registers, so there may be some registers left over for the caller to
7285 handle. Destructively modify *MASK_PTR so that it contains the registers
7286 that still need to be saved or restored. The caller can save these
7287 registers in the memory immediately below *OFFSET_PTR, which is a
7288 byte offset from the bottom of the allocated stack area. */
7291 mips16e_build_save_restore (bool restore_p, unsigned int *mask_ptr,
7292 HOST_WIDE_INT *offset_ptr, unsigned int nargs,
7296 HOST_WIDE_INT offset, top_offset;
7297 unsigned int i, regno;
/* MIPS16e SAVE/RESTORE only handle GPRs; FPR saves must be empty.  */
7300 gcc_assert (cfun->machine->frame.fp_reg_size == 0);
7302 /* Calculate the number of elements in the PARALLEL. We need one element
7303 for the stack adjustment, one for each argument register save, and one
7304 for each additional register move. */
7306 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7307 if (BITSET_P (*mask_ptr, mips16e_save_restore_regs[i]))
7310 /* Create the final PARALLEL. */
7311 pattern = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (n));
7314 /* Add the stack pointer adjustment. */
7315 set = gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7316 plus_constant (stack_pointer_rtx,
7317 restore_p ? size : -size));
7318 RTX_FRAME_RELATED_P (set) = 1;
7319 XVECEXP (pattern, 0, n++) = set;
7321 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
7322 top_offset = restore_p ? size : 0;
7324 /* Save the arguments. */
7325 for (i = 0; i < nargs; i++)
7327 offset = top_offset + i * GET_MODE_SIZE (gpr_mode);
7328 set = mips16e_save_restore_reg (restore_p, offset, GP_ARG_FIRST + i);
7329 XVECEXP (pattern, 0, n++) = set;
7332 /* Then fill in the other register moves. */
7333 offset = top_offset;
7334 for (i = 0; i < ARRAY_SIZE (mips16e_save_restore_regs); i++)
7336 regno = mips16e_save_restore_regs[i];
7337 if (BITSET_P (*mask_ptr, regno))
7339 offset -= UNITS_PER_WORD;
7340 set = mips16e_save_restore_reg (restore_p, offset, regno);
7341 XVECEXP (pattern, 0, n++) = set;
7342 *mask_ptr &= ~(1 << regno);
7346 /* Tell the caller what offset it should use for the remaining registers. */
/* OFFSET walked downwards from TOP_OFFSET, so (offset - top_offset) is
   the (non-positive) extent of the in-instruction register area; adding
   SIZE once converts it into an offset from the bottom of the allocated
   area.  The previous code added SIZE a second time, double-counting
   the allocation and pointing callers past their save slots.  */
7347 *offset_ptr = size + (offset - top_offset);
7349 gcc_assert (n == XVECLEN (pattern, 0));
7354 /* PATTERN is a PARALLEL whose first element adds ADJUST to the stack
7355 pointer. Return true if PATTERN matches the kind of instruction
7356 generated by mips16e_build_save_restore. If INFO is nonnull,
7357 initialize it when returning true. */
7360 mips16e_save_restore_pattern_p (rtx pattern, HOST_WIDE_INT adjust,
7361 struct mips16e_save_restore_info *info)
7363 unsigned int i, nargs, mask;
7364 HOST_WIDE_INT top_offset, save_offset, offset, extra;
7365 rtx set, reg, mem, base;
7368 if (!GENERATE_MIPS16E_SAVE_RESTORE)
7371 /* Stack offsets in the PARALLEL are relative to the old stack pointer. */
/* ADJUST > 0 means the pattern deallocates stack, i.e. a RESTORE.  */
7372 top_offset = adjust > 0 ? adjust : 0;
7374 /* Interpret all other members of the PARALLEL. */
7375 save_offset = top_offset - GET_MODE_SIZE (gpr_mode);
7379 for (n = 1; n < XVECLEN (pattern, 0); n++)
7381 /* Check that we have a SET. */
7382 set = XVECEXP (pattern, 0, n);
7383 if (GET_CODE (set) != SET)
7386 /* Check that the SET is a load (if restoring) or a store
7388 mem = adjust > 0 ? SET_SRC (set) : SET_DEST (set);
7392 /* Check that the address is the sum of the stack pointer and a
7393 possibly-zero constant offset. */
7394 mips_split_plus (XEXP (mem, 0), &base, &offset);
7395 if (base != stack_pointer_rtx)
7398 /* Check that SET's other operand is a register. */
7399 reg = adjust > 0 ? SET_DEST (set) : SET_SRC (set);
7403 /* Check for argument saves. */
7404 if (offset == top_offset + nargs * GET_MODE_SIZE (gpr_mode)
7405 && REGNO (reg) == GP_ARG_FIRST + nargs)
7407 else if (offset == save_offset)
/* Saved registers must appear in mips16e_save_restore_regs order.  */
7409 while (mips16e_save_restore_regs[i++] != REGNO (reg))
7410 if (i == ARRAY_SIZE (mips16e_save_restore_regs))
7413 mask |= 1 << REGNO (reg);
7414 save_offset -= GET_MODE_SIZE (gpr_mode);
7420 /* Check that the restrictions on register ranges are met. */
7422 mips16e_mask_registers (&mask, mips16e_s2_s8_regs,
7423 ARRAY_SIZE (mips16e_s2_s8_regs), &extra);
7424 mips16e_mask_registers (&mask, mips16e_a0_a3_regs,
7425 ARRAY_SIZE (mips16e_a0_a3_regs), &extra);
7429 /* Make sure that the topmost argument register is not saved twice.
7430 The checks above ensure that the same is then true for the other
7431 argument registers. */
7432 if (nargs > 0 && BITSET_P (mask, GP_ARG_FIRST + nargs - 1))
7435 /* Pass back information, if requested. */
7438 info->nargs = nargs;
7440 info->size = (adjust > 0 ? adjust : -adjust);
7446 /* Add a MIPS16e SAVE or RESTORE register-range argument to string S
7447 for the register range [MIN_REG, MAX_REG]. Return a pointer to
7448 the null terminator. */
7451 mips16e_add_register_range (char *s, unsigned int min_reg,
7452 unsigned int max_reg)
/* A single register prints as ",$r"; a true range as ",$rmin-$rmax".  */
7454 if (min_reg != max_reg)
7455 s += sprintf (s, ",%s-%s", reg_names[min_reg], reg_names[max_reg]);
7457 s += sprintf (s, ",%s", reg_names[min_reg]);
7461 /* Return the assembly instruction for a MIPS16e SAVE or RESTORE instruction.
7462 PATTERN and ADJUST are as for mips16e_save_restore_pattern_p. */
/* Formats into a static buffer, so the returned string is only valid
   until the next call.  */
7465 mips16e_output_save_restore (rtx pattern, HOST_WIDE_INT adjust)
7467 static char buffer[300];
7469 struct mips16e_save_restore_info info;
7470 unsigned int i, end;
7473 /* Parse the pattern. */
7474 if (!mips16e_save_restore_pattern_p (pattern, adjust, &info))
7477 /* Add the mnemonic. */
7478 s = strcpy (buffer, adjust > 0 ? "restore\t" : "save\t");
7481 /* Save the arguments. */
7483 s += sprintf (s, "%s-%s,", reg_names[GP_ARG_FIRST],
7484 reg_names[GP_ARG_FIRST + info.nargs - 1]);
7485 else if (info.nargs == 1)
7486 s += sprintf (s, "%s,", reg_names[GP_ARG_FIRST]);
7488 /* Emit the amount of stack space to allocate or deallocate. */
7489 s += sprintf (s, "%d", (int) info.size);
7491 /* Save or restore $16. */
7492 if (BITSET_P (info.mask, 16))
7493 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 16]);
7495 /* Save or restore $17. */
7496 if (BITSET_P (info.mask, 17))
7497 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 17]);
7499 /* Save or restore registers in the range $s2...$s8, which
7500 mips16e_s2_s8_regs lists in decreasing order. Note that this
7501 is a software register range; the hardware registers are not
7502 numbered consecutively. */
7503 end = ARRAY_SIZE (mips16e_s2_s8_regs);
7504 i = mips16e_find_first_register (info.mask, mips16e_s2_s8_regs, end);
7506 s = mips16e_add_register_range (s, mips16e_s2_s8_regs[end - 1],
7507 mips16e_s2_s8_regs[i]);
7509 /* Save or restore registers in the range $a0...$a3. */
7510 end = ARRAY_SIZE (mips16e_a0_a3_regs);
7511 i = mips16e_find_first_register (info.mask, mips16e_a0_a3_regs, end);
7513 s = mips16e_add_register_range (s, mips16e_a0_a3_regs[i],
7514 mips16e_a0_a3_regs[end - 1]);
7516 /* Save or restore $31. */
7517 if (BITSET_P (info.mask, 31))
7518 s += sprintf (s, ",%s", reg_names[GP_REG_FIRST + 31]);
7523 /* Return a simplified form of X using the register values in REG_VALUES.
7524 REG_VALUES[R] is the last value assigned to hard register R, or null
7525 if R has not been modified.
7527 This function is rather limited, but is good enough for our purposes. */
7530 mips16e_collect_propagate_value (rtx x, rtx *reg_values)
7534 x = avoid_constant_pool_reference (x);
/* Propagate recursively through unary operations, resimplifying.  */
7538 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7539 return simplify_gen_unary (GET_CODE (x), GET_MODE (x),
7540 x0, GET_MODE (XEXP (x, 0)));
/* Likewise through binary arithmetic.  */
7543 if (ARITHMETIC_P (x))
7545 x0 = mips16e_collect_propagate_value (XEXP (x, 0), reg_values);
7546 x1 = mips16e_collect_propagate_value (XEXP (x, 1), reg_values);
7547 return simplify_gen_binary (GET_CODE (x), GET_MODE (x), x0, x1);
/* Substitute a register's recorded value only when it is stable.  */
7551 && reg_values[REGNO (x)]
7552 && !rtx_unstable_p (reg_values[REGNO (x)]))
7553 return reg_values[REGNO (x)];
7558 /* Return true if (set DEST SRC) stores an argument register into its
7559 caller-allocated save slot, storing the number of that argument
7560 register in *REGNO_PTR if so. REG_VALUES is as for
7561 mips16e_collect_propagate_value. */
7564 mips16e_collect_argument_save_p (rtx dest, rtx src, rtx *reg_values,
7565 unsigned int *regno_ptr)
7567 unsigned int argno, regno;
7568 HOST_WIDE_INT offset, required_offset;
7571 /* Check that this is a word-mode store. */
7572 if (!MEM_P (dest) || !REG_P (src) || GET_MODE (dest) != word_mode)
7575 /* Check that the register being saved is an unmodified argument
/* reg_values[regno] being nonnull means the register was overwritten
   earlier in the sequence, so it no longer holds the argument.  */
7577 regno = REGNO (src);
7578 if (regno < GP_ARG_FIRST || regno > GP_ARG_LAST || reg_values[regno])
7580 argno = regno - GP_ARG_FIRST;
7582 /* Check whether the address is an appropriate stack pointer or
7583 frame pointer access. The frame pointer is offset from the
7584 stack pointer by the size of the outgoing arguments. */
7585 addr = mips16e_collect_propagate_value (XEXP (dest, 0), reg_values);
7586 mips_split_plus (addr, &base, &offset);
/* Caller-allocated slots sit just above this function's whole frame.  */
7587 required_offset = cfun->machine->frame.total_size + argno * UNITS_PER_WORD;
7588 if (base == hard_frame_pointer_rtx)
7589 required_offset -= cfun->machine->frame.args_size;
7590 else if (base != stack_pointer_rtx)
7592 if (offset != required_offset)
7599 /* A subroutine of mips_expand_prologue, called only when generating
7600 MIPS16e SAVE instructions. Search the start of the function for any
7601 instructions that save argument registers into their caller-allocated
7602 save slots. Delete such instructions and return a value N such that
7603 saving [GP_ARG_FIRST, GP_ARG_FIRST + N) would make all the deleted
7604 instructions redundant. */
7607 mips16e_collect_argument_saves (void)
7609 rtx reg_values[FIRST_PSEUDO_REGISTER];
7610 rtx insn, next, set, dest, src;
7611 unsigned int nargs, regno;
7613 push_topmost_sequence ();
7615 memset (reg_values, 0, sizeof (reg_values));
7616 for (insn = get_insns (); insn; insn = next)
7618 next = NEXT_INSN (insn);
7625 set = PATTERN (insn);
7626 if (GET_CODE (set) != SET)
7629 dest = SET_DEST (set);
7630 src = SET_SRC (set);
/* Fixed: "&regno" had been corrupted into a mis-encoded "(R)" symbol
   (an HTML "&reg;" entity), which does not compile.  */
7631 if (mips16e_collect_argument_save_p (dest, src, reg_values, &regno))
/* Only count the save if SAVE will cover this register anyway.  */
7633 if (!BITSET_P (cfun->machine->frame.mask, regno))
7636 nargs = MAX (nargs, (regno - GP_ARG_FIRST) + 1);
/* Track word-mode register assignments so later address calculations
   through copies of $sp/$fp can still be recognized.  */
7639 else if (REG_P (dest) && GET_MODE (dest) == word_mode)
7640 reg_values[REGNO (dest)]
7641 = mips16e_collect_propagate_value (src, reg_values);
7645 pop_topmost_sequence ();
7650 /* Expand the prologue into a bunch of separate insns. */
7653 mips_expand_prologue (void)
7659 if (cfun->machine->global_pointer > 0)
7660 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
7662 size = compute_frame_size (get_frame_size ());
7664 /* Save the registers. Allocate up to MIPS_MAX_FIRST_STACK_STEP
7665 bytes beforehand; this is enough to cover the register save area
7666 without going out of range. */
7667 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7669 HOST_WIDE_INT step1;
7671 step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
7673 if (GENERATE_MIPS16E_SAVE_RESTORE)
7675 HOST_WIDE_INT offset;
7676 unsigned int mask, regno;
7678 /* Try to merge argument stores into the save instruction. */
7679 nargs = mips16e_collect_argument_saves ();
7681 /* Build the save instruction. */
7682 mask = cfun->machine->frame.mask;
7683 insn = mips16e_build_save_restore (false, &mask, &offset,
7685 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7688 /* Check if we need to save other registers. */
/* MASK now holds only the registers SAVE could not handle.  */
7689 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7690 if (BITSET_P (mask, regno - GP_REG_FIRST))
7692 offset -= GET_MODE_SIZE (gpr_mode);
7693 mips_save_restore_reg (gpr_mode, regno, offset, mips_save_reg);
/* Non-MIPS16e path: allocate STEP1 bytes, then store each register.  */
7698 insn = gen_add3_insn (stack_pointer_rtx,
7701 RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
7703 mips_for_each_saved_reg (size, mips_save_reg);
7707 /* Allocate the rest of the frame. */
7710 if (SMALL_OPERAND (-size))
7711 RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
7713 GEN_INT (-size)))) = 1;
7716 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
7719 /* There are no instructions to add or subtract registers
7720 from the stack pointer, so use the frame pointer as a
7721 temporary. We should always be using a frame pointer
7722 in this case anyway. */
7723 gcc_assert (frame_pointer_needed);
7724 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7725 emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
7726 hard_frame_pointer_rtx,
7727 MIPS_PROLOGUE_TEMP (Pmode)));
7728 emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
7731 emit_insn (gen_sub3_insn (stack_pointer_rtx,
7733 MIPS_PROLOGUE_TEMP (Pmode)));
7735 /* Describe the combined effect of the previous instructions. */
7737 (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
7738 plus_constant (stack_pointer_rtx, -size)));
7742 /* Set up the frame pointer, if we're using one. In mips16 code,
7743 we point the frame pointer ahead of the outgoing argument area.
7744 This should allow more variables & incoming arguments to be
7745 accessed with unextended instructions. */
7746 if (frame_pointer_needed)
7748 if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
7750 rtx offset = GEN_INT (cfun->machine->frame.args_size);
7751 if (SMALL_OPERAND (cfun->machine->frame.args_size))
7753 (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7758 emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
7759 emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
7760 emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
7761 hard_frame_pointer_rtx,
7762 MIPS_PROLOGUE_TEMP (Pmode)));
7764 (gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
7765 plus_constant (stack_pointer_rtx,
7766 cfun->machine->frame.args_size)));
7770 RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
7771 stack_pointer_rtx)) = 1;
7774 mips_emit_loadgp ();
7776 /* If generating o32/o64 abicalls, save $gp on the stack. */
7777 if (TARGET_ABICALLS && TARGET_OLDABI && !current_function_is_leaf)
7778 emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
7780 /* If we are profiling, make sure no instructions are scheduled before
7781 the call to mcount. */
7783 if (current_function_profile)
7784 emit_insn (gen_blockage ());
7787 /* Do any necessary cleanup after a function to restore stack, frame,
7790 #define RA_MASK BITMASK_HIGH /* 1 << 31 */
7793 mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
7794 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
7796 /* Reinstate the normal $gp. */
7797 SET_REGNO (pic_offset_table_rtx, GLOBAL_POINTER_REGNUM);
7798 mips_output_cplocal ();
7800 if (cfun->machine->all_noreorder_p)
7802 /* Avoid using %>%) since it adds excess whitespace. */
7803 output_asm_insn (".set\tmacro", 0);
7804 output_asm_insn (".set\treorder", 0);
7805 set_noreorder = set_nomacro = 0;
/* Emit the .end directive that closes the .ent from the prologue.  */
7808 if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
7812 /* Get the function name the same way that toplev.c does before calling
7813 assemble_start_function. This is needed so that the name used here
7814 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME. */
7815 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7816 fputs ("\t.end\t", file);
7817 assemble_name (file, fnname);
7822 /* Emit instructions to restore register REG from slot MEM. */
7825 mips_restore_reg (rtx reg, rtx mem)
7827 /* There's no mips16 instruction to load $31 directly. Load into
7828 $7 instead and adjust the return insn appropriately. */
7829 if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
7830 reg = gen_rtx_REG (GET_MODE (reg), 7);
/* MIPS16 loads can only target the MIPS16 register subset.  */
7832 if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
7834 /* Can't restore directly; move through a temporary. */
7835 emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
7836 emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
7839 emit_move_insn (reg, mem);
7843 /* Expand the epilogue into a bunch of separate insns. SIBCALL_P is true
7844 if this epilogue precedes a sibling call, false if it is for a normal
7845 "epilogue" pattern. */
7848 mips_expand_epilogue (int sibcall_p)
7850 HOST_WIDE_INT step1, step2;
/* A trivial frame lets us emit a bare "return" straight away.  */
7853 if (!sibcall_p && mips_can_use_return_insn ())
7855 emit_jump_insn (gen_return ());
7859 /* In mips16 mode, if the return value should go into a floating-point
7860 register, we need to call a helper routine to copy it over. */
7861 if (mips16_cfun_returns_in_fpr_p ())
7870 enum machine_mode return_mode;
7872 return_type = DECL_RESULT (current_function_decl);
7873 return_mode = DECL_MODE (return_type);
/* The helper name encodes the return mode, e.g. __mips16_ret_df.  */
7875 name = ACONCAT (("__mips16_ret_",
7876 mips16_call_stub_mode_suffix (return_mode),
7878 id = get_identifier (name);
7879 func = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
7880 retval = gen_rtx_REG (return_mode, GP_RETURN);
7881 call = gen_call_value_internal (retval, func, const0_rtx);
7882 insn = emit_call_insn (call);
7883 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), retval);
7886 /* Split the frame into two. STEP1 is the amount of stack we should
7887 deallocate before restoring the registers. STEP2 is the amount we
7888 should deallocate afterwards.
7890 Start off by assuming that no registers need to be restored. */
7891 step1 = cfun->machine->frame.total_size;
7894 /* Work out which register holds the frame address. Account for the
7895 frame pointer offset used by mips16 code. */
7896 if (!frame_pointer_needed)
7897 base = stack_pointer_rtx;
7900 base = hard_frame_pointer_rtx;
7902 step1 -= cfun->machine->frame.args_size;
7905 /* If we need to restore registers, deallocate as much stack as
7906 possible in the second step without going out of range. */
7907 if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7909 step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
7913 /* Set TARGET to BASE + STEP1. */
7919 /* Get an rtx for STEP1 that we can add to BASE. */
7920 adjust = GEN_INT (step1);
7921 if (!SMALL_OPERAND (step1))
7923 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7924 adjust = MIPS_EPILOGUE_TEMP (Pmode);
7927 /* Normal mode code can copy the result straight into $sp. */
7929 target = stack_pointer_rtx;
7931 emit_insn (gen_add3_insn (target, base, adjust));
7934 /* Copy TARGET into the stack pointer. */
7935 if (target != stack_pointer_rtx)
7936 emit_move_insn (stack_pointer_rtx, target);
7938 /* If we're using addressing macros, $gp is implicitly used by all
7939 SYMBOL_REFs. We must emit a blockage insn before restoring $gp
7941 if (TARGET_CALL_SAVED_GP && !TARGET_EXPLICIT_RELOCS)
7942 emit_insn (gen_blockage ());
7944 if (GENERATE_MIPS16E_SAVE_RESTORE && cfun->machine->frame.mask != 0)
7946 unsigned int regno, mask;
7947 HOST_WIDE_INT offset;
7950 /* Generate the restore instruction. */
/* MASK is left holding the registers RESTORE could not handle.  */
7951 mask = cfun->machine->frame.mask;
7952 restore = mips16e_build_save_restore (true, &mask, &offset, 0, step2);
7954 /* Restore any other registers manually. */
7955 for (regno = GP_REG_FIRST; regno < GP_REG_LAST; regno++)
7956 if (BITSET_P (mask, regno - GP_REG_FIRST))
7958 offset -= GET_MODE_SIZE (gpr_mode);
7959 mips_save_restore_reg (gpr_mode, regno, offset, mips_restore_reg);
7962 /* Restore the remaining registers and deallocate the final bit
7964 emit_insn (restore);
7968 /* Restore the registers. */
7969 mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
7972 /* Deallocate the final bit of the frame. */
7974 emit_insn (gen_add3_insn (stack_pointer_rtx,
7979 /* Add in the __builtin_eh_return stack adjustment. We need to
7980 use a temporary in mips16 code. */
7981 if (current_function_calls_eh_return)
7985 emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
7986 emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
7987 MIPS_EPILOGUE_TEMP (Pmode),
7988 EH_RETURN_STACKADJ_RTX));
7989 emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
7992 emit_insn (gen_add3_insn (stack_pointer_rtx,
7994 EH_RETURN_STACKADJ_RTX));
7999 /* When generating MIPS16 code, the normal mips_for_each_saved_reg
8000 path will restore the return address into $7 rather than $31. */
8002 && !GENERATE_MIPS16E_SAVE_RESTORE
8003 && (cfun->machine->frame.mask & RA_MASK) != 0
8004 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8005 GP_REG_FIRST + 7)));
8007 emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
8008 GP_REG_FIRST + 31)));
8012 /* Return nonzero if this function is known to have a null epilogue.
8013 This allows the optimizer to omit jumps to jumps if no stack
8017 mips_can_use_return_insn (void)
/* The frame layout is only trustworthy after reload.  */
8019 if (! reload_completed)
/* A live $31 or profiling code forces a real epilogue.  */
8022 if (df_regs_ever_live_p (31) || current_function_profile)
8025 /* In mips16 mode, a function that returns a floating point value
8026 needs to arrange to copy the return value into the floating point
8028 if (mips16_cfun_returns_in_fpr_p ())
/* Use the cached frame size if available, else compute it fresh.  */
8031 if (cfun->machine->frame.initialized)
8032 return cfun->machine->frame.total_size == 0;
8034 return compute_frame_size (get_frame_size ()) == 0;
8037 /* Implement TARGET_ASM_OUTPUT_MI_THUNK. Generate rtl rather than asm text
8038 in order to avoid duplicating too much logic from elsewhere. */
/* Emits a C++ "this-adjusting" thunk: add DELTA (and optionally the
   vtable entry at VCALL_OFFSET) to the incoming this pointer, then
   tail-jump to FUNCTION.  Output is written to FILE via final().  */
8041 mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
8042 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
8045 rtx this, temp1, temp2, insn, fnaddr;
8047 /* Pretend to be a post-reload pass while generating rtl. */
8048 reload_completed = 1;
8050 /* Mark the end of the (empty) prologue. */
8051 emit_note (NOTE_INSN_PROLOGUE_END);
8053 /* Pick a global pointer. Use a call-clobbered register if
8054 TARGET_CALL_SAVED_GP, so that we can use a sibcall. */
8057 cfun->machine->global_pointer =
8058 TARGET_CALL_SAVED_GP ? 15 : GLOBAL_POINTER_REGNUM;
8060 SET_REGNO (pic_offset_table_rtx, cfun->machine->global_pointer);
8064 /* Set up the global pointer for n32 or n64 abicalls. If
8065 LOADGP_ABSOLUTE then the thunk does not use the gp and there is
8066 no need to load it.*/
8067 if (mips_current_loadgp_style () != LOADGP_ABSOLUTE
8068 || !targetm.binds_local_p (function))
8069 mips_emit_loadgp ();
8071 /* We need two temporary registers in some cases. */
8072 temp1 = gen_rtx_REG (Pmode, 2);
8073 temp2 = gen_rtx_REG (Pmode, 3);
8075 /* Find out which register contains the "this" pointer. */
/* If the return value is passed by invisible reference, the hidden
   return-slot pointer occupies the first argument register and "this"
   is pushed to the second.  */
8076 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
8077 this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
8079 this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
8081 /* Add DELTA to THIS. */
8084 rtx offset = GEN_INT (delta);
/* Deltas outside the 16-bit immediate range must go through TEMP1.  */
8085 if (!SMALL_OPERAND (delta))
8087 emit_move_insn (temp1, offset);
8090 emit_insn (gen_add3_insn (this, this, offset));
8093 /* If needed, add *(*THIS + VCALL_OFFSET) to THIS. */
8094 if (vcall_offset != 0)
8098 /* Set TEMP1 to *THIS. */
8099 emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
8101 /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET. */
8102 addr = mips_add_offset (temp2, temp1, vcall_offset);
8104 /* Load the offset and add it to THIS. */
8105 emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
8106 emit_insn (gen_add3_insn (this, this, temp1));
8109 /* Jump to the target function. Use a sibcall if direct jumps are
8110 allowed, otherwise load the address into a register first. */
8111 fnaddr = XEXP (DECL_RTL (function), 0);
8112 if (TARGET_MIPS16 || TARGET_USE_GOT || SYMBOL_REF_LONG_CALL_P (fnaddr))
8114 /* This is messy. gas treats "la $25,foo" as part of a call
8115 sequence and may allow a global "foo" to be lazily bound.
8116 The general move patterns therefore reject this combination.
8118 In this context, lazy binding would actually be OK
8119 for TARGET_CALL_CLOBBERED_GP, but it's still wrong for
8120 TARGET_CALL_SAVED_GP; see mips_load_call_address.
8121 We must therefore load the address via a temporary
8122 register if mips_dangerous_for_la25_p.
8124 If we jump to the temporary register rather than $25, the assembler
8125 can use the move insn to fill the jump's delay slot. */
8126 if (TARGET_USE_PIC_FN_ADDR_REG
8127 && !mips_dangerous_for_la25_p (fnaddr))
8128 temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
8129 mips_load_call_address (temp1, fnaddr, true);
8131 if (TARGET_USE_PIC_FN_ADDR_REG
8132 && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
8133 emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
8134 emit_jump_insn (gen_indirect_jump (temp1));
/* Direct jumps are OK: emit a true sibling call.  */
8138 insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
8139 SIBLING_CALL_P (insn) = 1;
8142 /* Run just enough of rest_of_compilation. This sequence was
8143 "borrowed" from alpha.c. */
8144 insn = get_insns ();
8145 insn_locators_alloc ();
8146 split_all_insns_noflow ();
8148 mips16_lay_out_constants ();
8149 shorten_branches (insn);
8150 final_start_function (insn, file, 1);
8151 final (insn, file, 1);
8152 final_end_function ();
8154 /* Clean up the vars set above. Note that final_end_function resets
8155 the global pointer for us. */
8156 reload_completed = 0;
8159 /* Returns nonzero if X contains a SYMBOL_REF. */
/* Recurses through CONST wrappers and both operands of arithmetic
   rtxes; any embedded SYMBOL_REF makes the whole expression symbolic.  */
8162 symbolic_expression_p (rtx x)
8164 if (GET_CODE (x) == SYMBOL_REF)
8167 if (GET_CODE (x) == CONST)
8168 return symbolic_expression_p (XEXP (x, 0));
8171 return symbolic_expression_p (XEXP (x, 0));
8173 if (ARITHMETIC_P (x))
8174 return (symbolic_expression_p (XEXP (x, 0))
8175 || symbolic_expression_p (XEXP (x, 1)));
8180 /* Choose the section to use for the constant rtx expression X that has
/* Implements TARGET_ASM_SELECT_RTX_SECTION: pick a section for the
   constant X of MODE with alignment ALIGN.  */
8184 mips_select_rtx_section (enum machine_mode mode, rtx x,
8185 unsigned HOST_WIDE_INT align)
8189 /* In mips16 mode, the constant table always goes in the same section
8190 as the function, so that constants can be loaded using PC relative
8192 return function_section (current_function_decl);
8194 else if (TARGET_EMBEDDED_DATA)
8196 /* For embedded applications, always put constants in read-only data,
8197 in order to reduce RAM usage. */
8198 return mergeable_constant_section (mode, align, 0);
8202 /* For hosted applications, always put constants in small data if
8203 possible, as this gives the best performance. */
8204 /* ??? Consider using mergeable small data sections. */
8206 if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
8207 && mips_section_threshold > 0)
8208 return get_named_section (NULL, ".sdata", 0);
/* Symbolic constants need relocation, so under -fpic they go in
   .data.rel.ro rather than a pure read-only section.  */
8209 else if (flag_pic && symbolic_expression_p (x))
8210 return get_named_section (NULL, ".data.rel.ro", 3);
8212 return mergeable_constant_section (mode, align, 0);
8216 /* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
8218 The complication here is that, with the combination TARGET_ABICALLS
8219 && !TARGET_GPWORD, jump tables will use absolute addresses, and should
8220 therefore not be included in the read-only part of a DSO. Handle such
8221 cases by selecting a normal data section instead of a read-only one.
8222 The logic apes that in default_function_rodata_section. */
8225 mips_function_rodata_section (tree decl)
8227 if (!TARGET_ABICALLS || TARGET_GPWORD)
8228 return default_function_rodata_section (decl);
8230 if (decl && DECL_SECTION_NAME (decl))
8232 const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
/* .gnu.linkonce.t.FOO becomes a writable link-once data section.  */
8233 if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
8235 char *rname = ASTRDUP (name);
8237 return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
8239 else if (flag_function_sections && flag_data_sections
8240 && strncmp (name, ".text.", 6) == 0)
8242 char *rname = ASTRDUP (name);
/* Rewrite ".text.FOO" to ".data.FOO" in place ("text" and "data"
   are both four characters).  */
8243 memcpy (rname + 1, "data", 4);
8244 return get_section (rname, SECTION_WRITE, decl);
/* Fallback: plain writable data.  */
8247 return data_section;
8250 /* Implement TARGET_IN_SMALL_DATA_P. This function controls whether
8251 locally-defined objects go in a small data section. It also controls
8252 the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
8253 mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses. */
8256 mips_in_small_data_p (tree decl)
/* Strings and functions are never small data.  */
8260 if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
8263 /* We don't yet generate small-data references for -mabicalls or
8264 VxWorks RTP code. See the related -G handling in override_options. */
8265 if (TARGET_ABICALLS || TARGET_VXWORKS_RTP)
/* Variables with an explicit section attribute are honored only if
   that section is a known small-data one.  */
8268 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
8272 /* Reject anything that isn't in a known small-data section. */
8273 name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
8274 if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
8277 /* If a symbol is defined externally, the assembler will use the
8278 usual -G rules when deciding how to implement macros. */
8279 if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
8282 else if (TARGET_EMBEDDED_DATA)
8284 /* Don't put constants into the small data section: we want them
8285 to be in ROM rather than RAM. */
8286 if (TREE_CODE (decl) != VAR_DECL)
8289 if (TREE_READONLY (decl)
8290 && !TREE_SIDE_EFFECTS (decl)
8291 && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
/* Finally, only objects no larger than the -G threshold qualify.  */
8295 size = int_size_in_bytes (TREE_TYPE (decl));
8296 return (size > 0 && size <= mips_section_threshold);
8299 /* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P. We don't want to use
8300 anchors for small data: the GP register acts as an anchor in that
8301 case. We also don't want to use them for PC-relative accesses,
8302 where the PC acts as an anchor. */
8305 mips_use_anchors_for_symbol_p (rtx symbol)
/* Dispatch on the symbol's classification; constant-pool and
   small-data symbols already have an implicit anchor ($pc or $gp).  */
8307 switch (mips_classify_symbol (symbol))
8309 case SYMBOL_CONSTANT_POOL:
8310 case SYMBOL_SMALL_DATA:
8318 /* See whether VALTYPE is a record whose fields should be returned in
8319 floating-point registers. If so, return the number of fields and
8320 list them in FIELDS (which should have two elements). Return 0
8323 For n32 & n64, a structure with one or two fields is returned in
8324 floating-point registers as long as every field has a floating-point
8328 mips_fpr_return_fields (tree valtype, tree *fields)
/* Only RECORD_TYPEs (structs) qualify; anything else returns 0.  */
8336 if (TREE_CODE (valtype) != RECORD_TYPE)
/* Walk the member list, skipping non-FIELD_DECL entries (e.g. type
   declarations); a single non-REAL_TYPE field disqualifies the record.  */
8340 for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
8342 if (TREE_CODE (field) != FIELD_DECL)
8345 if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
8351 fields[i++] = field;
8357 /* Implement TARGET_RETURN_IN_MSB. For n32 & n64, we should return
8358 a value in the most significant part of $2/$3 if:
8360 - the target is big-endian;
8362 - the value has a structure or union type (we generalize this to
8363 cover aggregates from other languages too); and
8365 - the structure is not returned in floating-point registers. */
8368 mips_return_in_msb (tree valtype)
/* All four conditions from the comment above, in order.  */
8372 return (TARGET_NEWABI
8373 && TARGET_BIG_ENDIAN
8374 && AGGREGATE_TYPE_P (valtype)
8375 && mips_fpr_return_fields (valtype, fields) == 0);
8379 /* Return a composite value in a pair of floating-point registers.
8380 MODE1 and OFFSET1 are the mode and byte offset for the first value,
8381 likewise MODE2 and OFFSET2 for the second. MODE is the mode of the
8384 For n32 & n64, $f0 always holds the first value and $f2 the second.
8385 Otherwise the values are packed together as closely as possible. */
8388 mips_return_fpr_pair (enum machine_mode mode,
8389 enum machine_mode mode1, HOST_WIDE_INT offset1,
8390 enum machine_mode mode2, HOST_WIDE_INT offset2)
/* INC is the register-number distance between the two FPRs.  */
8394 inc = (TARGET_NEWABI ? 2 : MAX_FPRS_PER_FMT);
/* Build a PARALLEL of two (value, byte-offset) EXPR_LIST pairs, as
   FUNCTION_VALUE expects for multi-register returns.  */
8395 return gen_rtx_PARALLEL
8398 gen_rtx_EXPR_LIST (VOIDmode,
8399 gen_rtx_REG (mode1, FP_RETURN),
8401 gen_rtx_EXPR_LIST (VOIDmode,
8402 gen_rtx_REG (mode2, FP_RETURN + inc),
8403 GEN_INT (offset2))));
8408 /* Implement FUNCTION_VALUE and LIBCALL_VALUE. For normal calls,
8409 VALTYPE is the return type and MODE is VOIDmode. For libcalls,
8410 VALTYPE is null and MODE is the mode of the return value. */
8413 mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
8414 enum machine_mode mode)
8421 mode = TYPE_MODE (valtype);
8422 unsignedp = TYPE_UNSIGNED (valtype);
8424 /* Since we define TARGET_PROMOTE_FUNCTION_RETURN that returns
8425 true, we must promote the mode just as PROMOTE_MODE does. */
8426 mode = promote_mode (valtype, mode, &unsignedp, 1);
8428 /* Handle structures whose fields are returned in $f0/$f2. */
8429 switch (mips_fpr_return_fields (valtype, fields))
/* One FP field: whole value in FP_RETURN ($f0).  */
8432 return gen_rtx_REG (mode, FP_RETURN);
/* Two FP fields: split across the $f0/$f2 pair at each field's
   byte position.  */
8435 return mips_return_fpr_pair (mode,
8436 TYPE_MODE (TREE_TYPE (fields[0])),
8437 int_byte_position (fields[0]),
8438 TYPE_MODE (TREE_TYPE (fields[1])),
8439 int_byte_position (fields[1]));
8442 /* If a value is passed in the most significant part of a register, see
8443 whether we have to round the mode up to a whole number of words. */
8444 if (mips_return_in_msb (valtype))
8446 HOST_WIDE_INT size = int_size_in_bytes (valtype);
8447 if (size % UNITS_PER_WORD != 0)
8449 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
8450 mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
8454 /* For EABI, the class of return register depends entirely on MODE.
8455 For example, "struct { some_type x; }" and "union { some_type x; }"
8456 are returned in the same way as a bare "some_type" would be.
8457 Other ABIs only use FPRs for scalar, complex or vector types. */
8458 if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
8459 return gen_rtx_REG (mode, GP_RETURN);
8464 /* Handle long doubles for n32 & n64. */
/* The two 64-bit halves are returned as DImode values at offsets
   0 and size/2.  */
8466 return mips_return_fpr_pair (mode,
8468 DImode, GET_MODE_SIZE (mode) / 2);
8470 if (mips_return_mode_in_fpr_p (mode))
8472 if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
8473 return mips_return_fpr_pair (mode,
8474 GET_MODE_INNER (mode), 0,
8475 GET_MODE_INNER (mode),
8476 GET_MODE_SIZE (mode) / 2);
8478 return gen_rtx_REG (mode, FP_RETURN);
/* Default: return in the general-purpose return register.  */
8482 return gen_rtx_REG (mode, GP_RETURN);
8485 /* Return nonzero when an argument must be passed by reference. */
/* Implements TARGET_PASS_BY_REFERENCE.  Only EABI has MIPS-specific
   rules; other ABIs defer to the generic must_pass_in_stack hook.  */
8488 mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8489 enum machine_mode mode, tree type,
8490 bool named ATTRIBUTE_UNUSED)
8492 if (mips_abi == ABI_EABI)
8496 /* ??? How should SCmode be handled? */
8497 if (mode == DImode || mode == DFmode)
/* size == -1 means variable-sized; those and anything wider than a
   word go by reference under EABI.  */
8500 size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
8501 return size == -1 || size > UNITS_PER_WORD;
8505 /* If we have a variable-sized parameter, we have no choice. */
8506 return targetm.calls.must_pass_in_stack (mode, type);
/* Implement TARGET_CALLEE_COPIES: under EABI the callee, not the
   caller, copies named by-reference arguments.  */
8511 mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
8512 enum machine_mode mode ATTRIBUTE_UNUSED,
8513 tree type ATTRIBUTE_UNUSED, bool named)
8515 return mips_abi == ABI_EABI && named;
8518 /* Return true if registers of class CLASS cannot change from mode FROM
/* Implements CANNOT_CHANGE_MODE_CLASS: forbid mode changes that would
   give wrong subreg semantics, chiefly for FP registers.  */
8522 mips_cannot_change_mode_class (enum machine_mode from,
8523 enum machine_mode to, enum reg_class class)
/* Case 1: exactly one of FROM/TO is multi-word.  */
8525 if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
8526 && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
8528 if (TARGET_BIG_ENDIAN)
8530 /* When a multi-word value is stored in paired floating-point
8531 registers, the first register always holds the low word.
8532 We therefore can't allow FPRs to change between single-word
8533 and multi-word modes. */
8534 if (MAX_FPRS_PER_FMT > 1 && reg_classes_intersect_p (FP_REGS, class))
8539 /* gcc assumes that each word of a multiword register can be accessed
8540 individually using SUBREGs. This is not true for floating-point
8541 registers if they are bigger than a word. */
8542 if (UNITS_PER_FPREG > UNITS_PER_WORD
8543 && GET_MODE_SIZE (from) > UNITS_PER_WORD
8544 && GET_MODE_SIZE (to) < UNITS_PER_FPREG
8545 && reg_classes_intersect_p (FP_REGS, class))
8548 /* Loading a 32-bit value into a 64-bit floating-point register
8549 will not sign-extend the value, despite what LOAD_EXTEND_OP says.
8550 We can't allow 64-bit float registers to change from SImode to
8555 && GET_MODE_SIZE (to) >= UNITS_PER_WORD
8556 && reg_classes_intersect_p (FP_REGS, class))
8562 /* Return true if X should not be moved directly into register $25.
8563 We need this because many versions of GAS will treat "la $25,foo" as
8564 part of a call sequence and so allow a global "foo" to be lazily bound. */
8567 mips_dangerous_for_la25_p (rtx x)
/* Only an issue for global symbols when relocations are implicit
   (i.e. !TARGET_EXPLICIT_RELOCS, where gas expands "la" itself).  */
8569 return (!TARGET_EXPLICIT_RELOCS
8571 && GET_CODE (x) == SYMBOL_REF
8572 && mips_global_symbol_p (x));
8575 /* Implement PREFERRED_RELOAD_CLASS. */
8578 mips_preferred_reload_class (rtx x, enum reg_class class)
/* Steer la25-dangerous symbols away from $25 by preferring LEA_REGS.  */
8580 if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
/* Hard-float values prefer FP registers when CLASS allows them.  */
8583 if (TARGET_HARD_FLOAT
8584 && FLOAT_MODE_P (GET_MODE (x))
8585 && reg_class_subset_p (FP_REGS, class))
8588 if (reg_class_subset_p (GR_REGS, class))
8591 if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
8597 /* This function returns the register class required for a secondary
8598 register when copying between one of the registers in CLASS, and X,
8599 using MODE. If IN_P is nonzero, the copy is going from X to the
8600 register, otherwise the register is the source. A return value of
8601 NO_REGS means that no secondary register is required. */
8604 mips_secondary_reload_class (enum reg_class class,
8605 enum machine_mode mode, rtx x, int in_p)
/* In mips16 mode only M16_REGS are directly usable as general regs.  */
8607 enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
8611 if (REG_P (x)|| GET_CODE (x) == SUBREG)
8612 regno = true_regnum (x);
8614 gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
/* Symbols that must not be loaded straight into $25 need an
   intermediate register whenever CLASS contains $25.  */
8616 if (mips_dangerous_for_la25_p (x))
8619 if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
8623 /* Copying from HI or LO to anywhere other than a general register
8624 requires a general register.
8625 This rule applies to both the original HI/LO pair and the new
8626 DSP accumulators. */
8627 if (reg_class_subset_p (class, ACC_REGS))
8629 if (TARGET_MIPS16 && in_p)
8631 /* We can't really copy to HI or LO at all in mips16 mode. */
8634 return gp_reg_p ? NO_REGS : gr_regs;
8636 if (ACC_REG_P (regno))
8638 if (TARGET_MIPS16 && ! in_p)
8640 /* We can't really copy to HI or LO at all in mips16 mode. */
8643 return class == gr_regs ? NO_REGS : gr_regs;
8646 /* We can only copy a value to a condition code register from a
8647 floating point register, and even then we require a scratch
8648 floating point register. We can only copy a value out of a
8649 condition code register into a general register. */
8650 if (class == ST_REGS)
8654 return gp_reg_p ? NO_REGS : gr_regs;
8656 if (ST_REG_P (regno))
8660 return class == gr_regs ? NO_REGS : gr_regs;
8663 if (class == FP_REGS)
8667 /* In this case we can use lwc1, swc1, ldc1 or sdc1. */
8670 else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
8672 /* We can use the l.s and l.d macros to load floating-point
8673 constants. ??? For l.s, we could probably get better
8674 code by returning GR_REGS here. */
8677 else if (gp_reg_p || x == CONST0_RTX (mode))
8679 /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1. */
8682 else if (FP_REG_P (regno))
8684 /* In this case we can use mov.s or mov.d. */
8689 /* Otherwise, we need to reload through an integer register. */
8694 /* In mips16 mode, going between memory and anything but M16_REGS
8695 requires an M16_REG. */
8698 if (class != M16_REGS && class != M16_NA_REGS)
8706 if (class == M16_REGS || class == M16_NA_REGS)
8715 /* Implement CLASS_MAX_NREGS.
8717 - UNITS_PER_FPREG controls the number of registers needed by FP_REGS.
8719 - ST_REGS are always hold CCmode values, and CCmode values are
8720 considered to be 4 bytes wide.
8722 All other register classes are covered by UNITS_PER_WORD. Note that
8723 this is true even for unions of integer and float registers when the
8724 latter are smaller than the former. The only supported combination
8725 in which case this occurs is -mgp64 -msingle-float, which has 64-bit
8726 words but 32-bit float registers. A word-based calculation is correct
8727 in that case since -msingle-float disallows multi-FPR values. */
8730 mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
8731 enum machine_mode mode)
/* Each case rounds the mode size up to a whole number of registers.  */
8733 if (class == ST_REGS)
8734 return (GET_MODE_SIZE (mode) + 3) / 4;
8735 else if (class == FP_REGS)
8736 return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
8738 return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
/* Implement TARGET_VALID_POINTER_MODE: SImode always works; DImode
   pointers are valid only on 64-bit targets.  */
8742 mips_valid_pointer_mode (enum machine_mode mode)
8744 return (mode == SImode || (TARGET_64BIT && mode == DImode));
8747 /* Target hook for vector_mode_supported_p. */
8750 mips_vector_mode_supported_p (enum machine_mode mode)
/* Paired-single vectors are supported only with -mpaired-single.  */
8755 return TARGET_PAIRED_SINGLE_FLOAT;
8766 /* If we can access small data directly (using gp-relative relocation
8767 operators) return the small data pointer, otherwise return null.
8769 For each mips16 function which refers to GP relative symbols, we
8770 use a pseudo register, initialized at the start of the function, to
8771 hold the $gp value. */
8774 mips16_gp_pseudo_reg (void)
/* Lazily create and initialize the per-function $gp pseudo, caching
   it in cfun->machine so repeat calls reuse the same register.  */
8776 if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
8780 cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
8782 /* We want to initialize this to a value which gcc will believe
8784 insn = gen_load_const_gp (cfun->machine->mips16_gp_pseudo_rtx);
8786 push_topmost_sequence ();
8787 /* We need to emit the initialization after the FUNCTION_BEG
8788 note, so that it will be integrated. */
8789 for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
8791 && NOTE_KIND (scan) == NOTE_INSN_FUNCTION_BEG)
/* No FUNCTION_BEG note found: fall back to the insn stream head.  */
8793 if (scan == NULL_RTX)
8794 scan = get_insns ();
8795 insn = emit_insn_after (insn, scan);
8796 pop_topmost_sequence ();
8799 return cfun->machine->mips16_gp_pseudo_rtx;
8802 /* Write out code to move floating point arguments in or out of
8803 general registers. Output the instructions to FILE. FP_CODE is
8804 the code describing which arguments are present (see the comment at
8805 the definition of CUMULATIVE_ARGS in mips.h). FROM_FP_P is nonzero if
8806 we are copying from the floating point registers. */
8809 mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
8814 CUMULATIVE_ARGS cum;
8816 /* This code only works for the original 32-bit ABI and the O64 ABI. */
8817 gcc_assert (TARGET_OLDABI);
8824 init_cumulative_args (&cum, NULL, NULL);
/* FP_CODE packs one argument per 2-bit field: 1 = float, 2 = double
   (per the CUMULATIVE_ARGS comment referenced above).  */
8826 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8828 enum machine_mode mode;
8829 struct mips_arg_info info;
8833 else if ((f & 3) == 2)
/* Work out which GP and FP registers hold this argument.  */
8838 mips_arg_info (&cum, mode, NULL, true, &info);
8839 gparg = mips_arg_regno (&info, false);
8840 fparg = mips_arg_regno (&info, true);
8843 fprintf (file, "\t%s\t%s,%s\n", s,
8844 reg_names[gparg], reg_names[fparg]);
8845 else if (TARGET_64BIT)
8846 fprintf (file, "\td%s\t%s,%s\n", s,
8847 reg_names[gparg], reg_names[fparg]);
8848 else if (ISA_HAS_MXHC1)
8849 /* -mips32r2 -mfp64 */
8850 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n",
8852 reg_names[gparg + (WORDS_BIG_ENDIAN ? 1 : 0)],
8854 from_fp_p ? "mfhc1" : "mthc1",
8855 reg_names[gparg + (WORDS_BIG_ENDIAN ? 0 : 1)],
/* 32-bit doubles: move both halves, order depending on endianness.  */
8857 else if (TARGET_BIG_ENDIAN)
8858 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8859 reg_names[gparg], reg_names[fparg + 1], s,
8860 reg_names[gparg + 1], reg_names[fparg]);
8862 fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
8863 reg_names[gparg], reg_names[fparg], s,
8864 reg_names[gparg + 1], reg_names[fparg + 1]);
8866 function_arg_advance (&cum, mode, NULL, true);
8870 /* Build a mips16 function stub. This is used for functions which
8871 take arguments in the floating point registers. It is 32-bit code
8872 that moves the floating point args into the general registers, and
8873 then jumps to the 16-bit code. */
8876 build_mips16_function_stub (FILE *file)
8879 char *secname, *stubname;
8880 tree stubid, stubdecl;
/* Build the stub's section name (.mips16.fn.NAME) and symbol name
   (__fn_stub_NAME), plus a FUNCTION_DECL so we can switch sections.  */
8884 fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
8885 secname = (char *) alloca (strlen (fnname) + 20);
8886 sprintf (secname, ".mips16.fn.%s", fnname);
8887 stubname = (char *) alloca (strlen (fnname) + 20);
8888 sprintf (stubname, "__fn_stub_%s", fnname);
8889 stubid = get_identifier (stubname);
8890 stubdecl = build_decl (FUNCTION_DECL, stubid,
8891 build_function_type (void_type_node, NULL_TREE));
8892 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8893 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
/* Emit a human-readable comment listing the FP argument types.  */
8895 fprintf (file, "\t# Stub function for %s (", current_function_name ());
8897 for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
8899 fprintf (file, "%s%s",
8900 need_comma ? ", " : "",
8901 (f & 3) == 1 ? "float" : "double");
8904 fprintf (file, ")\n");
/* The stub itself is 32-bit code, hence .set nomips16.  */
8906 fprintf (file, "\t.set\tnomips16\n");
8907 switch_to_section (function_section (stubdecl));
8908 ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
8910 /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
8911 within a .ent, and we cannot emit another .ent. */
8912 if (!FUNCTION_NAME_ALREADY_DECLARED)
8914 fputs ("\t.ent\t", file);
8915 assemble_name (file, stubname);
8919 assemble_name (file, stubname);
8920 fputs (":\n", file);
8922 /* We don't want the assembler to insert any nops here. */
8923 fprintf (file, "\t.set\tnoreorder\n");
/* Move the FP arguments into general registers (from_fp_p == 1).  */
8925 mips16_fp_args (file, current_function_args_info.fp_code, 1);
/* Jump to the real (mips16) function via $1/$at.  */
8927 fprintf (asm_out_file, "\t.set\tnoat\n");
8928 fprintf (asm_out_file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
8929 assemble_name (file, fnname);
8930 fprintf (file, "\n");
8931 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8932 fprintf (asm_out_file, "\t.set\tat\n");
8934 /* Unfortunately, we can't fill the jump delay slot. We can't fill
8935 with one of the mfc1 instructions, because the result is not
8936 available for one instruction, so if the very first instruction
8937 in the function refers to the register, it will see the wrong
8939 fprintf (file, "\tnop\n");
8941 fprintf (file, "\t.set\treorder\n");
8943 if (!FUNCTION_NAME_ALREADY_DECLARED)
8945 fputs ("\t.end\t", file);
8946 assemble_name (file, stubname);
/* Restore mips16 mode and the original function's section.  */
8950 fprintf (file, "\t.set\tmips16\n");
8952 switch_to_section (function_section (current_function_decl));
8955 /* We keep a list of functions for which we have already built stubs
8956 in build_mips16_call_stub. */
/* Singly-linked list node; NEXT chains the stubs together.  */
8960 struct mips16_stub *next;
/* Head of the list of already-built call stubs.  */
8965 static struct mips16_stub *mips16_stubs;
8967 /* Emit code to return a double value from a mips16 stub. GPREG is the
8968 first GP reg to use, FPREG is the first FP reg to use. */
8971 mips16_fpret_double (int gpreg, int fpreg)
/* 64-bit GPRs: a single dmfc1 moves the whole double.  */
8974 fprintf (asm_out_file, "\tdmfc1\t%s,%s\n",
8975 reg_names[gpreg], reg_names[fpreg]);
/* 64-bit FPRs with 32-bit GPRs: move low half with mfc1 and high
   half with mfhc1, ordered by word endianness.  */
8976 else if (TARGET_FLOAT64)
8978 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8979 reg_names[gpreg + WORDS_BIG_ENDIAN],
8981 fprintf (asm_out_file, "\tmfhc1\t%s,%s\n",
8982 reg_names[gpreg + !WORDS_BIG_ENDIAN],
/* 32-bit FPR pairs: move each half with mfc1; the pairing order
   depends on target endianness.  */
8987 if (TARGET_BIG_ENDIAN)
8989 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8990 reg_names[gpreg + 0],
8991 reg_names[fpreg + 1]);
8992 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8993 reg_names[gpreg + 1],
8994 reg_names[fpreg + 0]);
8998 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8999 reg_names[gpreg + 0],
9000 reg_names[fpreg + 0]);
9001 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9002 reg_names[gpreg + 1],
9003 reg_names[fpreg + 1]);
9008 /* Build a call stub for a mips16 call. A stub is needed if we are
9009 passing any floating point values which should go into the floating
9010 point registers. If we are, and the call turns out to be to a
9011 32-bit function, the stub will be used to move the values into the
9012 floating point registers before calling the 32-bit function. The
9013 linker will magically adjust the function call to either the 16-bit
9014 function or the 32-bit stub, depending upon where the function call
9015 is actually defined.
9017 Similarly, we need a stub if the return value might come back in a
9018 floating point register.
9020 RETVAL is the location of the return value, or null if this is
9021 a call rather than a call_value. FN is the address of the
9022 function and ARG_SIZE is the size of the arguments. FP_CODE
9023 is the code built by function_arg. This function returns a nonzero
9024 value if it builds the call instruction itself. */
9027 build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
9031 char *secname, *stubname;
9032 struct mips16_stub *l;
9033 tree stubid, stubdecl;
9037 /* We don't need to do anything if we aren't in mips16 mode, or if
9038 we were invoked with the -msoft-float option. */
9039 if (!mips16_hard_float)
9042 /* Figure out whether the value might come back in a floating point
9045 fpret = mips_return_mode_in_fpr_p (GET_MODE (retval));
9047 /* We don't need to do anything if there were no floating point
9048 arguments and the value will not be returned in a floating point
9050 if (fp_code == 0 && ! fpret)
9053 /* We don't need to do anything if this is a call to a special
9054 mips16 support function. */
9055 if (GET_CODE (fn) == SYMBOL_REF
9056 && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
9059 /* This code will only work for o32 and o64 abis. The other ABI's
9060 require more sophisticated support. */
9061 gcc_assert (TARGET_OLDABI);
9063 /* If we're calling via a function pointer, then we must always call
9064 via a stub. There are magic stubs provided in libgcc.a for each
9065 of the required cases. Each of them expects the function address
9066 to arrive in register $2. */
9068 if (GET_CODE (fn) != SYMBOL_REF)
9074 /* ??? If this code is modified to support other ABI's, we need
9075 to handle PARALLEL return values here. */
9078 sprintf (buf, "__mips16_call_stub_%s_%d",
9079 mips16_call_stub_mode_suffix (GET_MODE (retval)),
9082 sprintf (buf, "__mips16_call_stub_%d",
9085 id = get_identifier (buf);
9086 stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
9088 emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
9090 if (retval == NULL_RTX)
9091 insn = gen_call_internal (stub_fn, arg_size);
9093 insn = gen_call_value_internal (retval, stub_fn, arg_size);
9094 insn = emit_call_insn (insn);
9096 /* Put the register usage information on the CALL. */
9097 CALL_INSN_FUNCTION_USAGE (insn) =
9098 gen_rtx_EXPR_LIST (VOIDmode,
9099 gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
9100 CALL_INSN_FUNCTION_USAGE (insn));
9102 /* If we are handling a floating point return value, we need to
9103 save $18 in the function prologue. Putting a note on the
9104 call will mean that df_regs_ever_live_p ($18) will be true if the
9105 call is not eliminated, and we can check that in the prologue
9108 CALL_INSN_FUNCTION_USAGE (insn) =
9109 gen_rtx_EXPR_LIST (VOIDmode,
9110 gen_rtx_USE (VOIDmode,
9111 gen_rtx_REG (word_mode, 18)),
9112 CALL_INSN_FUNCTION_USAGE (insn));
9114 /* Return 1 to tell the caller that we've generated the call
9119 /* We know the function we are going to call. If we have already
9120 built a stub, we don't need to do anything further. */
9122 fnname = XSTR (fn, 0);
9123 for (l = mips16_stubs; l != NULL; l = l->next)
9124 if (strcmp (l->name, fnname) == 0)
9129 /* Build a special purpose stub. When the linker sees a
9130 function call in mips16 code, it will check where the target
9131 is defined. If the target is a 32-bit call, the linker will
9132 search for the section defined here. It can tell which
9133 symbol this section is associated with by looking at the
9134 relocation information (the name is unreliable, since this
9135 might be a static function). If such a section is found, the
9136 linker will redirect the call to the start of the magic
9139 If the function does not return a floating point value, the
9140 special stub section is named
9143 If the function does return a floating point value, the stub
9145 .mips16.call.fp.FNNAME
9148 secname = (char *) alloca (strlen (fnname) + 40);
9149 sprintf (secname, ".mips16.call.%s%s",
9152 stubname = (char *) alloca (strlen (fnname) + 20);
9153 sprintf (stubname, "__call_stub_%s%s",
9156 stubid = get_identifier (stubname);
9157 stubdecl = build_decl (FUNCTION_DECL, stubid,
9158 build_function_type (void_type_node, NULL_TREE));
9159 DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
9160 DECL_RESULT (stubdecl) = build_decl (RESULT_DECL, NULL_TREE, void_type_node);
9162 fprintf (asm_out_file, "\t# Stub function to call %s%s (",
9164 ? (GET_MODE (retval) == SFmode ? "float " : "double ")
9168 for (f = (unsigned int) fp_code; f != 0; f >>= 2)
9170 fprintf (asm_out_file, "%s%s",
9171 need_comma ? ", " : "",
9172 (f & 3) == 1 ? "float" : "double");
9175 fprintf (asm_out_file, ")\n");
9177 fprintf (asm_out_file, "\t.set\tnomips16\n");
9178 assemble_start_function (stubdecl, stubname);
9180 if (!FUNCTION_NAME_ALREADY_DECLARED)
9182 fputs ("\t.ent\t", asm_out_file);
9183 assemble_name (asm_out_file, stubname);
9184 fputs ("\n", asm_out_file);
9186 assemble_name (asm_out_file, stubname);
9187 fputs (":\n", asm_out_file);
9190 /* We build the stub code by hand. That's the only way we can
9191 do it, since we can't generate 32-bit code during a 16-bit
9194 /* We don't want the assembler to insert any nops here. */
9195 fprintf (asm_out_file, "\t.set\tnoreorder\n");
9197 mips16_fp_args (asm_out_file, fp_code, 0);
9201 fprintf (asm_out_file, "\t.set\tnoat\n");
9202 fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
9204 fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
9205 fprintf (asm_out_file, "\t.set\tat\n");
9206 /* Unfortunately, we can't fill the jump delay slot. We
9207 can't fill with one of the mtc1 instructions, because the
9208 result is not available for one instruction, so if the
9209 very first instruction in the function refers to the
9210 register, it will see the wrong value. */
9211 fprintf (asm_out_file, "\tnop\n");
9215 fprintf (asm_out_file, "\tmove\t%s,%s\n",
9216 reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
9217 fprintf (asm_out_file, "\tjal\t%s\n", fnname);
9218 /* As above, we can't fill the delay slot. */
9219 fprintf (asm_out_file, "\tnop\n");
9220 if (GET_MODE (retval) == SFmode)
9221 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9222 reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
9223 else if (GET_MODE (retval) == SCmode)
9225 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9226 reg_names[GP_REG_FIRST + 2],
9227 reg_names[FP_REG_FIRST + 0]);
9228 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9229 reg_names[GP_REG_FIRST + 3],
9230 reg_names[FP_REG_FIRST + MAX_FPRS_PER_FMT]);
9232 else if (GET_MODE (retval) == DFmode
9233 || GET_MODE (retval) == V2SFmode)
9235 mips16_fpret_double (GP_REG_FIRST + 2, FP_REG_FIRST + 0);
9237 else if (GET_MODE (retval) == DCmode)
9239 mips16_fpret_double (GP_REG_FIRST + 2,
9241 mips16_fpret_double (GP_REG_FIRST + 4,
9242 FP_REG_FIRST + MAX_FPRS_PER_FMT);
9246 if (TARGET_BIG_ENDIAN)
9248 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9249 reg_names[GP_REG_FIRST + 2],
9250 reg_names[FP_REG_FIRST + 1]);
9251 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9252 reg_names[GP_REG_FIRST + 3],
9253 reg_names[FP_REG_FIRST + 0]);
9257 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9258 reg_names[GP_REG_FIRST + 2],
9259 reg_names[FP_REG_FIRST + 0]);
9260 fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
9261 reg_names[GP_REG_FIRST + 3],
9262 reg_names[FP_REG_FIRST + 1]);
9265 fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
9266 /* As above, we can't fill the delay slot. */
9267 fprintf (asm_out_file, "\tnop\n");
9270 fprintf (asm_out_file, "\t.set\treorder\n");
9272 #ifdef ASM_DECLARE_FUNCTION_SIZE
9273 ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
9276 if (!FUNCTION_NAME_ALREADY_DECLARED)
9278 fputs ("\t.end\t", asm_out_file);
9279 assemble_name (asm_out_file, stubname);
9280 fputs ("\n", asm_out_file);
9283 fprintf (asm_out_file, "\t.set\tmips16\n");
9285 /* Record this stub. */
9286 l = (struct mips16_stub *) xmalloc (sizeof *l);
9287 l->name = xstrdup (fnname);
9289 l->next = mips16_stubs;
9293 /* If we expect a floating point return value, but we've built a
9294 stub which does not expect one, then we're in trouble. We can't
9295 use the existing stub, because it won't handle the floating point
9296 value. We can't build a new stub, because the linker won't know
9297 which stub to use for the various calls in this object file.
9298 Fortunately, this case is illegal, since it means that a function
9299 was declared in two different ways in a single compilation. */
9300 if (fpret && ! l->fpret)
9301 error ("cannot handle inconsistent calls to %qs", fnname);
9303 /* If we are calling a stub which handles a floating point return
9304 value, we need to arrange to save $18 in the prologue. We do
9305 this by marking the function call as using the register. The
9306 prologue will later see that it is used, and emit code to save
9313 if (retval == NULL_RTX)
9314 insn = gen_call_internal (fn, arg_size);
9316 insn = gen_call_value_internal (retval, fn, arg_size);
9317 insn = emit_call_insn (insn);
9319 CALL_INSN_FUNCTION_USAGE (insn) =
9320 gen_rtx_EXPR_LIST (VOIDmode,
9321 gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
9322 CALL_INSN_FUNCTION_USAGE (insn));
9324 /* Return 1 to tell the caller that we've generated the call
9329 /* Return 0 to let the caller generate the call insn. */
9333 /* An entry in the mips16 constant pool.  VALUE is the pool constant,
9334    MODE is its mode, and LABEL is the CODE_LABEL associated with it.  */
9336 struct mips16_constant {
/* Next entry in the pool's singly-linked list; entries are kept in
   increasing order of mode size (see add_constant).  */
9337   struct mips16_constant *next;
9340   enum machine_mode mode;
9343 /* Information about an incomplete mips16 constant pool.  FIRST is the
9344    first constant, HIGHEST_ADDRESS is the highest address that the first
9345    byte of the pool can have, and INSN_ADDRESS is the current instruction
9348 struct mips16_constant_pool {
9349   struct mips16_constant *first;
/* Worst-case address of the first out-of-range byte; tightened each time
   a constant is added (see add_constant).  */
9350   int highest_address;
9354 /* Add constant VALUE to POOL and return its label.  MODE is the
9355    value's mode (used for CONST_INTs, etc.).  */
9358 add_constant (struct mips16_constant_pool *pool,
9359 rtx value, enum machine_mode mode)
9361   struct mips16_constant **p, *c;
9362   bool first_of_size_p;
9364   /* See whether the constant is already in the pool.  If so, return the
9365      existing label, otherwise leave P pointing to the place where the
9366      constant should be added.
9368      Keep the pool sorted in increasing order of mode size so that we can
9369      reduce the number of alignments needed.  */
9370   first_of_size_p = true;
9371   for (p = &pool->first; *p != 0; p = &(*p)->next)
/* Duplicate constant: reuse the existing entry rather than growing
   the pool.  */
9373       if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
9375       if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
9377       if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
9378 	first_of_size_p = false;
9381   /* In the worst case, the constant needed by the earliest instruction
9382      will end up at the end of the pool.  The entire pool must then be
9383      accessible from that instruction.
9385      When adding the first constant, set the pool's highest address to
9386      the address of the first out-of-range byte.  Adjust this address
9387      downwards each time a new constant is added.  */
9388   if (pool->first == 0)
9389     /* For pc-relative lw, addiu and daddiu instructions, the base PC value
9390        is the address of the instruction with the lowest two bits clear.
9391        The base PC value for ld has the lowest three bits clear.  Assume
9392        the worst case here.  */
/* 0x8000 is the reach of the 16-bit pc-relative offset; the
   (UNITS_PER_WORD - 2) term accounts for the worst-case base-PC
   rounding described above.  */
9393     pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
9394   pool->highest_address -= GET_MODE_SIZE (mode);
9395   if (first_of_size_p)
9396     /* Take into account the worst possible padding due to alignment.  */
9397     pool->highest_address -= GET_MODE_SIZE (mode) - 1;
9399   /* Create a new entry.  */
9400   c = (struct mips16_constant *) xmalloc (sizeof *c);
9403   c->label = gen_label_rtx ();
9410 /* Output constant VALUE after instruction INSN and return the last
9411    instruction emitted.  MODE is the mode of the constant.  */
9414 dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
9416   switch (GET_MODE_CLASS (mode))
9420 	rtx size = GEN_INT (GET_MODE_SIZE (mode));
9421 	return emit_insn_after (gen_consttable_int (value, size), insn);
9425       return emit_insn_after (gen_consttable_float (value), insn);
9427     case MODE_VECTOR_FLOAT:
9428     case MODE_VECTOR_INT:
/* Vector constants are emitted element by element, recursing with
   the vector's inner (element) mode.  */
9431 	for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
9432 	  insn = dump_constants_1 (GET_MODE_INNER (mode),
9433 				   CONST_VECTOR_ELT (value, i), insn);
9443 /* Dump out the constants in CONSTANTS after INSN.  */
9446 dump_constants (struct mips16_constant *constants, rtx insn)
9448   struct mips16_constant *c, *next;
9452   for (c = constants; c != NULL; c = next)
9454       /* If necessary, increase the alignment of PC.  */
9455       if (align < GET_MODE_SIZE (c->mode))
9457 	  int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
9458 	  insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
/* Mode sizes are non-decreasing (add_constant keeps the pool sorted),
   so each alignment directive is emitted at most once per size.  */
9460 	  align = GET_MODE_SIZE (c->mode);
9462       insn = emit_label_after (c->label, insn);
9463       insn = dump_constants_1 (c->mode, c->value, insn);
/* Terminate the pool with a barrier so it is never reached by
   fall-through execution.  */
9469   emit_barrier_after (insn);
9472 /* Return the length of instruction INSN.  */
9475 mips16_insn_length (rtx insn)
9479       rtx body = PATTERN (insn);
/* Jump tables: the length is the entry size (from the vector's mode)
   times the number of entries.  ADDR_DIFF_VEC keeps its entries in
   operand 1; ADDR_VEC keeps them in operand 0.  */
9480       if (GET_CODE (body) == ADDR_VEC)
9481 	return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
9482       if (GET_CODE (body) == ADDR_DIFF_VEC)
9483 	return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
/* Everything else uses the length computed by the insn attributes.  */
9485   return get_attr_length (insn);
9488 /* Rewrite *X so that constant pool references refer to the constant's
9489    label instead.  DATA points to the constant pool structure.  */
9492 mips16_rewrite_pool_refs (rtx *x, void *data)
9494   struct mips16_constant_pool *pool = data;
/* Replace a reference to the generic constant pool with a label
   reference into our own mips16 pool, adding the constant on demand.  */
9495   if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
9496     *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
9497 						 get_pool_constant (*x),
9498 						 get_pool_mode (*x)));
9502 /* Build MIPS16 constant pools.  */
9505 mips16_lay_out_constants (void)
9507   struct mips16_constant_pool pool;
9511   memset (&pool, 0, sizeof (pool));
9512   for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
9514       /* Rewrite constant pool references in INSN.  */
9516 	for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
/* Track the running byte address so we know when the pool would go
   out of range of its first user.  */
9518       pool.insn_address += mips16_insn_length (insn);
9520       if (pool.first != NULL)
9522 	  /* If there are no natural barriers between the first user of
9523 	     the pool and the highest acceptable address, we'll need to
9524 	     create a new instruction to jump around the constant pool.
9525 	     In the worst case, this instruction will be 4 bytes long.
9527 	     If it's too late to do this transformation after INSN,
9528 	     do it immediately before INSN.  */
9529 	  if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
9533 	      label = gen_label_rtx ();
9535 	      jump = emit_jump_insn_before (gen_jump (label), insn);
9536 	      JUMP_LABEL (jump) = label;
9537 	      LABEL_NUSES (label) = 1;
9538 	      barrier = emit_barrier_after (jump);
9540 	      emit_label_after (label, barrier);
9541 	      pool.insn_address += 4;
9544 	  /* See whether the constant pool is now out of range of the first
9545 	     user.  If so, output the constants after the previous barrier.
9546 	     Note that any instructions between BARRIER and INSN (inclusive)
9547 	     will use negative offsets to refer to the pool.  */
9548 	  if (pool.insn_address > pool.highest_address)
9550 	      dump_constants (pool.first, barrier);
9554       else if (BARRIER_P (insn))
/* Flush any remaining constants at the end of the function.  */
9558     dump_constants (pool.first, get_last_insn ());
9561 /* A temporary variable used by for_each_rtx callbacks, etc.  */
9562 static rtx mips_sim_insn;
9564 /* A structure representing the state of the processor pipeline.
9565    Used by the mips_sim_* family of functions.  */
9567   /* The maximum number of instructions that can be issued in a cycle.
9568      (Caches mips_issue_rate.)  */
9569   unsigned int issue_rate;
9571   /* The current simulation time.  */
9574   /* How many more instructions can be issued in the current cycle.  */
9575   unsigned int insns_left;
9577   /* LAST_SET[X].INSN is the last instruction to set register X.
9578      LAST_SET[X].TIME is the time at which that instruction was issued.
9579      INSN is null if no instruction has yet set register X.  */
9583   } last_set[FIRST_PSEUDO_REGISTER];
9585   /* The pipeline's current DFA state.  */
9589 /* Reset STATE to the initial simulation state.  */
9592 mips_sim_reset (struct mips_sim *state)
/* A fresh cycle can issue a full complement of instructions.  */
9595   state->insns_left = state->issue_rate;
/* Forget all recorded register writes and reset the DFA.  */
9596   memset (&state->last_set, 0, sizeof (state->last_set));
9597   state_reset (state->dfa_state);
9600 /* Initialize STATE before its first use.  DFA_STATE points to an
9601    allocated but uninitialized DFA state.  */
9604 mips_sim_init (struct mips_sim *state, state_t dfa_state)
9606   state->issue_rate = mips_issue_rate ();
9607   state->dfa_state = dfa_state;
9608   mips_sim_reset (state);
9611 /* Advance STATE by one clock cycle.  */
9614 mips_sim_next_cycle (struct mips_sim *state)
9617   state->insns_left = state->issue_rate;
/* Passing a null insn to state_transition advances the DFA by one
   cycle without issuing anything.  */
9618   state_transition (state->dfa_state, 0);
9621 /* Advance simulation state STATE until instruction INSN can read
9625 mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
/* Check every hard register REG occupies, not just its first one.  */
9629   for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
9630     if (state->last_set[REGNO (reg) + i].insn != 0)
/* T is the earliest time the value becomes available: issue time of
   the producer plus its latency to INSN.  */
9634 	t = state->last_set[REGNO (reg) + i].time;
9635 	t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
9636 	while (state->time < t)
9637 	  mips_sim_next_cycle (state);
9641 /* A for_each_rtx callback.  If *X is a register, advance simulation state
9642    DATA until mips_sim_insn can read the register's value.  */
9645 mips_sim_wait_regs_2 (rtx *x, void *data)
/* mips_sim_insn is the global set up by mips_sim_wait_regs.  */
9648     mips_sim_wait_reg (data, mips_sim_insn, *x);
9652 /* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.  */
9655 mips_sim_wait_regs_1 (rtx *x, void *data)
9657   for_each_rtx (x, mips_sim_wait_regs_2, data);
9660 /* Advance simulation state STATE until all of INSN's register
9661    dependencies are satisfied.  */
9664 mips_sim_wait_regs (struct mips_sim *state, rtx insn)
/* note_uses only passes the pattern to its callback, so the insn
   itself is communicated through the mips_sim_insn global.  */
9666   mips_sim_insn = insn;
9667   note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
9670 /* Advance simulation state STATE until the units required by
9671    instruction INSN are available.  */
9674 mips_sim_wait_units (struct mips_sim *state, rtx insn)
9678   tmp_state = alloca (state_size ());
/* Trial-issue INSN on a scratch copy of the DFA state; a nonnegative
   state_transition result means the issue would fail, so advance a
   cycle and retry.  The real state is never modified here.  */
9679   while (state->insns_left == 0
9680 	 || (memcpy (tmp_state, state->dfa_state, state_size ()),
9681 	     state_transition (tmp_state, insn) >= 0))
9682     mips_sim_next_cycle (state);
9685 /* Advance simulation state STATE until INSN is ready to issue.  */
9688 mips_sim_wait_insn (struct mips_sim *state, rtx insn)
/* First satisfy register dependencies, then functional-unit hazards.  */
9690   mips_sim_wait_regs (state, insn);
9691   mips_sim_wait_units (state, insn);
9694 /* mips_sim_insn has just set X.  Update the LAST_SET array
9695    in simulation state DATA.  */
9698 mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9700   struct mips_sim *state;
/* Record the setter and issue time for every hard register that X
   occupies.  */
9705       for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
9707 	  state->last_set[REGNO (x) + i].insn = mips_sim_insn;
9708 	  state->last_set[REGNO (x) + i].time = state->time;
9712 /* Issue instruction INSN in scheduler state STATE.  Assume that INSN
9713    can issue immediately (i.e., that mips_sim_wait_insn has already
9717 mips_sim_issue_insn (struct mips_sim *state, rtx insn)
9719   state_transition (state->dfa_state, insn);
9720   state->insns_left--;
/* note_stores passes only (dest, pattern) to its callback, so the
   issuing insn is communicated via the mips_sim_insn global.  */
9722   mips_sim_insn = insn;
9723   note_stores (PATTERN (insn), mips_sim_record_set, state);
9726 /* Simulate issuing a NOP in state STATE.  */
9729 mips_sim_issue_nop (struct mips_sim *state)
/* A nop consumes an issue slot but needs no units and sets nothing.  */
9731   if (state->insns_left == 0)
9732     mips_sim_next_cycle (state);
9733   state->insns_left--;
9736 /* Update simulation state STATE so that it's ready to accept the instruction
9737    after INSN.  INSN should be part of the main rtl chain, not a member of a
9741 mips_sim_finish_insn (struct mips_sim *state, rtx insn)
9743   /* If INSN is a jump with an implicit delay slot, simulate a nop.  */
9745     mips_sim_issue_nop (state);
9747   switch (GET_CODE (SEQ_BEGIN (insn)))
9751       /* We can't predict the processor state after a call or label.  */
9752       mips_sim_reset (state);
9756       /* The delay slots of branch likely instructions are only executed
9757 	 when the branch is taken.  Therefore, if the caller has simulated
9758 	 the delay slot instruction, STATE does not really reflect the state
9759 	 of the pipeline for the instruction after the delay slot.  Also,
9760 	 branch likely instructions tend to incur a penalty when not taken,
9761 	 so there will probably be an extra delay between the branch and
9762 	 the instruction after the delay slot.  */
9763       if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
9764 	mips_sim_reset (state);
9772 /* The VR4130 pipeline issues aligned pairs of instructions together,
9773    but it stalls the second instruction if it depends on the first.
9774    In order to cut down the amount of logic required, this dependence
9775    check is not based on a full instruction decode.  Instead, any non-SPECIAL
9776    instruction is assumed to modify the register specified by bits 20-16
9777    (which is usually the "rt" field).
9779    In beq, beql, bne and bnel instructions, the rt field is actually an
9780    input, so we can end up with a false dependence between the branch
9781    and its delay slot.  If this situation occurs in instruction INSN,
9782    try to avoid it by swapping rs and rt.  */
9785 vr4130_avoid_branch_rt_conflict (rtx insn)
9789   first = SEQ_BEGIN (insn);
9790   second = SEQ_END (insn);
/* Only handle a conditional branch (a SET of the PC from an
   IF_THEN_ELSE) whose delay slot holds an ordinary insn.  */
9792       && NONJUMP_INSN_P (second)
9793       && GET_CODE (PATTERN (first)) == SET
9794       && GET_CODE (SET_DEST (PATTERN (first))) == PC
9795       && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
9797       /* Check for the right kind of condition.  */
9798       rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
/* EQ/NE are symmetric, so the operands may be swapped freely.  */
9799       if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
9800 	  && REG_P (XEXP (cond, 0))
9801 	  && REG_P (XEXP (cond, 1))
9802 	  && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
9803 	  && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
9805 	  /* SECOND mentions the rt register but not the rs register.  */
9806 	  rtx tmp = XEXP (cond, 0);
9807 	  XEXP (cond, 0) = XEXP (cond, 1);
9808 	  XEXP (cond, 1) = tmp;
9813 /* Implement -mvr4130-align.  Go through each basic block and simulate the
9814    processor pipeline.  If we find that a pair of instructions could execute
9815    in parallel, and the first of those instruction is not 8-byte aligned,
9816    insert a nop to make it aligned.  */
9819 vr4130_align_insns (void)
9821   struct mips_sim state;
9822   rtx insn, subinsn, last, last2, next;
9827   /* LAST is the last instruction before INSN to have a nonzero length.
9828      LAST2 is the last such instruction before LAST.  */
9832   /* ALIGNED_P is true if INSN is known to be at an aligned address.  */
9835   mips_sim_init (&state, alloca (state_size ()));
9836   for (insn = get_insns (); insn != 0; insn = next)
9838       unsigned int length;
9840       next = NEXT_INSN (insn);
9842       /* See the comment above vr4130_avoid_branch_rt_conflict for details.
9843 	 This isn't really related to the alignment pass, but we do it on
9844 	 the fly to avoid a separate instruction walk.  */
9845       vr4130_avoid_branch_rt_conflict (insn);
9847       if (USEFUL_INSN_P (insn))
9848 	FOR_EACH_SUBINSN (subinsn, insn)
9850 	    mips_sim_wait_insn (&state, subinsn);
9852 	    /* If we want this instruction to issue in parallel with the
9853 	       previous one, make sure that the previous instruction is
9854 	       aligned.  There are several reasons why this isn't worthwhile
9855 	       when the second instruction is a call:
9857 	       - Calls are less likely to be performance critical,
9858 	       - There's a good chance that the delay slot can execute
9859 		 in parallel with the call.
9860 	       - The return address would then be unaligned.
9862 	       In general, if we're going to insert a nop between instructions
9863 	       X and Y, it's better to insert it immediately after X.  That
9864 	       way, if the nop makes Y aligned, it will also align any labels
9866 	    if (state.insns_left != state.issue_rate
9867 		&& !CALL_P (subinsn))
9869 		if (subinsn == SEQ_BEGIN (insn) && aligned_p)
9871 		    /* SUBINSN is the first instruction in INSN and INSN is
9872 		       aligned.  We want to align the previous instruction
9873 		       instead, so insert a nop between LAST2 and LAST.
9875 		       Note that LAST could be either a single instruction
9876 		       or a branch with a delay slot.  In the latter case,
9877 		       LAST, like INSN, is already aligned, but the delay
9878 		       slot must have some extra delay that stops it from
9879 		       issuing at the same time as the branch.  We therefore
9880 		       insert a nop before the branch in order to align its
9882 		    emit_insn_after (gen_nop (), last2);
9885 		else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
9887 		    /* SUBINSN is the delay slot of INSN, but INSN is
9888 		       currently unaligned.  Insert a nop between
9889 		       LAST and INSN to align it.  */
9890 		    emit_insn_after (gen_nop (), last);
9894 	    mips_sim_issue_insn (&state, subinsn);
9896 	mips_sim_finish_insn (&state, insn);
9898       /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
9899       length = get_attr_length (insn);
9902 	  /* If the instruction is an asm statement or multi-instruction
9903 	     mips.md patern, the length is only an estimate.  Insert an
9904 	     8 byte alignment after it so that the following instructions
9905 	     can be handled correctly.  */
9906 	  if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
9907 	      && (recog_memoized (insn) < 0 || length >= 8))
9909 	      next = emit_insn_after (gen_align (GEN_INT (3)), insn);
9910 	      next = NEXT_INSN (next);
9911 	      mips_sim_next_cycle (&state);
/* Each 4-byte instruction toggles the 8-byte alignment parity.  */
9914 	  else if (length & 4)
9915 	    aligned_p = !aligned_p;
9920       /* See whether INSN is an aligned label.  */
9921       if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
9927 /* Subroutine of mips_reorg.  If there is a hazard between INSN
9928    and a previous instruction, avoid it by inserting nops after
9931    *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
9932    this point.  If *DELAYED_REG is non-null, INSN must wait a cycle
9933    before using the value of that register.  *HILO_DELAY counts the
9934    number of instructions since the last hilo hazard (that is,
9935    the number of instructions since the last mflo or mfhi).
9937    After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
9938    for the next instruction.
9940    LO_REG is an rtx for the LO register, used in dependence checking.  */
9943 mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
9944 		   rtx *delayed_reg, rtx lo_reg)
9952   pattern = PATTERN (insn);
9954   /* Do not put the whole function in .set noreorder if it contains
9955      an asm statement.  We don't know whether there will be hazards
9956      between the asm statement and the gcc-generated code.  */
9957   if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
9958     cfun->machine->all_noreorder_p = false;
9960   /* Ignore zero-length instructions (barriers and the like).  */
9961   ninsns = get_attr_length (insn) / 4;
9965   /* Work out how many nops are needed.  Note that we only care about
9966      registers that are explicitly mentioned in the instruction's pattern.
9967      It doesn't matter that calls use the argument registers or that they
9968      clobber hi and lo.  */
/* A write to LO within two instructions of the last mfhi/mflo needs
   padding up to that two-instruction window.  */
9969   if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
9970     nops = 2 - *hilo_delay;
9971   else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
9976   /* Insert the nops between this instruction and the previous one.
9977      Each new nop takes us further from the last hilo hazard.  */
9978   *hilo_delay += nops;
9980     emit_insn_after (gen_hazard_nop (), after);
9982   /* Set up the state for the next instruction.  */
9983   *hilo_delay += ninsns;
/* Consult the insn's HAZARD attribute to see what the next
   instruction must be careful about.  */
9985   if (INSN_CODE (insn) >= 0)
9986     switch (get_attr_hazard (insn))
9996 	set = single_set (insn);
9997 	gcc_assert (set != 0);
9998 	*delayed_reg = SET_DEST (set);
10004 /* Go through the instruction stream and insert nops where necessary.
10005    See if the whole function can then be put into .set noreorder &
10009 mips_avoid_hazards (void)
10011   rtx insn, last_insn, lo_reg, delayed_reg;
10014   /* Force all instructions to be split into their final form.  */
10015   split_all_insns_noflow ();
10017   /* Recalculate instruction lengths without taking nops into account.  */
10018   cfun->machine->ignore_hazard_length_p = true;
10019   shorten_branches (get_insns ());
/* Optimistically assume .set noreorder is possible; the checks below
   and mips_avoid_hazard may clear this.  */
10021   cfun->machine->all_noreorder_p = true;
10023   /* Profiled functions can't be all noreorder because the profiler
10024      support uses assembler macros.  */
10025   if (current_function_profile)
10026     cfun->machine->all_noreorder_p = false;
10028   /* Code compiled with -mfix-vr4120 can't be all noreorder because
10029      we rely on the assembler to work around some errata.  */
10030   if (TARGET_FIX_VR4120)
10031     cfun->machine->all_noreorder_p = false;
10033   /* The same is true for -mfix-vr4130 if we might generate mflo or
10034      mfhi instructions.  Note that we avoid using mflo and mfhi if
10035      the VR4130 macc and dmacc instructions are available instead;
10036      see the *mfhilo_{si,di}_macc patterns.  */
10037   if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
10038     cfun->machine->all_noreorder_p = false;
10043   lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
10045   for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
/* A SEQUENCE (branch plus delay slot) is checked element by element;
   anything else is checked as a single instruction.  */
10048       if (GET_CODE (PATTERN (insn)) == SEQUENCE)
10049 	for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
10050 	  mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
10051 			     &hilo_delay, &delayed_reg, lo_reg);
10053 	mips_avoid_hazard (last_insn, insn, &hilo_delay,
10054 			   &delayed_reg, lo_reg);
10061 /* Implement TARGET_MACHINE_DEPENDENT_REORG.  */
/* NOTE(review): the function header lines are missing from this copy;
   presumably this is the body of mips_reorg — confirm against upstream.  */
10067     mips16_lay_out_constants ();
10068   else if (TARGET_EXPLICIT_RELOCS)
/* Delayed-branch scheduling must run before hazard avoidance so that
   the inserted nops account for the final instruction order.  */
10070       if (mips_flag_delayed_branch)
10071 	dbr_schedule (get_insns ());
10072       mips_avoid_hazards ();
10073       if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
10074 	vr4130_align_insns ();
10078 /* This function does three things:
10080    - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
10081    - Register the mips16 hardware floating point stubs.
10082    - Register the gofast functions if selected using --enable-gofast.  */
10084 #include "config/gofast.h"
10087 mips_init_libfuncs (void)
10089   if (TARGET_FIX_VR4120)
/* Errata workaround: route 32-bit division through fixed library
   routines instead of the hardware div instructions.  */
10091       set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
10092       set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
10095   if (mips16_hard_float)
/* MIPS16 code cannot access the FPU directly, so all SFmode
   arithmetic, comparisons and conversions go through out-of-line
   stubs.  */
10097       set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
10098       set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
10099       set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
10100       set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
10102       set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
10103       set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
10104       set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
10105       set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
10106       set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
10107       set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
10109       set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
10110       set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
/* DFmode stubs exist only when double-precision hardware float is
   selected.  */
10112       if (TARGET_DOUBLE_FLOAT)
10114 	  set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
10115 	  set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
10116 	  set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
10117 	  set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
10119 	  set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
10120 	  set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
10121 	  set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
10122 	  set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
10123 	  set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
10124 	  set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
10126 	  set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
10127 	  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
10129 	  set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
10130 	  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
10134     gofast_maybe_init_libfuncs ();
10137 /* Return a number assessing the cost of moving a register in class
10138    FROM to class TO.  The classes are expressed using the enumeration
10139    values such as `GENERAL_REGS'.  A value of 2 is the default; other
10140    values are interpreted relative to that.
10142    It is not required that the cost always equal 2 when FROM is the
10143    same as TO; on some machines it is expensive to move between
10144    registers if they are not general registers.
10146    If reload sees an insn consisting of a single `set' between two
10147    hard registers, and if `REGISTER_MOVE_COST' applied to their
10148    classes returns a value of 2, reload does not check to ensure that
10149    the constraints of the insn are met.  Setting a cost of other than
10150    2 will allow reload to verify that the constraints are met.  You
10151    should do this if the `movM' pattern's constraints do not allow
10154    ??? We make the cost of moving from HI/LO into general
10155    registers the same as for one of moving general registers to
10156    HI/LO for TARGET_MIPS16 in order to prevent allocating a
10157    pseudo to HI/LO.  This might hurt optimizations though, it
10158    isn't clear if it is wise.  And it might not work in all cases.  We
10159    could solve the DImode LO reg problem by using a multiply, just
10160    like reload_{in,out}si.  We could solve the SImode/HImode HI reg
10161    problem by using divide instructions.  divu puts the remainder in
10162    the HI reg, so doing a divide by -1 will move the value in the HI
10163    reg for all values except -1.  We could handle that case by using a
10164    signed divide, e.g.  -1 / 2 (or maybe 1 / -2?).  We'd have to emit
10165    a compare/branch to test the input value to see which instruction
10166    we need to use.  This gets pretty messy, but it is feasible.  */
10169 mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
10170 			 enum reg_class to, enum reg_class from)
/* MIPS16-addressable GPR subclasses first, then general GPRs, then
   FPRs, accumulators and coprocessor registers.  The returned values
   are all relative to the default cost of 2 (see the comment above).  */
10172   if (from == M16_REGS && reg_class_subset_p (to, GENERAL_REGS))
10174   else if (from == M16_NA_REGS && reg_class_subset_p (to, GENERAL_REGS))
10176   else if (reg_class_subset_p (from, GENERAL_REGS))
10178       if (to == M16_REGS)
10180       else if (to == M16_NA_REGS)
10182       else if (reg_class_subset_p (to, GENERAL_REGS))
10189       else if (to == FP_REGS)
10191       else if (reg_class_subset_p (to, ACC_REGS))
10198       else if (reg_class_subset_p (to, ALL_COP_REGS))
10203   else if (from == FP_REGS)
10205       if (reg_class_subset_p (to, GENERAL_REGS))
10207       else if (to == FP_REGS)
10209       else if (to == ST_REGS)
10212   else if (reg_class_subset_p (from, ACC_REGS))
10214       if (reg_class_subset_p (to, GENERAL_REGS))
10222   else if (from == ST_REGS && reg_class_subset_p (to, GENERAL_REGS))
10224   else if (reg_class_subset_p (from, ALL_COP_REGS))
10230      ??? What cases are these? Shouldn't we return 2 here?  */
10235 /* Return the length of INSN.  LENGTH is the initial length computed by
10236    attributes in the machine-description file.  */
10239 mips_adjust_insn_length (rtx insn, int length)
10241   /* A unconditional jump has an unfilled delay slot if it is not part
10242      of a sequence.  A conditional jump normally has a delay slot, but
10243      does not on MIPS16.  */
10244   if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
10247   /* See how many nops might be needed to avoid hardware hazards.  */
/* Skip this when mips_avoid_hazards is recomputing lengths without
   nops (see cfun->machine->ignore_hazard_length_p there).  */
10248   if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
10249     switch (get_attr_hazard (insn))
10263   /* All MIPS16 instructions are a measly two bytes.  */
10271 /* Return an asm sequence to start a noat block and load the address
10272    of a label into $1.  */
10275 mips_output_load_label (void)
10277   if (TARGET_EXPLICIT_RELOCS)
/* GOT-page + offset addressing; the 64-bit variant uses ld/daddiu.  */
10281 	return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
10284 	return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
/* On targets with a load delay slot, %# emits the needed nop.  */
10287 	if (ISA_HAS_LOAD_DELAY)
10288 	  return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
10289 	return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
/* Non-PIC: let the assembler expand (d)la into the right sequence.  */
10293       if (Pmode == DImode)
10294 	return "%[dla\t%@,%0";
10296 	return "%[la\t%@,%0";
10300 /* Return the assembly code for INSN, which has the operands given by
10301    OPERANDS, and which branches to OPERANDS[1] if some condition is true.
10302    BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
10303    is in range of a direct branch.  BRANCH_IF_FALSE is an inverted
10304    version of BRANCH_IF_TRUE.  */
10307 mips_output_conditional_branch (rtx insn, rtx *operands,
10308 				const char *branch_if_true,
10309 				const char *branch_if_false)
10311   unsigned int length;
10312   rtx taken, not_taken;
10314   length = get_attr_length (insn);
10317       /* Just a simple conditional branch.  */
10318       mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
10319       return branch_if_true;
10322   /* Generate a reversed branch around a direct jump.  This fallback does
10323      not use branch-likely instructions.  */
10324   mips_branch_likely = false;
10325   not_taken = gen_label_rtx ();
10326   taken = operands[1];
10328   /* Generate the reversed branch to NOT_TAKEN.  */
10329   operands[1] = not_taken;
10330   output_asm_insn (branch_if_false, operands);
10332   /* If INSN has a delay slot, we must provide delay slots for both the
10333      branch to NOT_TAKEN and the conditional jump.  We must also ensure
10334      that INSN's delay slot is executed in the appropriate cases.  */
10335   if (final_sequence)
10337       /* This first delay slot will always be executed, so use INSN's
10338 	 delay slot if is not annulled.  */
10339       if (!INSN_ANNULLED_BRANCH_P (insn))
10341 	  final_scan_insn (XVECEXP (final_sequence, 0, 1),
10342 			   asm_out_file, optimize, 1, NULL);
/* Mark the delay-slot insn as already output so final does not emit
   it a second time.  */
10343 	  INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10346 	output_asm_insn ("nop", 0);
10347       fprintf (asm_out_file, "\n");
10350   /* Output the unconditional branch to TAKEN.  */
10352     output_asm_insn ("j\t%0%/", &taken);
/* Target out of range of a direct jump: load the address into $1 and
   jump through it (see mips_output_load_label).  */
10355       output_asm_insn (mips_output_load_label (), &taken);
10356       output_asm_insn ("jr\t%@%]%/", 0);
10359   /* Now deal with its delay slot; see above.  */
10360   if (final_sequence)
10362       /* This delay slot will only be executed if the branch is taken.
10363 	 Use INSN's delay slot if is annulled.  */
10364       if (INSN_ANNULLED_BRANCH_P (insn))
10366 	  final_scan_insn (XVECEXP (final_sequence, 0, 1),
10367 			   asm_out_file, optimize, 1, NULL);
10368 	  INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
10371 	output_asm_insn ("nop", 0);
10372       fprintf (asm_out_file, "\n");
10375   /* Output NOT_TAKEN.  */
10376   (*targetm.asm_out.internal_label) (asm_out_file, "L",
10377 				     CODE_LABEL_NUMBER (not_taken));
10381 /* Return the assembly code for INSN, which branches to OPERANDS[1]
10382 if some ordered condition is true. The condition is given by
10383 OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
10384 OPERANDS[0]. OPERANDS[2] is the comparison's first operand;
10385 its second is always zero. */
10388 mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
10390 const char *branch[2];
10392 /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
10393 Make BRANCH[0] branch on the inverse condition. */
10394 switch (GET_CODE (operands[0]))
10396 /* These cases are equivalent to comparisons against zero. */
10398 inverted_p = !inverted_p;
10399 /* Fall through. */
10401 branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
10402 branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
10405 /* These cases are always true or always false. */
10407 inverted_p = !inverted_p;
10408 /* Fall through. */
10410 branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
10411 branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
10415 branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
10416 branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
10419 return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
10422 /* Used to output div or ddiv instruction DIVISION, which has the operands
10423 given by OPERANDS. Add in a divide-by-zero check if needed.
10425 When working around R4000 and R4400 errata, we need to make sure that
10426 the division is not immediately followed by a shift[1][2]. We also
10427 need to stop the division from being put into a branch delay slot[3].
10428 The easiest way to avoid both problems is to add a nop after the
10429 division. When a divide-by-zero check is needed, this nop can be
10430 used to fill the branch delay slot.
10432 [1] If a double-word or a variable shift executes immediately
10433 after starting an integer division, the shift may give an
10434 incorrect result. See quotations of errata #16 and #28 from
10435 "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10436 in mips.md for details.
10438 [2] A similar bug to [1] exists for all revisions of the
10439 R4000 and the R4400 when run in an MC configuration.
10440 From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
10442 "19. In this following sequence:
10444 ddiv (or ddivu or div or divu)
10445 dsll32 (or dsrl32, dsra32)
10447 if an MPT stall occurs, while the divide is slipping the cpu
10448 pipeline, then the following double shift would end up with an
10451 Workaround: The compiler needs to avoid generating any
10452 sequence with divide followed by extended double shift."
10454 This erratum is also present in "MIPS R4400MC Errata, Processor
10455 Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
10456 & 3.0" as errata #10 and #4, respectively.
10458 [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
10459 (also valid for MIPS R4000MC processors):
10461 "52. R4000SC: This bug does not apply for the R4000PC.
10463 There are two flavors of this bug:
10465 1) If the instruction just after divide takes an RF exception
10466 (tlb-refill, tlb-invalid) and gets an instruction cache
10467 miss (both primary and secondary) and the line which is
10468 currently in secondary cache at this index had the first
10469 data word, where the bits 5..2 are set, then R4000 would
10470 get a wrong result for the div.
10475 ------------------- # end-of page. -tlb-refill
10480 ------------------- # end-of page. -tlb-invalid
10483 2) If the divide is in the taken branch delay slot, where the
10484 target takes RF exception and gets an I-cache miss for the
10485 exception vector or where I-cache miss occurs for the
10486 target address, under the above mentioned scenarios, the
10487 div would get wrong results.
10490 j r2 # to next page mapped or unmapped
10491 div r8,r9 # this bug would be there as long
10492 # as there is an ICache miss and
10493 nop # the "data pattern" is present
10496 beq r0, r0, NextPage # to Next page
10500 This bug is present for div, divu, ddiv, and ddivu
10503 Workaround: For item 1), OS could make sure that the next page
10504 after the divide instruction is also mapped. For item 2), the
10505 compiler could make sure that the divide instruction is not in
10506 the branch delay slot."
10508 These processors have PRId values of 0x00004220 and 0x00004300 for
10509 the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400. */
10512 mips_output_division (const char *division, rtx *operands)
10517 if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
10519 output_asm_insn (s, operands);
10522 if (TARGET_CHECK_ZERO_DIV)
10526 output_asm_insn (s, operands);
10527 s = "bnez\t%2,1f\n\tbreak\t7\n1:";
10529 else if (GENERATE_DIVIDE_TRAPS)
10531 output_asm_insn (s, operands);
10532 s = "teq\t%2,%.,7";
10536 output_asm_insn ("%(bne\t%2,%.,1f", operands);
10537 output_asm_insn (s, operands);
10538 s = "break\t7%)\n1:";
10544 /* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
10545 with a final "000" replaced by "k". Ignore case.
10547 Note: this function is shared between GCC and GAS. */
10550 mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
10552 while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
10553 given++, canonical++;
10555 return ((*given == 0 && *canonical == 0)
10556 || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
10560 /* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
10561 CPU name. We've traditionally allowed a lot of variation here.
10563 Note: this function is shared between GCC and GAS. */
10566 mips_matching_cpu_name_p (const char *canonical, const char *given)
10568 /* First see if the name matches exactly, or with a final "000"
10569 turned into "k". */
10570 if (mips_strict_matching_cpu_name_p (canonical, given))
10573 /* If not, try comparing based on numerical designation alone.
10574 See if GIVEN is an unadorned number, or 'r' followed by a number. */
10575 if (TOLOWER (*given) == 'r')
10577 if (!ISDIGIT (*given))
10580 /* Skip over some well-known prefixes in the canonical name,
10581 hoping to find a number there too. */
10582 if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
10584 else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
10586 else if (TOLOWER (canonical[0]) == 'r')
10589 return mips_strict_matching_cpu_name_p (canonical, given);
10593 /* Return the mips_cpu_info entry for the processor or ISA given
10594 by CPU_STRING. Return null if the string isn't recognized.
10596 A similar function exists in GAS. */
10598 static const struct mips_cpu_info *
10599 mips_parse_cpu (const char *cpu_string)
10601 const struct mips_cpu_info *p;
10604 /* In the past, we allowed upper-case CPU names, but it doesn't
10605 work well with the multilib machinery. */
10606 for (s = cpu_string; *s != 0; s++)
10609 warning (0, "the cpu name must be lower case");
10613 /* 'from-abi' selects the most compatible architecture for the given
10614 ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs. For the
10615 EABIs, we have to decide whether we're using the 32-bit or 64-bit
10616 version. Look first at the -mgp options, if given, otherwise base
10617 the choice on MASK_64BIT in TARGET_DEFAULT. */
10618 if (strcasecmp (cpu_string, "from-abi") == 0)
10619 return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
10620 : ABI_NEEDS_64BIT_REGS ? 3
10621 : (TARGET_64BIT ? 3 : 1));
10623 /* 'default' has traditionally been a no-op. Probably not very useful. */
10624 if (strcasecmp (cpu_string, "default") == 0)
10627 for (p = mips_cpu_info_table; p->name != 0; p++)
10628 if (mips_matching_cpu_name_p (p->name, cpu_string))
10635 /* Return the processor associated with the given ISA level, or null
10636 if the ISA isn't valid. */
10638 static const struct mips_cpu_info *
10639 mips_cpu_info_from_isa (int isa)
10641 const struct mips_cpu_info *p;
10643 for (p = mips_cpu_info_table; p->name != 0; p++)
10650 /* Implement HARD_REGNO_NREGS. The size of FP registers is controlled
10651 by UNITS_PER_FPREG. The size of FP status registers is always 4, because
10652 they only hold condition code modes, and CCmode is always considered to
10653 be 4 bytes wide. All other registers are word sized. */
10656 mips_hard_regno_nregs (int regno, enum machine_mode mode)
10658 if (ST_REG_P (regno))
10659 return ((GET_MODE_SIZE (mode) + 3) / 4);
10660 else if (! FP_REG_P (regno))
10661 return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
10663 return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
10666 /* Implement TARGET_RETURN_IN_MEMORY. Under the old (i.e., 32 and O64 ABIs)
10667 all BLKmode objects are returned in memory. Under the new (N32 and
10668 64-bit MIPS ABIs) small structures are returned in a register.
10669 Objects with varying size must still be returned in memory, of
10673 mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
10676 return (TYPE_MODE (type) == BLKmode);
10678 return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
10679 || (int_size_in_bytes (type) == -1));
10683 mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
10685 return !TARGET_OLDABI;
10688 /* Return true if INSN is a multiply-add or multiply-subtract
10689 instruction and PREV assigns to the accumulator operand. */
10692 mips_linked_madd_p (rtx prev, rtx insn)
10696 x = single_set (insn);
10702 if (GET_CODE (x) == PLUS
10703 && GET_CODE (XEXP (x, 0)) == MULT
10704 && reg_set_p (XEXP (x, 1), prev))
10707 if (GET_CODE (x) == MINUS
10708 && GET_CODE (XEXP (x, 1)) == MULT
10709 && reg_set_p (XEXP (x, 0), prev))
10715 /* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
10716 that may clobber hi or lo. */
10718 static rtx mips_macc_chains_last_hilo;
10720 /* A TUNE_MACC_CHAINS helper function. Record that instruction INSN has
10721 been scheduled, updating mips_macc_chains_last_hilo appropriately. */
10724 mips_macc_chains_record (rtx insn)
10726 if (get_attr_may_clobber_hilo (insn))
10727 mips_macc_chains_last_hilo = insn;
10730 /* A TUNE_MACC_CHAINS helper function. Search ready queue READY, which
10731 has NREADY elements, looking for a multiply-add or multiply-subtract
10732 instruction that is cumulative with mips_macc_chains_last_hilo.
10733 If there is one, promote it ahead of anything else that might
10734 clobber hi or lo. */
10737 mips_macc_chains_reorder (rtx *ready, int nready)
10741 if (mips_macc_chains_last_hilo != 0)
10742 for (i = nready - 1; i >= 0; i--)
10743 if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
10745 for (j = nready - 1; j > i; j--)
10746 if (recog_memoized (ready[j]) >= 0
10747 && get_attr_may_clobber_hilo (ready[j]))
10749 mips_promote_ready (ready, i, j);
10756 /* The last instruction to be scheduled. */
10758 static rtx vr4130_last_insn;
10760 /* A note_stores callback used by vr4130_true_reg_dependence_p. DATA
10761 points to an rtx that is initially an instruction. Nullify the rtx
10762 if the instruction uses the value of register X. */
10765 vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
10767 rtx *insn_ptr = data;
10770 && reg_referenced_p (x, PATTERN (*insn_ptr)))
10774 /* Return true if there is true register dependence between vr4130_last_insn
10778 vr4130_true_reg_dependence_p (rtx insn)
10780 note_stores (PATTERN (vr4130_last_insn),
10781 vr4130_true_reg_dependence_p_1, &insn);
10785 /* A TUNE_MIPS4130 helper function. Given that INSN1 is at the head of
10786 the ready queue and that INSN2 is the instruction after it, return
10787 true if it is worth promoting INSN2 ahead of INSN1. Look for cases
10788 in which INSN1 and INSN2 can probably issue in parallel, but for
10789 which (INSN2, INSN1) should be less sensitive to instruction
10790 alignment than (INSN1, INSN2). See 4130.md for more details. */
/* NOTE(review): corrupted extract -- baked-in line numbers; the function
   declaration, the dep-link variable declaration, braces, and all of the
   return statements are missing (e.g. after 10809, after 10826, and the
   final return).  The visible heuristic: (a) if some anti-dependent ALU
   instruction X outprioritizes INSN2, presumably do not swap; (b) if
   INSN1/INSN2 can pair (different units, or both ALU), prefer whichever
   lacks a true dependence on vr4130_last_insn, else prefer INSN2 when
   INSN1 shares vr4130_last_insn's non-ALU unit.  Restore from upstream
   before editing.  */
10793 vr4130_swap_insns_p (rtx insn1, rtx insn2)
10797 /* Check for the following case:
10799 1) there is some other instruction X with an anti dependence on INSN1;
10800 2) X has a higher priority than INSN2; and
10801 3) X is an arithmetic instruction (and thus has no unit restrictions).
10803 If INSN1 is the last instruction blocking X, it would better to
10804 choose (INSN1, X) over (INSN2, INSN1). */
10805 FOR_EACH_DEP_LINK (dep, INSN_FORW_DEPS (insn1))
10806 if (DEP_LINK_KIND (dep) == REG_DEP_ANTI
10807 && INSN_PRIORITY (DEP_LINK_CON (dep)) > INSN_PRIORITY (insn2)
10808 && recog_memoized (DEP_LINK_CON (dep)) >= 0
10809 && get_attr_vr4130_class (DEP_LINK_CON (dep)) == VR4130_CLASS_ALU)
10812 if (vr4130_last_insn != 0
10813 && recog_memoized (insn1) >= 0
10814 && recog_memoized (insn2) >= 0)
10816 /* See whether INSN1 and INSN2 use different execution units,
10817 or if they are both ALU-type instructions. If so, they can
10818 probably execute in parallel. */
10819 enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
10820 enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
10821 if (class1 != class2 || class1 == VR4130_CLASS_ALU)
10823 /* If only one of the instructions has a dependence on
10824 vr4130_last_insn, prefer to schedule the other one first. */
10825 bool dep1 = vr4130_true_reg_dependence_p (insn1);
10826 bool dep2 = vr4130_true_reg_dependence_p (insn2);
10830 /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
10831 is not an ALU-type instruction and if INSN1 uses the same
10832 execution unit. (Note that if this condition holds, we already
10833 know that INSN2 uses a different execution unit.) */
10834 if (class1 != VR4130_CLASS_ALU
10835 && recog_memoized (vr4130_last_insn) >= 0
10836 && class1 == get_attr_vr4130_class (vr4130_last_insn))
10843 /* A TUNE_MIPS4130 helper function. (READY, NREADY) describes a ready
10844 queue with at least two instructions. Swap the first two if
10845 vr4130_swap_insns_p says that it could be worthwhile. */
10848 vr4130_reorder (rtx *ready, int nready)
10850 if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
10851 mips_promote_ready (ready, nready - 2, nready - 1);
10854 /* Remove the instruction at index LOWER from ready queue READY and
10855 reinsert it in front of the instruction at index HIGHER. LOWER must
10859 mips_promote_ready (rtx *ready, int lower, int higher)
10864 new_head = ready[lower];
10865 for (i = lower; i < higher; i++)
10866 ready[i] = ready[i + 1];
10867 ready[i] = new_head;
10870 /* Implement TARGET_SCHED_REORDER. */
10873 mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10874 rtx *ready, int *nreadyp, int cycle)
10876 if (!reload_completed && TUNE_MACC_CHAINS)
10879 mips_macc_chains_last_hilo = 0;
10881 mips_macc_chains_reorder (ready, *nreadyp);
10883 if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
10886 vr4130_last_insn = 0;
10888 vr4130_reorder (ready, *nreadyp);
10890 return mips_issue_rate ();
10893 /* Implement TARGET_SCHED_VARIABLE_ISSUE. */
10896 mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
10897 rtx insn, int more)
10899 switch (GET_CODE (PATTERN (insn)))
10903 /* Don't count USEs and CLOBBERs against the issue rate. */
10908 if (!reload_completed && TUNE_MACC_CHAINS)
10909 mips_macc_chains_record (insn);
10910 vr4130_last_insn = insn;
10916 /* Implement TARGET_SCHED_ADJUST_COST. We assume that anti and output
10917 dependencies have no cost, except on the 20Kc where output-dependence
10918 is treated like input-dependence. */
10921 mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
10922 rtx dep ATTRIBUTE_UNUSED, int cost)
10924 if (REG_NOTE_KIND (link) == REG_DEP_OUTPUT
10927 if (REG_NOTE_KIND (link) != 0)
10932 /* Return the number of instructions that can be issued per cycle. */
10935 mips_issue_rate (void)
10939 case PROCESSOR_74KC:
10940 case PROCESSOR_74KF2_1:
10941 case PROCESSOR_74KF1_1:
10942 case PROCESSOR_74KF3_2:
10943 /* The 74k is not strictly quad-issue cpu, but can be seen as one
10944 by the scheduler. It can issue 1 ALU, 1 AGEN and 2 FPU insns,
10945 but in reality only a maximum of 3 insns can be issued as the
10946 floating point load/stores also require a slot in the AGEN pipe. */
10949 case PROCESSOR_20KC:
10950 case PROCESSOR_R4130:
10951 case PROCESSOR_R5400:
10952 case PROCESSOR_R5500:
10953 case PROCESSOR_R7000:
10954 case PROCESSOR_R9000:
10957 case PROCESSOR_SB1:
10958 case PROCESSOR_SB1A:
10959 /* This is actually 4, but we get better performance if we claim 3.
10960 This is partly because of unwanted speculative code motion with the
10961 larger number, and partly because in most common cases we can't
10962 reach the theoretical max of 4. */
10970 /* Implements TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD. This should
10971 be as wide as the scheduling freedom in the DFA. */
10974 mips_multipass_dfa_lookahead (void)
10976 /* Can schedule up to 4 of the 6 function units in any one cycle. */
10983 /* Implements a store data bypass check. We need this because the cprestore
10984 pattern is type store, but defined using an UNSPEC. This UNSPEC causes the
10985 default routine to abort. We just return false for that case. */
10986 /* ??? Should try to give a better result here than assuming false. */
10989 mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
10991 if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
10994 return ! store_data_bypass_p (out_insn, in_insn);
10997 /* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10998 return the first operand of the associated "pref" or "prefx" insn. */
11001 mips_prefetch_cookie (rtx write, rtx locality)
11003 /* store_streamed / load_streamed. */
11004 if (INTVAL (locality) <= 0)
11005 return GEN_INT (INTVAL (write) + 4);
11007 /* store / load. */
11008 if (INTVAL (locality) <= 2)
11011 /* store_retained / load_retained. */
11012 return GEN_INT (INTVAL (write) + 6);
11015 /* MIPS builtin function support. */
11017 struct builtin_description
11019 /* The code of the main .md file instruction. See mips_builtin_type
11020 for more information. */
11021 enum insn_code icode;
11023 /* The floating-point comparison code to use with ICODE, if any. */
11024 enum mips_fp_condition cond;
11026 /* The name of the builtin function. */
11029 /* Specifies how the function should be expanded. */
11030 enum mips_builtin_type builtin_type;
11032 /* The function's prototype. */
11033 enum mips_function_type function_type;
11035 /* The target flags required for this function. */
/* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
   FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields.  */
#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)		\
  { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
    MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }

/* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
   TARGET_FLAGS.  */
#define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS)			\
  { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_" #INSN "_" #COND "_s",				\
    MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS },	\
  { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_" #INSN "_" #COND "_d",				\
    MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }

/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
   The lower and upper forms require TARGET_FLAGS while the any and all
   forms require MASK_MIPS3D.  */
#define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS)			\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_any_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_all_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_lower_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS },	\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_upper_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }

/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
   require MASK_MIPS3D.
   NOTE(review): the trailing MASK_MIPS3D initializers in this macro and
   the TARGET_FLAGS ones in MOVTF_BUILTINS were missing from the
   corrupted extract and have been reconstructed -- confirm upstream.  */
#define CMP_4S_BUILTINS(INSN, COND)					\
  { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_any_" #INSN "_" #COND "_4s",			\
    MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    MASK_MIPS3D },							\
  { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_all_" #INSN "_" #COND "_4s",			\
    MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    MASK_MIPS3D }

/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
   instruction requires TARGET_FLAGS.  */
#define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS)			\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_movt_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    TARGET_FLAGS },							\
  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
    "__builtin_mips_movf_" #INSN "_" #COND "_ps",			\
    MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
    TARGET_FLAGS }

/* Define all the builtins related to c.cond.fmt condition COND.  */
#define CMP_BUILTINS(COND)						\
  MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
  MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D),				\
  CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D),			\
  CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
  CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D),				\
  CMP_4S_BUILTINS (c, COND),						\
  CMP_4S_BUILTINS (cabs, COND)
11106 static const struct builtin_description mips_bdesc[] =
11108 DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11109 DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11110 DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11111 DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11112 DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
11113 DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11114 DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11115 DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
11117 DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
11118 MASK_PAIRED_SINGLE_FLOAT),
11119 DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11120 DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11121 DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11122 DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11124 DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11125 DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11126 DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11127 DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11128 DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11129 DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11131 DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
11132 DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
11133 DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
11134 DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
11135 DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
11136 DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
11138 MIPS_FP_CONDITIONS (CMP_BUILTINS)
11141 /* Builtin functions for the SB-1 processor. */
11143 #define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
11145 static const struct builtin_description sb1_bdesc[] =
11147 DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
/* Builtin functions for DSP ASE.  */

#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
#define CODE_FOR_mips_mul_ph CODE_FOR_mulv2hi3

/* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
   CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and TARGET_FLAGS are
   builtin_description fields.  */
#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)	\
  { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
    MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }

/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
   branch instruction.  TARGET_FLAGS is a builtin_description field.  */
#define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS)				\
  { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE,		\
    MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
11171 static const struct builtin_description dsp_bdesc[] =
11173 DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11174 DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11175 DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11176 DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11177 DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11178 DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11179 DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11180 DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11181 DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11182 DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11183 DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11184 DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11185 DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11186 DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
11187 DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
11188 DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
11189 DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11190 DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11191 DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
11192 DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
11193 DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11194 DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
11195 DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11196 DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11197 DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11198 DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11199 DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11200 DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11201 DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11202 DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
11203 DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11204 DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11205 DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11206 DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11207 DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
11208 DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11209 DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
11210 DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11211 DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11212 DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
11213 DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11214 DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11215 DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
11216 DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
11217 DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
11218 DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
11219 DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
11220 DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11221 DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11222 DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
11223 DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11224 DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11225 DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
11226 DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11227 DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11228 DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
11229 DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
11230 DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11231 DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
11232 DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
11233 DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
11234 DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11235 DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11236 DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
11237 BPOSGE_BUILTIN (32, MASK_DSP),
11239 /* The following are for the MIPS DSP ASE REV 2. */
11240 DIRECT_BUILTIN (absq_s_qb, MIPS_V4QI_FTYPE_V4QI, MASK_DSPR2),
11241 DIRECT_BUILTIN (addu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11242 DIRECT_BUILTIN (addu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11243 DIRECT_BUILTIN (adduh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11244 DIRECT_BUILTIN (adduh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11245 DIRECT_BUILTIN (append, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11246 DIRECT_BUILTIN (balign, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11247 DIRECT_BUILTIN (cmpgdu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11248 DIRECT_BUILTIN (cmpgdu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11249 DIRECT_BUILTIN (cmpgdu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11250 DIRECT_BUILTIN (mul_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11251 DIRECT_BUILTIN (mul_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11252 DIRECT_BUILTIN (mulq_rs_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11253 DIRECT_BUILTIN (mulq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11254 DIRECT_BUILTIN (mulq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11255 DIRECT_BUILTIN (precr_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11256 DIRECT_BUILTIN (precr_sra_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11257 DIRECT_BUILTIN (precr_sra_r_ph_w, MIPS_V2HI_FTYPE_SI_SI_SI, MASK_DSPR2),
11258 DIRECT_BUILTIN (prepend, MIPS_SI_FTYPE_SI_SI_SI, MASK_DSPR2),
11259 DIRECT_BUILTIN (shra_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11260 DIRECT_BUILTIN (shra_r_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSPR2),
11261 DIRECT_BUILTIN (shrl_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSPR2),
11262 DIRECT_BUILTIN (subu_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11263 DIRECT_BUILTIN (subu_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11264 DIRECT_BUILTIN (subuh_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11265 DIRECT_BUILTIN (subuh_r_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSPR2),
11266 DIRECT_BUILTIN (addqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11267 DIRECT_BUILTIN (addqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11268 DIRECT_BUILTIN (addqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11269 DIRECT_BUILTIN (addqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11270 DIRECT_BUILTIN (subqh_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11271 DIRECT_BUILTIN (subqh_r_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSPR2),
11272 DIRECT_BUILTIN (subqh_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2),
11273 DIRECT_BUILTIN (subqh_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSPR2)
11276 static const struct builtin_description dsp_32only_bdesc[] =
11278 DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11279 DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11280 DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11281 DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
11282 DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11283 DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11284 DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11285 DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11286 DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
11287 DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11288 DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11289 DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11290 DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
11291 DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11292 DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11293 DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11294 DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11295 DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11296 DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
11297 DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11298 DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
11300 /* The following are for the MIPS DSP ASE REV 2. */
11301 DIRECT_BUILTIN (dpa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11302 DIRECT_BUILTIN (dps_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11303 DIRECT_BUILTIN (madd, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11304 DIRECT_BUILTIN (maddu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11305 DIRECT_BUILTIN (msub, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSPR2),
11306 DIRECT_BUILTIN (msubu, MIPS_DI_FTYPE_DI_USI_USI, MASK_DSPR2),
11307 DIRECT_BUILTIN (mulsa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11308 DIRECT_BUILTIN (mult, MIPS_DI_FTYPE_SI_SI, MASK_DSPR2),
11309 DIRECT_BUILTIN (multu, MIPS_DI_FTYPE_USI_USI, MASK_DSPR2),
11310 DIRECT_BUILTIN (dpax_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11311 DIRECT_BUILTIN (dpsx_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11312 DIRECT_BUILTIN (dpaqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11313 DIRECT_BUILTIN (dpaqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11314 DIRECT_BUILTIN (dpsqx_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2),
11315 DIRECT_BUILTIN (dpsqx_sa_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSPR2)
11318 /* This helps provide a mapping from builtin function codes to bdesc
11323 /* The builtin function table that this entry describes. */
11324 const struct builtin_description *bdesc;
11326 /* The number of entries in the builtin function table. */
11329 /* The target processor that supports these builtin functions.
11330 PROCESSOR_MAX means we enable them for all processors. */
11331 enum processor_type proc;
11333 /* If the target has these flags, this builtin function table
11334 will not be supported. */
11335 int unsupported_target_flags;
11338 static const struct bdesc_map bdesc_arrays[] =
11340 { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX, 0 },
11341 { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1, 0 },
11342 { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX, 0 },
11343 { dsp_32only_bdesc, ARRAY_SIZE (dsp_32only_bdesc), PROCESSOR_MAX,
11347 /* Take the argument ARGNUM of the arglist of EXP and convert it into a form
11348 suitable for input operand OP of instruction ICODE. Return the value. */
11351 mips_prepare_builtin_arg (enum insn_code icode,
11352 unsigned int op, tree exp, unsigned int argnum)
11355 enum machine_mode mode;
11357 value = expand_normal (CALL_EXPR_ARG (exp, argnum));
11358 mode = insn_data[icode].operand[op].mode;
11359 if (!insn_data[icode].operand[op].predicate (value, mode))
11361 value = copy_to_mode_reg (mode, value);
11362 /* Check the predicate again. */
11363 if (!insn_data[icode].operand[op].predicate (value, mode))
11365 error ("invalid argument to builtin function");
11373 /* Return an rtx suitable for output operand OP of instruction ICODE.
11374 If TARGET is non-null, try to use it where possible. */
11377 mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
11379 enum machine_mode mode;
11381 mode = insn_data[icode].operand[op].mode;
11382 if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
11383 target = gen_reg_rtx (mode);
11388 /* Expand builtin functions. This is called from TARGET_EXPAND_BUILTIN. */
11391 mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
11392 enum machine_mode mode ATTRIBUTE_UNUSED,
11393 int ignore ATTRIBUTE_UNUSED)
11395 enum insn_code icode;
11396 enum mips_builtin_type type;
11398 unsigned int fcode;
11399 const struct builtin_description *bdesc;
11400 const struct bdesc_map *m;
11402 fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
11403 fcode = DECL_FUNCTION_CODE (fndecl);
11406 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11408 if (fcode < m->size)
11411 icode = bdesc[fcode].icode;
11412 type = bdesc[fcode].builtin_type;
11422 case MIPS_BUILTIN_DIRECT:
11423 return mips_expand_builtin_direct (icode, target, exp, true);
11425 case MIPS_BUILTIN_DIRECT_NO_TARGET:
11426 return mips_expand_builtin_direct (icode, target, exp, false);
11428 case MIPS_BUILTIN_MOVT:
11429 case MIPS_BUILTIN_MOVF:
11430 return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
11433 case MIPS_BUILTIN_CMP_ANY:
11434 case MIPS_BUILTIN_CMP_ALL:
11435 case MIPS_BUILTIN_CMP_UPPER:
11436 case MIPS_BUILTIN_CMP_LOWER:
11437 case MIPS_BUILTIN_CMP_SINGLE:
11438 return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
11441 case MIPS_BUILTIN_BPOSGE32:
11442 return mips_expand_builtin_bposge (type, target);
11449 /* Init builtin functions. This is called from TARGET_INIT_BUILTIN. */
11452 mips_init_builtins (void)
11454 const struct builtin_description *d;
11455 const struct bdesc_map *m;
11456 tree types[(int) MIPS_MAX_FTYPE_MAX];
11457 tree V2SF_type_node;
11458 tree V2HI_type_node;
11459 tree V4QI_type_node;
11460 unsigned int offset;
11462 /* We have only builtins for -mpaired-single, -mips3d and -mdsp. */
11463 if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
11466 if (TARGET_PAIRED_SINGLE_FLOAT)
11468 V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
11470 types[MIPS_V2SF_FTYPE_V2SF]
11471 = build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
11473 types[MIPS_V2SF_FTYPE_V2SF_V2SF]
11474 = build_function_type_list (V2SF_type_node,
11475 V2SF_type_node, V2SF_type_node, NULL_TREE);
11477 types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
11478 = build_function_type_list (V2SF_type_node,
11479 V2SF_type_node, V2SF_type_node,
11480 integer_type_node, NULL_TREE);
11482 types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
11483 = build_function_type_list (V2SF_type_node,
11484 V2SF_type_node, V2SF_type_node,
11485 V2SF_type_node, V2SF_type_node, NULL_TREE);
11487 types[MIPS_V2SF_FTYPE_SF_SF]
11488 = build_function_type_list (V2SF_type_node,
11489 float_type_node, float_type_node, NULL_TREE);
11491 types[MIPS_INT_FTYPE_V2SF_V2SF]
11492 = build_function_type_list (integer_type_node,
11493 V2SF_type_node, V2SF_type_node, NULL_TREE);
11495 types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
11496 = build_function_type_list (integer_type_node,
11497 V2SF_type_node, V2SF_type_node,
11498 V2SF_type_node, V2SF_type_node, NULL_TREE);
11500 types[MIPS_INT_FTYPE_SF_SF]
11501 = build_function_type_list (integer_type_node,
11502 float_type_node, float_type_node, NULL_TREE);
11504 types[MIPS_INT_FTYPE_DF_DF]
11505 = build_function_type_list (integer_type_node,
11506 double_type_node, double_type_node, NULL_TREE);
11508 types[MIPS_SF_FTYPE_V2SF]
11509 = build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
11511 types[MIPS_SF_FTYPE_SF]
11512 = build_function_type_list (float_type_node,
11513 float_type_node, NULL_TREE);
11515 types[MIPS_SF_FTYPE_SF_SF]
11516 = build_function_type_list (float_type_node,
11517 float_type_node, float_type_node, NULL_TREE);
11519 types[MIPS_DF_FTYPE_DF]
11520 = build_function_type_list (double_type_node,
11521 double_type_node, NULL_TREE);
11523 types[MIPS_DF_FTYPE_DF_DF]
11524 = build_function_type_list (double_type_node,
11525 double_type_node, double_type_node, NULL_TREE);
11530 V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
11531 V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
11533 types[MIPS_V2HI_FTYPE_V2HI_V2HI]
11534 = build_function_type_list (V2HI_type_node,
11535 V2HI_type_node, V2HI_type_node,
11538 types[MIPS_SI_FTYPE_SI_SI]
11539 = build_function_type_list (intSI_type_node,
11540 intSI_type_node, intSI_type_node,
11543 types[MIPS_V4QI_FTYPE_V4QI_V4QI]
11544 = build_function_type_list (V4QI_type_node,
11545 V4QI_type_node, V4QI_type_node,
11548 types[MIPS_SI_FTYPE_V4QI]
11549 = build_function_type_list (intSI_type_node,
11553 types[MIPS_V2HI_FTYPE_V2HI]
11554 = build_function_type_list (V2HI_type_node,
11558 types[MIPS_SI_FTYPE_SI]
11559 = build_function_type_list (intSI_type_node,
11563 types[MIPS_V4QI_FTYPE_V2HI_V2HI]
11564 = build_function_type_list (V4QI_type_node,
11565 V2HI_type_node, V2HI_type_node,
11568 types[MIPS_V2HI_FTYPE_SI_SI]
11569 = build_function_type_list (V2HI_type_node,
11570 intSI_type_node, intSI_type_node,
11573 types[MIPS_SI_FTYPE_V2HI]
11574 = build_function_type_list (intSI_type_node,
11578 types[MIPS_V2HI_FTYPE_V4QI]
11579 = build_function_type_list (V2HI_type_node,
11583 types[MIPS_V4QI_FTYPE_V4QI_SI]
11584 = build_function_type_list (V4QI_type_node,
11585 V4QI_type_node, intSI_type_node,
11588 types[MIPS_V2HI_FTYPE_V2HI_SI]
11589 = build_function_type_list (V2HI_type_node,
11590 V2HI_type_node, intSI_type_node,
11593 types[MIPS_V2HI_FTYPE_V4QI_V2HI]
11594 = build_function_type_list (V2HI_type_node,
11595 V4QI_type_node, V2HI_type_node,
11598 types[MIPS_SI_FTYPE_V2HI_V2HI]
11599 = build_function_type_list (intSI_type_node,
11600 V2HI_type_node, V2HI_type_node,
11603 types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
11604 = build_function_type_list (intDI_type_node,
11605 intDI_type_node, V4QI_type_node, V4QI_type_node,
11608 types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
11609 = build_function_type_list (intDI_type_node,
11610 intDI_type_node, V2HI_type_node, V2HI_type_node,
11613 types[MIPS_DI_FTYPE_DI_SI_SI]
11614 = build_function_type_list (intDI_type_node,
11615 intDI_type_node, intSI_type_node, intSI_type_node,
11618 types[MIPS_V4QI_FTYPE_SI]
11619 = build_function_type_list (V4QI_type_node,
11623 types[MIPS_V2HI_FTYPE_SI]
11624 = build_function_type_list (V2HI_type_node,
11628 types[MIPS_VOID_FTYPE_V4QI_V4QI]
11629 = build_function_type_list (void_type_node,
11630 V4QI_type_node, V4QI_type_node,
11633 types[MIPS_SI_FTYPE_V4QI_V4QI]
11634 = build_function_type_list (intSI_type_node,
11635 V4QI_type_node, V4QI_type_node,
11638 types[MIPS_VOID_FTYPE_V2HI_V2HI]
11639 = build_function_type_list (void_type_node,
11640 V2HI_type_node, V2HI_type_node,
11643 types[MIPS_SI_FTYPE_DI_SI]
11644 = build_function_type_list (intSI_type_node,
11645 intDI_type_node, intSI_type_node,
11648 types[MIPS_DI_FTYPE_DI_SI]
11649 = build_function_type_list (intDI_type_node,
11650 intDI_type_node, intSI_type_node,
11653 types[MIPS_VOID_FTYPE_SI_SI]
11654 = build_function_type_list (void_type_node,
11655 intSI_type_node, intSI_type_node,
11658 types[MIPS_SI_FTYPE_PTR_SI]
11659 = build_function_type_list (intSI_type_node,
11660 ptr_type_node, intSI_type_node,
11663 types[MIPS_SI_FTYPE_VOID]
11664 = build_function_type (intSI_type_node, void_list_node);
11668 types[MIPS_V4QI_FTYPE_V4QI]
11669 = build_function_type_list (V4QI_type_node,
11673 types[MIPS_SI_FTYPE_SI_SI_SI]
11674 = build_function_type_list (intSI_type_node,
11675 intSI_type_node, intSI_type_node,
11676 intSI_type_node, NULL_TREE);
11678 types[MIPS_DI_FTYPE_DI_USI_USI]
11679 = build_function_type_list (intDI_type_node,
11681 unsigned_intSI_type_node,
11682 unsigned_intSI_type_node, NULL_TREE);
11684 types[MIPS_DI_FTYPE_SI_SI]
11685 = build_function_type_list (intDI_type_node,
11686 intSI_type_node, intSI_type_node,
11689 types[MIPS_DI_FTYPE_USI_USI]
11690 = build_function_type_list (intDI_type_node,
11691 unsigned_intSI_type_node,
11692 unsigned_intSI_type_node, NULL_TREE);
11694 types[MIPS_V2HI_FTYPE_SI_SI_SI]
11695 = build_function_type_list (V2HI_type_node,
11696 intSI_type_node, intSI_type_node,
11697 intSI_type_node, NULL_TREE);
11702 /* Iterate through all of the bdesc arrays, initializing all of the
11703 builtin functions. */
11706 for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
11708 if ((m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
11709 && (m->unsupported_target_flags & target_flags) == 0)
11710 for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
11711 if ((d->target_flags & target_flags) == d->target_flags)
11712 add_builtin_function (d->name, types[d->function_type],
11713 d - m->bdesc + offset,
11714 BUILT_IN_MD, NULL, NULL);
11719 /* Expand a MIPS_BUILTIN_DIRECT function. ICODE is the code of the
11720 .md pattern and CALL is the function expr with arguments. TARGET,
11721 if nonnull, suggests a good place to put the result.
11722 HAS_TARGET indicates the function must return something. */
11725 mips_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
11728 rtx ops[MAX_RECOG_OPERANDS];
11734 /* We save target to ops[0]. */
11735 ops[0] = mips_prepare_builtin_target (icode, 0, target);
11739 /* We need to test if the arglist is not zero. Some instructions have extra
11740 clobber registers. */
11741 for (; i < insn_data[icode].n_operands && i <= call_expr_nargs (exp); i++, j++)
11742 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11747 emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
11751 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
11755 emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
11759 gcc_unreachable ();
11764 /* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
11765 function (TYPE says which). EXP is the tree for the function
11766 function, ICODE is the instruction that should be used to compare
11767 the first two arguments, and COND is the condition it should test.
11768 TARGET, if nonnull, suggests a good place to put the result. */
11771 mips_expand_builtin_movtf (enum mips_builtin_type type,
11772 enum insn_code icode, enum mips_fp_condition cond,
11773 rtx target, tree exp)
11775 rtx cmp_result, op0, op1;
11777 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11778 op0 = mips_prepare_builtin_arg (icode, 1, exp, 0);
11779 op1 = mips_prepare_builtin_arg (icode, 2, exp, 1);
11780 emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
11782 icode = CODE_FOR_mips_cond_move_tf_ps;
11783 target = mips_prepare_builtin_target (icode, 0, target);
11784 if (type == MIPS_BUILTIN_MOVT)
11786 op1 = mips_prepare_builtin_arg (icode, 2, exp, 2);
11787 op0 = mips_prepare_builtin_arg (icode, 1, exp, 3);
11791 op0 = mips_prepare_builtin_arg (icode, 1, exp, 2);
11792 op1 = mips_prepare_builtin_arg (icode, 2, exp, 3);
11794 emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
11798 /* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
11799 into TARGET otherwise. Return TARGET. */
11802 mips_builtin_branch_and_move (rtx condition, rtx target,
11803 rtx value_if_true, rtx value_if_false)
11805 rtx true_label, done_label;
11807 true_label = gen_label_rtx ();
11808 done_label = gen_label_rtx ();
11810 /* First assume that CONDITION is false. */
11811 emit_move_insn (target, value_if_false);
11813 /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise. */
11814 emit_jump_insn (gen_condjump (condition, true_label));
11815 emit_jump_insn (gen_jump (done_label));
11818 /* Fix TARGET if CONDITION is true. */
11819 emit_label (true_label);
11820 emit_move_insn (target, value_if_true);
11822 emit_label (done_label);
11826 /* Expand a comparison builtin of type BUILTIN_TYPE. ICODE is the code
11827 of the comparison instruction and COND is the condition it should test.
11828 EXP is the function call and arguments and TARGET, if nonnull,
11829 suggests a good place to put the boolean result. */
11832 mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
11833 enum insn_code icode, enum mips_fp_condition cond,
11834 rtx target, tree exp)
11836 rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
11840 if (target == 0 || GET_MODE (target) != SImode)
11841 target = gen_reg_rtx (SImode);
11843 /* Prepare the operands to the comparison. */
11844 cmp_result = mips_prepare_builtin_target (icode, 0, 0);
11845 for (i = 1; i < insn_data[icode].n_operands - 1; i++, j++)
11846 ops[i] = mips_prepare_builtin_arg (icode, i, exp, j);
11848 switch (insn_data[icode].n_operands)
11851 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
11855 emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
11856 ops[3], ops[4], GEN_INT (cond)));
11860 gcc_unreachable ();
11863 /* If the comparison sets more than one register, we define the result
11864 to be 0 if all registers are false and -1 if all registers are true.
11865 The value of the complete result is indeterminate otherwise. */
11866 switch (builtin_type)
11868 case MIPS_BUILTIN_CMP_ALL:
11869 condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
11870 return mips_builtin_branch_and_move (condition, target,
11871 const0_rtx, const1_rtx);
11873 case MIPS_BUILTIN_CMP_UPPER:
11874 case MIPS_BUILTIN_CMP_LOWER:
11875 offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
11876 condition = gen_single_cc (cmp_result, offset);
11877 return mips_builtin_branch_and_move (condition, target,
11878 const1_rtx, const0_rtx);
11881 condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
11882 return mips_builtin_branch_and_move (condition, target,
11883 const1_rtx, const0_rtx);
11887 /* Expand a bposge builtin of type BUILTIN_TYPE. TARGET, if nonnull,
11888 suggests a good place to put the boolean result. */
11891 mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
11893 rtx condition, cmp_result;
11896 if (target == 0 || GET_MODE (target) != SImode)
11897 target = gen_reg_rtx (SImode);
11899 cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
11901 if (builtin_type == MIPS_BUILTIN_BPOSGE32)
11906 condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
11907 return mips_builtin_branch_and_move (condition, target,
11908 const1_rtx, const0_rtx);
11911 /* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
11912 FIRST is true if this is the first time handling this decl. */
11915 mips_encode_section_info (tree decl, rtx rtl, int first)
11917 default_encode_section_info (decl, rtl, first);
11919 if (TREE_CODE (decl) == FUNCTION_DECL)
11921 rtx symbol = XEXP (rtl, 0);
11923 if ((TARGET_LONG_CALLS && !mips_near_type_p (TREE_TYPE (decl)))
11924 || mips_far_type_p (TREE_TYPE (decl)))
11925 SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
11929 /* Implement TARGET_EXTRA_LIVE_ON_ENTRY. Some code models use the incoming
11930 value of PIC_FUNCTION_ADDR_REGNUM to set up the global pointer. */
11933 mips_extra_live_on_entry (bitmap regs)
11935 if (TARGET_USE_GOT && !TARGET_ABSOLUTE_ABICALLS)
11936 bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
11939 /* SImode values are represented as sign-extended to DImode. */
11942 mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
11944 if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
11945 return SIGN_EXTEND;
11950 /* MIPS implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
11953 mips_output_dwarf_dtprel (FILE *file, int size, rtx x)
11958 fputs ("\t.dtprelword\t", file);
11962 fputs ("\t.dtpreldword\t", file);
11966 gcc_unreachable ();
11968 output_addr_const (file, x);
11969 fputs ("+0x8000", file);
11972 #include "gt-mips.h"